// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

/* Inter-Driver Communication */
#include "ice.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

static DEFINE_XARRAY_ALLOC1(ice_aux_id);

/**
 * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
 * @pf: pointer to PF struct
 *
 * This function has to be called with a device_lock on the
 * pf->adev->dev to avoid race conditions.
 */
static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
{
	struct auxiliary_device *adev;

	adev = pf->adev;
	if (!adev || !adev->dev.driver)
		return NULL;

	return container_of(adev->dev.driver, struct iidc_auxiliary_drv,
			    adrv.driver);
}
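
/* For the container_of() above to resolve correctly, the bound auxiliary
 * driver must embed its struct auxiliary_driver as the adrv member of a
 * struct iidc_auxiliary_drv. A minimal sketch of such a registration
 * (hypothetical aux-driver code, not part of this file):
 *
 *	static struct iidc_auxiliary_drv my_iadrv = {
 *		.adrv = {
 *			.id_table = my_id_table,
 *			.probe = my_probe,
 *			.remove = my_remove,
 *		},
 *		.event_handler = my_event_handler,
 *	};
 *
 *	ret = auxiliary_driver_register(&my_iadrv.adrv);
 */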

/**
 * ice_send_event_to_aux - send event to RDMA AUX driver
 * @pf: pointer to PF struct
 * @event: event struct
 */
void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
{
	struct iidc_auxiliary_drv *iadrv;

	if (WARN_ON_ONCE(!in_task()))
		return;

	mutex_lock(&pf->adev_mutex);
	if (!pf->adev)
		goto finish;

	device_lock(&pf->adev->dev);
	iadrv = ice_get_auxiliary_drv(pf);
	if (iadrv && iadrv->event_handler)
		iadrv->event_handler(pf, event);
	device_unlock(&pf->adev->dev);
finish:
	mutex_unlock(&pf->adev_mutex);
}
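
/* A typical caller allocates an iidc_event, sets one IIDC_EVENT_* bit in
 * its type bitmap, and hands it off; a sketch of such a notification (the
 * event bit shown is one of those declared in iidc.h):
 *
 *	struct iidc_event *event;
 *
 *	event = kzalloc(sizeof(*event), GFP_KERNEL);
 *	if (event) {
 *		set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
 *		ice_send_event_to_aux(pf, event);
 *		kfree(event);
 *	}
 */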

/**
 * ice_add_rdma_qset - Add Leaf Node for RDMA Qset
 * @pf: PF struct
 * @qset: Resource to be allocated
 */
int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
{
	u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];
	struct ice_vsi *vsi;
	struct device *dev;
	u32 qset_teid;
	u16 qs_handle;
	int status;
	int i;

	if (WARN_ON(!pf || !qset))
		return -EINVAL;

	dev = ice_pf_to_dev(pf);

	if (!ice_is_rdma_ena(pf))
		return -EINVAL;

	vsi = ice_get_main_vsi(pf);
	if (!vsi) {
		dev_err(dev, "RDMA QSet invalid VSI\n");
		return -EINVAL;
	}

	ice_for_each_traffic_class(i)
		max_rdmaqs[i] = 0;

	max_rdmaqs[qset->tc]++;
	qs_handle = qset->qs_handle;

	status = ice_cfg_vsi_rdma(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				  max_rdmaqs);
	if (status) {
		dev_err(dev, "Failed VSI RDMA Qset config\n");
		return status;
	}

	status = ice_ena_vsi_rdma_qset(vsi->port_info, vsi->idx, qset->tc,
				       &qs_handle, 1, &qset_teid);
	if (status) {
		dev_err(dev, "Failed VSI RDMA Qset enable\n");
		return status;
	}
	vsi->qset_handle[qset->tc] = qset->qs_handle;
	qset->teid = qset_teid;

	return 0;
}
EXPORT_SYMBOL_GPL(ice_add_rdma_qset);
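
/* The RDMA auxiliary driver is the intended caller. A sketch of its use,
 * with field names taken from struct iidc_rdma_qset_params (the local
 * variables here are hypothetical):
 *
 *	struct iidc_rdma_qset_params qset = {
 *		.qs_handle = qs_handle,	(caller-chosen qset handle)
 *		.vport_id = vsi_id,
 *		.tc = tc,
 *	};
 *
 *	if (!ice_add_rdma_qset(pf, &qset))
 *		teid = qset.teid;	(HW node TEID, filled in on success)
 */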

/**
 * ice_del_rdma_qset - Delete leaf node for RDMA Qset
 * @pf: PF struct
 * @qset: Resource to be freed
 */
int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
{
	struct ice_vsi *vsi;
	u32 teid;
	u16 q_id;

	if (WARN_ON(!pf || !qset))
		return -EINVAL;

	vsi = ice_find_vsi(pf, qset->vport_id);
	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n");
		return -EINVAL;
	}

	q_id = qset->qs_handle;
	teid = qset->teid;

	vsi->qset_handle[qset->tc] = 0;

	return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id);
}
EXPORT_SYMBOL_GPL(ice_del_rdma_qset);
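
/* Teardown mirrors ice_add_rdma_qset(): the caller passes back the same
 * qset whose teid was filled in by the add path, e.g. (sketch):
 *
 *	ice_del_rdma_qset(pf, &qset);
 */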

/**
 * ice_rdma_request_reset - accept request from RDMA to perform a reset
 * @pf: struct for PF
 * @reset_type: type of reset
 */
int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type)
{
	enum ice_reset_req reset;

	if (WARN_ON(!pf))
		return -EINVAL;

	switch (reset_type) {
	case IIDC_PFR:
		reset = ICE_RESET_PFR;
		break;
	case IIDC_CORER:
		reset = ICE_RESET_CORER;
		break;
	case IIDC_GLOBR:
		reset = ICE_RESET_GLOBR;
		break;
	default:
		dev_err(ice_pf_to_dev(pf), "incorrect reset request\n");
		return -EINVAL;
	}

	return ice_schedule_reset(pf, reset);
}
EXPORT_SYMBOL_GPL(ice_rdma_request_reset);
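
/* Example (hypothetical caller): an RDMA driver that hits an unrecoverable
 * HW error can escalate to a PF-level reset with:
 *
 *	err = ice_rdma_request_reset(pf, IIDC_PFR);
 *
 * Note that ice_schedule_reset() only queues the request; the reset itself
 * is carried out asynchronously by the ice service task.
 */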

/**
 * ice_rdma_update_vsi_filter - update main VSI filters for RDMA
 * @pf: pointer to struct for PF
 * @vsi_id: VSI HW idx to update filter on
 * @enable: bool whether to enable or disable filters
 */
int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
{
	struct ice_vsi *vsi;
	int status;

	if (WARN_ON(!pf))
		return -EINVAL;

	vsi = ice_find_vsi(pf, vsi_id);
	if (!vsi)
		return -EINVAL;

	status = ice_cfg_rdma_fltr(&pf->hw, vsi->idx, enable);
	if (status) {
		dev_err(ice_pf_to_dev(pf), "Failed to %sable RDMA filtering\n",
			enable ? "en" : "dis");
	} else {
		if (enable)
			vsi->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
		else
			vsi->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
	}

	return status;
}
EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter);
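
/* Example (hypothetical caller): the RDMA driver turns protocol-engine
 * filtering on while its interface is open and back off when it closes:
 *
 *	err = ice_rdma_update_vsi_filter(pf, vsi_id, true);
 *	...
 *	ice_rdma_update_vsi_filter(pf, vsi_id, false);
 */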

/**
 * ice_get_qos_params - parse QoS params for RDMA consumption
 * @pf: pointer to PF struct
 * @qos: set of QoS values
 */
void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
{
	struct ice_dcbx_cfg *dcbx_cfg;
	unsigned int i;
	u32 up2tc;

	dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
	up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);

	qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
	for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
		qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];

	qos->pfc_mode = dcbx_cfg->pfc_mode;
	if (qos->pfc_mode == IIDC_DSCP_PFC_MODE)
		for (i = 0; i < IIDC_MAX_DSCP_MAPPING; i++)
			qos->dscp_map[i] = dcbx_cfg->dscp_map[i];
}
EXPORT_SYMBOL_GPL(ice_get_qos_params);
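
/* PRTDCB_TUP2TC packs one 3-bit TC index per user priority, which is why
 * the first loop above extracts bits (i * 3)..(i * 3 + 2) for UP i. A
 * consumer reads the snapshot roughly like so (sketch, hypothetical
 * caller):
 *
 *	struct iidc_qos_params qos;
 *
 *	ice_get_qos_params(pf, &qos);
 *	tc = qos.up2tc[up];		(TC servicing user priority "up")
 *	bw = qos.tc_info[tc].rel_bw;	(relative ETS bandwidth of that TC)
 */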

/**
 * ice_alloc_rdma_qvectors - Allocate vector resources for RDMA driver
 * @pf: board private structure to initialize
 */
static int ice_alloc_rdma_qvectors(struct ice_pf *pf)
{
	if (ice_is_rdma_ena(pf)) {
		int i;

		pf->msix_entries = kcalloc(pf->num_rdma_msix,
					   sizeof(*pf->msix_entries),
					   GFP_KERNEL);
		if (!pf->msix_entries)
			return -ENOMEM;

		/* RDMA is the only user of pf->msix_entries array */
		pf->rdma_base_vector = 0;

		for (i = 0; i < pf->num_rdma_msix; i++) {
			struct msix_entry *entry = &pf->msix_entries[i];
			struct msi_map map;

			map = ice_alloc_irq(pf, false);
			if (map.index < 0)
				break;

			entry->entry = map.index;
			entry->vector = map.virq;
		}
	}
	return 0;
}
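
/* Each msix_entry pairs the device-relative vector index (entry, from
 * map.index) with the Linux IRQ number (vector, from map.virq). The RDMA
 * driver side would then request its IRQs from these, along the lines of
 * (sketch, hypothetical handler and cookie):
 *
 *	struct msix_entry *ent = &pf->msix_entries[i];
 *
 *	err = request_irq(ent->vector, my_ceq_handler, 0, "my_rdma", data);
 */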

/**
 * ice_free_rdma_qvector - free vector resources reserved for RDMA driver
 * @pf: board private structure
 */
static void ice_free_rdma_qvector(struct ice_pf *pf)
{
	int i;

	if (!pf->msix_entries)
		return;

	for (i = 0; i < pf->num_rdma_msix; i++) {
		struct msi_map map;

		map.index = pf->msix_entries[i].entry;
		map.virq = pf->msix_entries[i].vector;
		ice_free_irq(pf, map);
	}

	kfree(pf->msix_entries);
	pf->msix_entries = NULL;
}

/**
 * ice_adev_release - function to be mapped to AUX dev's release op
 * @dev: pointer to device to free
 */
static void ice_adev_release(struct device *dev)
{
	struct iidc_auxiliary_dev *iadev;

	iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev);
	kfree(iadev);
}

/**
 * ice_plug_aux_dev - allocate and register AUX device
 * @pf: pointer to pf struct
 */
int ice_plug_aux_dev(struct ice_pf *pf)
{
	struct iidc_auxiliary_dev *iadev;
	struct auxiliary_device *adev;
	int ret;

	/* if this PF doesn't support a technology that requires auxiliary
	 * devices, then gracefully exit
	 */
	if (!ice_is_rdma_ena(pf))
		return 0;

	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
	if (!iadev)
		return -ENOMEM;

	adev = &iadev->adev;
	iadev->pf = pf;

	adev->id = pf->aux_idx;
	adev->dev.release = ice_adev_release;
	adev->dev.parent = &pf->pdev->dev;
	adev->name = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? "roce" : "iwarp";

	ret = auxiliary_device_init(adev);
	if (ret) {
		kfree(iadev);
		return ret;
	}

	ret = auxiliary_device_add(adev);
	if (ret) {
		auxiliary_device_uninit(adev);
		return ret;
	}

	mutex_lock(&pf->adev_mutex);
	pf->adev = adev;
	mutex_unlock(&pf->adev_mutex);

	return 0;
}
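
/* The auxiliary bus matches adev->name, prefixed with this module's
 * KBUILD_MODNAME, against registered drivers' id tables; a driver binding
 * to the RoCEv2 device above would carry something like (sketch):
 *
 *	static const struct auxiliary_device_id my_id_table[] = {
 *		{ .name = "ice.roce" },
 *		{ }
 *	};
 */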

/**
 * ice_unplug_aux_dev - unregister and free AUX device
 * @pf: pointer to pf struct
 */
void ice_unplug_aux_dev(struct ice_pf *pf)
{
	struct auxiliary_device *adev;

	mutex_lock(&pf->adev_mutex);
	adev = pf->adev;
	pf->adev = NULL;
	mutex_unlock(&pf->adev_mutex);

	if (adev) {
		auxiliary_device_delete(adev);
		auxiliary_device_uninit(adev);
	}
}

/**
 * ice_init_rdma - initializes PF for RDMA use
 * @pf: ptr to ice_pf
 */
int ice_init_rdma(struct ice_pf *pf)
{
	struct device *dev = &pf->pdev->dev;
	int ret;

	if (!ice_is_rdma_ena(pf)) {
		dev_warn(dev, "RDMA is not supported on this device\n");
		return 0;
	}

	ret = xa_alloc(&ice_aux_id, &pf->aux_idx, NULL, XA_LIMIT(1, INT_MAX),
		       GFP_KERNEL);
	if (ret) {
		dev_err(dev, "Failed to allocate device ID for AUX driver\n");
		return -ENOMEM;
	}

	/* Reserve vector resources */
	ret = ice_alloc_rdma_qvectors(pf);
	if (ret < 0) {
		dev_err(dev, "failed to reserve vectors for RDMA\n");
		goto err_reserve_rdma_qvector;
	}
	pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
	ret = ice_plug_aux_dev(pf);
	if (ret)
		goto err_plug_aux_dev;
	return 0;

err_plug_aux_dev:
	ice_free_rdma_qvector(pf);
err_reserve_rdma_qvector:
	pf->adev = NULL;
	xa_erase(&ice_aux_id, pf->aux_idx);
	return ret;
}
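
/* ice_init_rdma() is expected to run once per PF during probe, after MSI-X
 * and the main VSI have been set up; a sketch of the call site (assuming
 * the surrounding probe flow):
 *
 *	err = ice_init_rdma(pf);
 *	if (err)
 *		dev_err(dev, "Failed to initialize RDMA: %d\n", err);
 */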

/**
 * ice_deinit_rdma - deinitialize RDMA on PF
 * @pf: ptr to ice_pf
 */
void ice_deinit_rdma(struct ice_pf *pf)
{
	if (!ice_is_rdma_ena(pf))
		return;

	ice_unplug_aux_dev(pf);
	ice_free_rdma_qvector(pf);
	xa_erase(&ice_aux_id, pf->aux_idx);
}