1 | /* |
2 | * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. |
3 | * Copyright (c) 2004 Infinicon Corporation. All rights reserved. |
4 | * Copyright (c) 2004 Intel Corporation. All rights reserved. |
5 | * Copyright (c) 2004 Topspin Corporation. All rights reserved. |
6 | * Copyright (c) 2004 Voltaire Corporation. All rights reserved. |
7 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. |
8 | * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. |
9 | * |
10 | * This software is available to you under a choice of one of two |
11 | * licenses. You may choose to be licensed under the terms of the GNU |
12 | * General Public License (GPL) Version 2, available from the file |
13 | * COPYING in the main directory of this source tree, or the |
14 | * OpenIB.org BSD license below: |
15 | * |
16 | * Redistribution and use in source and binary forms, with or |
17 | * without modification, are permitted provided that the following |
18 | * conditions are met: |
19 | * |
20 | * - Redistributions of source code must retain the above |
21 | * copyright notice, this list of conditions and the following |
22 | * disclaimer. |
23 | * |
24 | * - Redistributions in binary form must reproduce the above |
25 | * copyright notice, this list of conditions and the following |
26 | * disclaimer in the documentation and/or other materials |
27 | * provided with the distribution. |
28 | * |
29 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
30 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
31 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
32 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
33 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
34 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
35 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
36 | * SOFTWARE. |
37 | */ |
38 | |
39 | #include <linux/errno.h> |
40 | #include <linux/err.h> |
41 | #include <linux/export.h> |
42 | #include <linux/string.h> |
43 | #include <linux/slab.h> |
44 | #include <linux/in.h> |
45 | #include <linux/in6.h> |
46 | #include <net/addrconf.h> |
47 | #include <linux/security.h> |
48 | |
49 | #include <rdma/ib_verbs.h> |
50 | #include <rdma/ib_cache.h> |
51 | #include <rdma/ib_addr.h> |
52 | #include <rdma/rw.h> |
53 | #include <rdma/lag.h> |
54 | |
55 | #include "core_priv.h" |
56 | #include <trace/events/rdma_core.h> |
57 | |
58 | static int ib_resolve_eth_dmac(struct ib_device *device, |
59 | struct rdma_ah_attr *ah_attr); |
60 | |
61 | static const char * const ib_events[] = { |
	[IB_EVENT_CQ_ERR] = "CQ error",
	[IB_EVENT_QP_FATAL] = "QP fatal error",
	[IB_EVENT_QP_REQ_ERR] = "QP request error",
	[IB_EVENT_QP_ACCESS_ERR] = "QP access error",
	[IB_EVENT_COMM_EST] = "communication established",
	[IB_EVENT_SQ_DRAINED] = "send queue drained",
	[IB_EVENT_PATH_MIG] = "path migration successful",
	[IB_EVENT_PATH_MIG_ERR] = "path migration error",
	[IB_EVENT_DEVICE_FATAL] = "device fatal error",
	[IB_EVENT_PORT_ACTIVE] = "port active",
	[IB_EVENT_PORT_ERR] = "port error",
	[IB_EVENT_LID_CHANGE] = "LID change",
	[IB_EVENT_PKEY_CHANGE] = "P_key change",
	[IB_EVENT_SM_CHANGE] = "SM change",
	[IB_EVENT_SRQ_ERR] = "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED] = "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED] = "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER] = "client reregister",
	[IB_EVENT_GID_CHANGE] = "GID changed",
81 | }; |
82 | |
83 | const char *__attribute_const__ ib_event_msg(enum ib_event_type event) |
84 | { |
85 | size_t index = event; |
86 | |
	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
89 | } |
90 | EXPORT_SYMBOL(ib_event_msg); |
91 | |
92 | static const char * const wc_statuses[] = { |
93 | [IB_WC_SUCCESS] = "success" , |
94 | [IB_WC_LOC_LEN_ERR] = "local length error" , |
95 | [IB_WC_LOC_QP_OP_ERR] = "local QP operation error" , |
96 | [IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error" , |
97 | [IB_WC_LOC_PROT_ERR] = "local protection error" , |
98 | [IB_WC_WR_FLUSH_ERR] = "WR flushed" , |
99 | [IB_WC_MW_BIND_ERR] = "memory bind operation error" , |
100 | [IB_WC_BAD_RESP_ERR] = "bad response error" , |
101 | [IB_WC_LOC_ACCESS_ERR] = "local access error" , |
102 | [IB_WC_REM_INV_REQ_ERR] = "remote invalid request error" , |
103 | [IB_WC_REM_ACCESS_ERR] = "remote access error" , |
104 | [IB_WC_REM_OP_ERR] = "remote operation error" , |
105 | [IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded" , |
106 | [IB_WC_RNR_RETRY_EXC_ERR] = "RNR retry counter exceeded" , |
107 | [IB_WC_LOC_RDD_VIOL_ERR] = "local RDD violation error" , |
108 | [IB_WC_REM_INV_RD_REQ_ERR] = "remote invalid RD request" , |
109 | [IB_WC_REM_ABORT_ERR] = "operation aborted" , |
110 | [IB_WC_INV_EECN_ERR] = "invalid EE context number" , |
111 | [IB_WC_INV_EEC_STATE_ERR] = "invalid EE context state" , |
112 | [IB_WC_FATAL_ERR] = "fatal error" , |
113 | [IB_WC_RESP_TIMEOUT_ERR] = "response timeout error" , |
114 | [IB_WC_GENERAL_ERR] = "general error" , |
115 | }; |
116 | |
117 | const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status) |
118 | { |
119 | size_t index = status; |
120 | |
	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
123 | } |
124 | EXPORT_SYMBOL(ib_wc_status_msg); |
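
/*
 * A minimal usage sketch (not part of this file): consumers typically feed
 * ib_event_msg() and ib_wc_status_msg() into their logging when an async
 * event or a failed completion is observed. The handler names below are
 * hypothetical.
 *
 *	static void my_qp_event_handler(struct ib_event *event, void *ctx)
 *	{
 *		pr_info("QP async event: %s\n", ib_event_msg(event->event));
 *	}
 *
 *	static void my_log_bad_wc(const struct ib_wc *wc)
 *	{
 *		if (wc->status != IB_WC_SUCCESS)
 *			pr_err("completion failed: %s (%d)\n",
 *			       ib_wc_status_msg(wc->status), wc->status);
 *	}
 */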
125 | |
126 | __attribute_const__ int ib_rate_to_mult(enum ib_rate rate) |
127 | { |
128 | switch (rate) { |
129 | case IB_RATE_2_5_GBPS: return 1; |
130 | case IB_RATE_5_GBPS: return 2; |
131 | case IB_RATE_10_GBPS: return 4; |
132 | case IB_RATE_20_GBPS: return 8; |
133 | case IB_RATE_30_GBPS: return 12; |
134 | case IB_RATE_40_GBPS: return 16; |
135 | case IB_RATE_60_GBPS: return 24; |
136 | case IB_RATE_80_GBPS: return 32; |
137 | case IB_RATE_120_GBPS: return 48; |
138 | case IB_RATE_14_GBPS: return 6; |
139 | case IB_RATE_56_GBPS: return 22; |
140 | case IB_RATE_112_GBPS: return 45; |
141 | case IB_RATE_168_GBPS: return 67; |
142 | case IB_RATE_25_GBPS: return 10; |
143 | case IB_RATE_100_GBPS: return 40; |
144 | case IB_RATE_200_GBPS: return 80; |
145 | case IB_RATE_300_GBPS: return 120; |
146 | case IB_RATE_28_GBPS: return 11; |
147 | case IB_RATE_50_GBPS: return 20; |
148 | case IB_RATE_400_GBPS: return 160; |
149 | case IB_RATE_600_GBPS: return 240; |
150 | case IB_RATE_800_GBPS: return 320; |
151 | default: return -1; |
152 | } |
153 | } |
154 | EXPORT_SYMBOL(ib_rate_to_mult); |
155 | |
156 | __attribute_const__ enum ib_rate mult_to_ib_rate(int mult) |
157 | { |
158 | switch (mult) { |
159 | case 1: return IB_RATE_2_5_GBPS; |
160 | case 2: return IB_RATE_5_GBPS; |
161 | case 4: return IB_RATE_10_GBPS; |
162 | case 8: return IB_RATE_20_GBPS; |
163 | case 12: return IB_RATE_30_GBPS; |
164 | case 16: return IB_RATE_40_GBPS; |
165 | case 24: return IB_RATE_60_GBPS; |
166 | case 32: return IB_RATE_80_GBPS; |
167 | case 48: return IB_RATE_120_GBPS; |
168 | case 6: return IB_RATE_14_GBPS; |
169 | case 22: return IB_RATE_56_GBPS; |
170 | case 45: return IB_RATE_112_GBPS; |
171 | case 67: return IB_RATE_168_GBPS; |
172 | case 10: return IB_RATE_25_GBPS; |
173 | case 40: return IB_RATE_100_GBPS; |
174 | case 80: return IB_RATE_200_GBPS; |
175 | case 120: return IB_RATE_300_GBPS; |
176 | case 11: return IB_RATE_28_GBPS; |
177 | case 20: return IB_RATE_50_GBPS; |
178 | case 160: return IB_RATE_400_GBPS; |
179 | case 240: return IB_RATE_600_GBPS; |
180 | case 320: return IB_RATE_800_GBPS; |
181 | default: return IB_RATE_PORT_CURRENT; |
182 | } |
183 | } |
184 | EXPORT_SYMBOL(mult_to_ib_rate); |
185 | |
186 | __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate) |
187 | { |
188 | switch (rate) { |
189 | case IB_RATE_2_5_GBPS: return 2500; |
190 | case IB_RATE_5_GBPS: return 5000; |
191 | case IB_RATE_10_GBPS: return 10000; |
192 | case IB_RATE_20_GBPS: return 20000; |
193 | case IB_RATE_30_GBPS: return 30000; |
194 | case IB_RATE_40_GBPS: return 40000; |
195 | case IB_RATE_60_GBPS: return 60000; |
196 | case IB_RATE_80_GBPS: return 80000; |
197 | case IB_RATE_120_GBPS: return 120000; |
198 | case IB_RATE_14_GBPS: return 14062; |
199 | case IB_RATE_56_GBPS: return 56250; |
200 | case IB_RATE_112_GBPS: return 112500; |
201 | case IB_RATE_168_GBPS: return 168750; |
202 | case IB_RATE_25_GBPS: return 25781; |
203 | case IB_RATE_100_GBPS: return 103125; |
204 | case IB_RATE_200_GBPS: return 206250; |
205 | case IB_RATE_300_GBPS: return 309375; |
206 | case IB_RATE_28_GBPS: return 28125; |
207 | case IB_RATE_50_GBPS: return 53125; |
208 | case IB_RATE_400_GBPS: return 425000; |
209 | case IB_RATE_600_GBPS: return 637500; |
210 | case IB_RATE_800_GBPS: return 850000; |
211 | default: return -1; |
212 | } |
213 | } |
214 | EXPORT_SYMBOL(ib_rate_to_mbps); |
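
/*
 * A minimal sketch (not part of this file) of how the rate helpers relate:
 * ib_rate_to_mult() expresses a static rate as a multiple of the 2.5 Gbps
 * base rate, mult_to_ib_rate() is its inverse, and ib_rate_to_mbps() gives
 * the raw signalling rate in Mbps.
 *
 *	int mult = ib_rate_to_mult(IB_RATE_100_GBPS);	// 40
 *	enum ib_rate r = mult_to_ib_rate(mult);		// IB_RATE_100_GBPS
 *	int mbps = ib_rate_to_mbps(r);			// 103125
 */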
215 | |
216 | __attribute_const__ enum rdma_transport_type |
217 | rdma_node_get_transport(unsigned int node_type) |
218 | { |
219 | |
220 | if (node_type == RDMA_NODE_USNIC) |
221 | return RDMA_TRANSPORT_USNIC; |
222 | if (node_type == RDMA_NODE_USNIC_UDP) |
223 | return RDMA_TRANSPORT_USNIC_UDP; |
224 | if (node_type == RDMA_NODE_RNIC) |
225 | return RDMA_TRANSPORT_IWARP; |
226 | if (node_type == RDMA_NODE_UNSPECIFIED) |
227 | return RDMA_TRANSPORT_UNSPECIFIED; |
228 | |
229 | return RDMA_TRANSPORT_IB; |
230 | } |
231 | EXPORT_SYMBOL(rdma_node_get_transport); |
232 | |
233 | enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, |
234 | u32 port_num) |
235 | { |
236 | enum rdma_transport_type lt; |
237 | if (device->ops.get_link_layer) |
238 | return device->ops.get_link_layer(device, port_num); |
239 | |
240 | lt = rdma_node_get_transport(device->node_type); |
241 | if (lt == RDMA_TRANSPORT_IB) |
242 | return IB_LINK_LAYER_INFINIBAND; |
243 | |
244 | return IB_LINK_LAYER_ETHERNET; |
245 | } |
246 | EXPORT_SYMBOL(rdma_port_get_link_layer); |
247 | |
248 | /* Protection domains */ |
249 | |
250 | /** |
251 | * __ib_alloc_pd - Allocates an unused protection domain. |
252 | * @device: The device on which to allocate the protection domain. |
253 | * @flags: protection domain flags |
254 | * @caller: caller's build-time module name |
255 | * |
256 | * A protection domain object provides an association between QPs, shared |
257 | * receive queues, address handles, memory regions, and memory windows. |
258 | * |
259 | * Every PD has a local_dma_lkey which can be used as the lkey value for local |
260 | * memory operations. |
261 | */ |
262 | struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, |
263 | const char *caller) |
264 | { |
265 | struct ib_pd *pd; |
266 | int mr_access_flags = 0; |
267 | int ret; |
268 | |
269 | pd = rdma_zalloc_drv_obj(device, ib_pd); |
270 | if (!pd) |
		return ERR_PTR(-ENOMEM);
272 | |
273 | pd->device = device; |
274 | pd->flags = flags; |
275 | |
	rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD);
	rdma_restrack_set_name(&pd->res, caller);
278 | |
279 | ret = device->ops.alloc_pd(pd, NULL); |
280 | if (ret) { |
		rdma_restrack_put(&pd->res);
		kfree(pd);
		return ERR_PTR(ret);
	}
	rdma_restrack_add(&pd->res);
286 | |
287 | if (device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY) |
288 | pd->local_dma_lkey = device->local_dma_lkey; |
289 | else |
290 | mr_access_flags |= IB_ACCESS_LOCAL_WRITE; |
291 | |
292 | if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) { |
293 | pr_warn("%s: enabling unsafe global rkey\n" , caller); |
294 | mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE; |
295 | } |
296 | |
297 | if (mr_access_flags) { |
298 | struct ib_mr *mr; |
299 | |
300 | mr = pd->device->ops.get_dma_mr(pd, mr_access_flags); |
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return ERR_CAST(mr);
304 | } |
305 | |
306 | mr->device = pd->device; |
307 | mr->pd = pd; |
308 | mr->type = IB_MR_TYPE_DMA; |
309 | mr->uobject = NULL; |
310 | mr->need_inval = false; |
311 | |
312 | pd->__internal_mr = mr; |
313 | |
314 | if (!(device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY)) |
315 | pd->local_dma_lkey = pd->__internal_mr->lkey; |
316 | |
317 | if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) |
318 | pd->unsafe_global_rkey = pd->__internal_mr->rkey; |
319 | } |
320 | |
321 | return pd; |
322 | } |
323 | EXPORT_SYMBOL(__ib_alloc_pd); |
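
/*
 * A minimal kernel-side usage sketch (not part of this file), assuming an
 * ib_device obtained through an ib_client callback. ib_alloc_pd() is the
 * usual wrapper that supplies KBUILD_MODNAME as the caller string.
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(device, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */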
324 | |
325 | /** |
326 | * ib_dealloc_pd_user - Deallocates a protection domain. |
327 | * @pd: The protection domain to deallocate. |
328 | * @udata: Valid user data or NULL for kernel object |
329 | * |
330 | * It is an error to call this function while any resources in the pd still |
331 | * exist. The caller is responsible to synchronously destroy them and |
332 | * guarantee no new allocations will happen. |
333 | */ |
334 | int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata) |
335 | { |
336 | int ret; |
337 | |
338 | if (pd->__internal_mr) { |
339 | ret = pd->device->ops.dereg_mr(pd->__internal_mr, NULL); |
340 | WARN_ON(ret); |
341 | pd->__internal_mr = NULL; |
342 | } |
343 | |
344 | ret = pd->device->ops.dealloc_pd(pd, udata); |
345 | if (ret) |
346 | return ret; |
347 | |
	rdma_restrack_del(&pd->res);
	kfree(pd);
350 | return ret; |
351 | } |
352 | EXPORT_SYMBOL(ib_dealloc_pd_user); |
353 | |
354 | /* Address handles */ |
355 | |
356 | /** |
357 | * rdma_copy_ah_attr - Copy rdma ah attribute from source to destination. |
358 | * @dest: Pointer to destination ah_attr. Contents of the destination |
 * pointer are assumed to be invalid and the attributes are overwritten.
360 | * @src: Pointer to source ah_attr. |
361 | */ |
362 | void rdma_copy_ah_attr(struct rdma_ah_attr *dest, |
363 | const struct rdma_ah_attr *src) |
364 | { |
365 | *dest = *src; |
366 | if (dest->grh.sgid_attr) |
		rdma_hold_gid_attr(dest->grh.sgid_attr);
368 | } |
369 | EXPORT_SYMBOL(rdma_copy_ah_attr); |
370 | |
371 | /** |
372 | * rdma_replace_ah_attr - Replace valid ah_attr with new one. |
373 | * @old: Pointer to existing ah_attr which needs to be replaced. |
374 | * old is assumed to be valid or zero'd |
375 | * @new: Pointer to the new ah_attr. |
376 | * |
 * rdma_replace_ah_attr() first releases any reference held by the old ah_attr
 * if the old ah_attr is valid; after that it copies the new attribute and
 * holds a reference to the copied sgid_attr.
380 | */ |
381 | void rdma_replace_ah_attr(struct rdma_ah_attr *old, |
382 | const struct rdma_ah_attr *new) |
383 | { |
	rdma_destroy_ah_attr(old);
	*old = *new;
	if (old->grh.sgid_attr)
		rdma_hold_gid_attr(old->grh.sgid_attr);
388 | } |
389 | EXPORT_SYMBOL(rdma_replace_ah_attr); |
390 | |
391 | /** |
392 | * rdma_move_ah_attr - Move ah_attr pointed by source to destination. |
393 | * @dest: Pointer to destination ah_attr to copy to. |
394 | * dest is assumed to be valid or zero'd |
395 | * @src: Pointer to the new ah_attr. |
396 | * |
397 | * rdma_move_ah_attr() first releases any reference in the destination ah_attr |
398 | * if it is valid. This also transfers ownership of internal references from |
399 | * src to dest, making src invalid in the process. No new reference of the src |
400 | * ah_attr is taken. |
401 | */ |
402 | void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src) |
403 | { |
	rdma_destroy_ah_attr(dest);
405 | *dest = *src; |
406 | src->grh.sgid_attr = NULL; |
407 | } |
408 | EXPORT_SYMBOL(rdma_move_ah_attr); |
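
/*
 * A minimal sketch (not part of this file) of the copy/destroy pairing:
 * a copy taken with rdma_copy_ah_attr() holds its own sgid_attr reference,
 * so it must eventually be released with rdma_destroy_ah_attr().
 * src_ah_attr below is assumed to be a valid attribute owned by the caller.
 *
 *	struct rdma_ah_attr tmp = {};
 *
 *	rdma_copy_ah_attr(&tmp, src_ah_attr);
 *	... use tmp ...
 *	rdma_destroy_ah_attr(&tmp);
 */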
409 | |
410 | /* |
411 | * Validate that the rdma_ah_attr is valid for the device before passing it |
412 | * off to the driver. |
413 | */ |
414 | static int rdma_check_ah_attr(struct ib_device *device, |
415 | struct rdma_ah_attr *ah_attr) |
416 | { |
	if (!rdma_is_port_valid(device, ah_attr->port_num))
		return -EINVAL;

	if ((rdma_is_grh_required(device, ah_attr->port_num) ||
421 | ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) && |
422 | !(ah_attr->ah_flags & IB_AH_GRH)) |
423 | return -EINVAL; |
424 | |
425 | if (ah_attr->grh.sgid_attr) { |
426 | /* |
427 | * Make sure the passed sgid_attr is consistent with the |
428 | * parameters |
429 | */ |
430 | if (ah_attr->grh.sgid_attr->index != ah_attr->grh.sgid_index || |
431 | ah_attr->grh.sgid_attr->port_num != ah_attr->port_num) |
432 | return -EINVAL; |
433 | } |
434 | return 0; |
435 | } |
436 | |
437 | /* |
438 | * If the ah requires a GRH then ensure that sgid_attr pointer is filled in. |
439 | * On success the caller is responsible to call rdma_unfill_sgid_attr(). |
440 | */ |
441 | static int rdma_fill_sgid_attr(struct ib_device *device, |
442 | struct rdma_ah_attr *ah_attr, |
443 | const struct ib_gid_attr **old_sgid_attr) |
444 | { |
445 | const struct ib_gid_attr *sgid_attr; |
446 | struct ib_global_route *grh; |
447 | int ret; |
448 | |
449 | *old_sgid_attr = ah_attr->grh.sgid_attr; |
450 | |
451 | ret = rdma_check_ah_attr(device, ah_attr); |
452 | if (ret) |
453 | return ret; |
454 | |
455 | if (!(ah_attr->ah_flags & IB_AH_GRH)) |
456 | return 0; |
457 | |
	grh = rdma_ah_retrieve_grh(ah_attr);
459 | if (grh->sgid_attr) |
460 | return 0; |
461 | |
	sgid_attr =
		rdma_get_gid_attr(device, ah_attr->port_num, grh->sgid_index);
	if (IS_ERR(sgid_attr))
		return PTR_ERR(sgid_attr);

	/* Move ownership of the kref into the ah_attr */
468 | grh->sgid_attr = sgid_attr; |
469 | return 0; |
470 | } |
471 | |
472 | static void rdma_unfill_sgid_attr(struct rdma_ah_attr *ah_attr, |
473 | const struct ib_gid_attr *old_sgid_attr) |
474 | { |
475 | /* |
476 | * Fill didn't change anything, the caller retains ownership of |
477 | * whatever it passed |
478 | */ |
479 | if (ah_attr->grh.sgid_attr == old_sgid_attr) |
480 | return; |
481 | |
482 | /* |
483 | * Otherwise, we need to undo what rdma_fill_sgid_attr so the caller |
484 | * doesn't see any change in the rdma_ah_attr. If we get here |
485 | * old_sgid_attr is NULL. |
486 | */ |
487 | rdma_destroy_ah_attr(ah_attr); |
488 | } |
489 | |
490 | static const struct ib_gid_attr * |
491 | rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr, |
492 | const struct ib_gid_attr *old_attr) |
493 | { |
494 | if (old_attr) |
		rdma_put_gid_attr(old_attr);
	if (ah_attr->ah_flags & IB_AH_GRH) {
		rdma_hold_gid_attr(ah_attr->grh.sgid_attr);
498 | return ah_attr->grh.sgid_attr; |
499 | } |
500 | return NULL; |
501 | } |
502 | |
503 | static struct ib_ah *_rdma_create_ah(struct ib_pd *pd, |
504 | struct rdma_ah_attr *ah_attr, |
505 | u32 flags, |
506 | struct ib_udata *udata, |
507 | struct net_device *xmit_slave) |
508 | { |
509 | struct rdma_ah_init_attr init_attr = {}; |
510 | struct ib_device *device = pd->device; |
511 | struct ib_ah *ah; |
512 | int ret; |
513 | |
514 | might_sleep_if(flags & RDMA_CREATE_AH_SLEEPABLE); |
515 | |
516 | if (!udata && !device->ops.create_ah) |
		return ERR_PTR(-EOPNOTSUPP);
518 | |
519 | ah = rdma_zalloc_drv_obj_gfp( |
520 | device, ib_ah, |
521 | (flags & RDMA_CREATE_AH_SLEEPABLE) ? GFP_KERNEL : GFP_ATOMIC); |
522 | if (!ah) |
		return ERR_PTR(-ENOMEM);
524 | |
525 | ah->device = device; |
526 | ah->pd = pd; |
527 | ah->type = ah_attr->type; |
528 | ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL); |
529 | init_attr.ah_attr = ah_attr; |
530 | init_attr.flags = flags; |
531 | init_attr.xmit_slave = xmit_slave; |
532 | |
533 | if (udata) |
534 | ret = device->ops.create_user_ah(ah, &init_attr, udata); |
535 | else |
536 | ret = device->ops.create_ah(ah, &init_attr, NULL); |
537 | if (ret) { |
538 | if (ah->sgid_attr) |
			rdma_put_gid_attr(ah->sgid_attr);
		kfree(ah);
		return ERR_PTR(ret);
542 | } |
543 | |
	atomic_inc(&pd->usecnt);
545 | return ah; |
546 | } |
547 | |
548 | /** |
549 | * rdma_create_ah - Creates an address handle for the |
550 | * given address vector. |
551 | * @pd: The protection domain associated with the address handle. |
552 | * @ah_attr: The attributes of the address vector. |
553 | * @flags: Create address handle flags (see enum rdma_create_ah_flags). |
554 | * |
 * It returns a newly allocated address handle on success, or an ERR_PTR
 * encoded error code on failure. The address handle is used to reference a
 * local or global destination in all UD QP post sends.
558 | */ |
559 | struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, |
560 | u32 flags) |
561 | { |
562 | const struct ib_gid_attr *old_sgid_attr; |
563 | struct net_device *slave; |
564 | struct ib_ah *ah; |
565 | int ret; |
566 | |
	ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
	if (ret)
		return ERR_PTR(ret);
	slave = rdma_lag_get_ah_roce_slave(pd->device, ah_attr,
					   (flags & RDMA_CREATE_AH_SLEEPABLE) ?
					   GFP_KERNEL : GFP_ATOMIC);
	if (IS_ERR(slave)) {
		rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
		return (void *)slave;
	}
	ah = _rdma_create_ah(pd, ah_attr, flags, NULL, slave);
	rdma_lag_put_ah_roce_slave(slave);
	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
580 | return ah; |
581 | } |
582 | EXPORT_SYMBOL(rdma_create_ah); |
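
/*
 * A minimal sketch (not part of this file) of creating and destroying a UD
 * address handle from caller-built attributes; the remote_lid, SL and port
 * values are placeholders.
 *
 *	struct rdma_ah_attr attr = {};
 *	struct ib_ah *ah;
 *
 *	attr.type = rdma_ah_find_type(device, 1);
 *	rdma_ah_set_dlid(&attr, remote_lid);
 *	rdma_ah_set_sl(&attr, 0);
 *	rdma_ah_set_port_num(&attr, 1);
 *
 *	ah = rdma_create_ah(pd, &attr, RDMA_CREATE_AH_SLEEPABLE);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	...
 *	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
 */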
583 | |
584 | /** |
585 | * rdma_create_user_ah - Creates an address handle for the |
586 | * given address vector. |
587 | * It resolves destination mac address for ah attribute of RoCE type. |
588 | * @pd: The protection domain associated with the address handle. |
589 | * @ah_attr: The attributes of the address vector. |
 * @udata: pointer to the user's input/output buffer information, needed by
 * the provider driver.
 *
 * It returns a newly allocated address handle on success, or an ERR_PTR
 * encoded error code on failure.
594 | * The address handle is used to reference a local or global destination |
595 | * in all UD QP post sends. |
596 | */ |
597 | struct ib_ah *rdma_create_user_ah(struct ib_pd *pd, |
598 | struct rdma_ah_attr *ah_attr, |
599 | struct ib_udata *udata) |
600 | { |
601 | const struct ib_gid_attr *old_sgid_attr; |
602 | struct ib_ah *ah; |
603 | int err; |
604 | |
	err = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
	if (err)
		return ERR_PTR(err);

	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
		err = ib_resolve_eth_dmac(pd->device, ah_attr);
		if (err) {
			ah = ERR_PTR(err);
			goto out;
		}
	}

	ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE,
			     udata, NULL);
619 | |
620 | out: |
621 | rdma_unfill_sgid_attr(ah_attr, old_sgid_attr); |
622 | return ah; |
623 | } |
624 | EXPORT_SYMBOL(rdma_create_user_ah); |
625 | |
int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
627 | { |
628 | const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh; |
629 | struct iphdr ip4h_checked; |
630 | const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh; |
631 | |
632 | /* If it's IPv6, the version must be 6, otherwise, the first |
633 | * 20 bytes (before the IPv4 header) are garbled. |
634 | */ |
635 | if (ip6h->version != 6) |
636 | return (ip4h->version == 4) ? 4 : 0; |
637 | /* version may be 6 or 4 because the first 20 bytes could be garbled */ |
638 | |
639 | /* RoCE v2 requires no options, thus header length |
640 | * must be 5 words |
641 | */ |
642 | if (ip4h->ihl != 5) |
643 | return 6; |
644 | |
645 | /* Verify checksum. |
646 | * We can't write on scattered buffers so we need to copy to |
647 | * temp buffer. |
648 | */ |
649 | memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked)); |
650 | ip4h_checked.check = 0; |
	ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
652 | /* if IPv4 header checksum is OK, believe it */ |
653 | if (ip4h->check == ip4h_checked.check) |
654 | return 4; |
655 | return 6; |
656 | } |
657 | EXPORT_SYMBOL(ib_get_rdma_header_version); |
658 | |
659 | static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device, |
660 | u32 port_num, |
661 | const struct ib_grh *grh) |
662 | { |
663 | int grh_version; |
664 | |
665 | if (rdma_protocol_ib(device, port_num)) |
666 | return RDMA_NETWORK_IB; |
667 | |
668 | grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh); |
669 | |
670 | if (grh_version == 4) |
671 | return RDMA_NETWORK_IPV4; |
672 | |
673 | if (grh->next_hdr == IPPROTO_UDP) |
674 | return RDMA_NETWORK_IPV6; |
675 | |
676 | return RDMA_NETWORK_ROCE_V1; |
677 | } |
678 | |
679 | struct find_gid_index_context { |
680 | u16 vlan_id; |
681 | enum ib_gid_type gid_type; |
682 | }; |
683 | |
684 | static bool find_gid_index(const union ib_gid *gid, |
685 | const struct ib_gid_attr *gid_attr, |
686 | void *context) |
687 | { |
688 | struct find_gid_index_context *ctx = context; |
689 | u16 vlan_id = 0xffff; |
690 | int ret; |
691 | |
692 | if (ctx->gid_type != gid_attr->gid_type) |
693 | return false; |
694 | |
	ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
696 | if (ret) |
697 | return false; |
698 | |
699 | return ctx->vlan_id == vlan_id; |
700 | } |
701 | |
702 | static const struct ib_gid_attr * |
703 | get_sgid_attr_from_eth(struct ib_device *device, u32 port_num, |
704 | u16 vlan_id, const union ib_gid *sgid, |
705 | enum ib_gid_type gid_type) |
706 | { |
707 | struct find_gid_index_context context = {.vlan_id = vlan_id, |
708 | .gid_type = gid_type}; |
709 | |
	return rdma_find_gid_by_filter(device, sgid, port_num, find_gid_index,
				       &context);
712 | } |
713 | |
714 | int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr, |
715 | enum rdma_network_type net_type, |
716 | union ib_gid *sgid, union ib_gid *dgid) |
717 | { |
718 | struct sockaddr_in src_in; |
719 | struct sockaddr_in dst_in; |
720 | __be32 src_saddr, dst_saddr; |
721 | |
722 | if (!sgid || !dgid) |
723 | return -EINVAL; |
724 | |
725 | if (net_type == RDMA_NETWORK_IPV4) { |
726 | memcpy(&src_in.sin_addr.s_addr, |
727 | &hdr->roce4grh.saddr, 4); |
728 | memcpy(&dst_in.sin_addr.s_addr, |
729 | &hdr->roce4grh.daddr, 4); |
730 | src_saddr = src_in.sin_addr.s_addr; |
731 | dst_saddr = dst_in.sin_addr.s_addr; |
		ipv6_addr_set_v4mapped(src_saddr,
				       (struct in6_addr *)sgid);
		ipv6_addr_set_v4mapped(dst_saddr,
				       (struct in6_addr *)dgid);
		return 0;
	} else if (net_type == RDMA_NETWORK_IPV6 ||
		   net_type == RDMA_NETWORK_IB ||
		   net_type == RDMA_NETWORK_ROCE_V1) {
739 | *dgid = hdr->ibgrh.dgid; |
740 | *sgid = hdr->ibgrh.sgid; |
741 | return 0; |
742 | } else { |
743 | return -EINVAL; |
744 | } |
745 | } |
746 | EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr); |
747 | |
748 | /* Resolve destination mac address and hop limit for unicast destination |
749 | * GID entry, considering the source GID entry as well. |
750 | * ah_attribute must have valid port_num, sgid_index. |
751 | */ |
752 | static int ib_resolve_unicast_gid_dmac(struct ib_device *device, |
753 | struct rdma_ah_attr *ah_attr) |
754 | { |
	struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
756 | const struct ib_gid_attr *sgid_attr = grh->sgid_attr; |
757 | int hop_limit = 0xff; |
758 | int ret = 0; |
759 | |
760 | /* If destination is link local and source GID is RoCEv1, |
761 | * IP stack is not used. |
762 | */ |
	if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
	    sgid_attr->gid_type == IB_GID_TYPE_ROCE) {
		rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
				ah_attr->roce.dmac);
767 | return ret; |
768 | } |
769 | |
	ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid,
					   ah_attr->roce.dmac,
					   sgid_attr, &hop_limit);
773 | |
774 | grh->hop_limit = hop_limit; |
775 | return ret; |
776 | } |
777 | |
778 | /* |
779 | * This function initializes address handle attributes from the incoming packet. |
 * The incoming packet's dgid is the GID of the receiving node (where this
 * code executes), and its sgid is the GID of the sender.
 *
 * When resolving the destination mac address, the packet's dgid is used as
 * the sgid and its sgid is used as the dgid, because the reply must be sent
 * back to the sender's GID.
786 | * |
787 | * On success the caller is responsible to call rdma_destroy_ah_attr on the |
788 | * attr. |
789 | */ |
790 | int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num, |
791 | const struct ib_wc *wc, const struct ib_grh *grh, |
792 | struct rdma_ah_attr *ah_attr) |
793 | { |
794 | u32 flow_class; |
795 | int ret; |
796 | enum rdma_network_type net_type = RDMA_NETWORK_IB; |
797 | enum ib_gid_type gid_type = IB_GID_TYPE_IB; |
798 | const struct ib_gid_attr *sgid_attr; |
799 | int hoplimit = 0xff; |
800 | union ib_gid dgid; |
801 | union ib_gid sgid; |
802 | |
803 | might_sleep(); |
804 | |
805 | memset(ah_attr, 0, sizeof *ah_attr); |
	ah_attr->type = rdma_ah_find_type(device, port_num);
	if (rdma_cap_eth_ah(device, port_num)) {
		if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
			net_type = wc->network_hdr_type;
		else
			net_type = ib_get_net_type_by_grh(device, port_num, grh);
		gid_type = ib_network_to_gid_type(net_type);
813 | } |
814 | ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type, |
815 | &sgid, &dgid); |
816 | if (ret) |
817 | return ret; |
818 | |
	rdma_ah_set_sl(ah_attr, wc->sl);
	rdma_ah_set_port_num(ah_attr, port_num);
821 | |
822 | if (rdma_protocol_roce(device, port_num)) { |
823 | u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ? |
824 | wc->vlan_id : 0xffff; |
825 | |
826 | if (!(wc->wc_flags & IB_WC_GRH)) |
827 | return -EPROTOTYPE; |
828 | |
		sgid_attr = get_sgid_attr_from_eth(device, port_num,
						   vlan_id, &dgid,
						   gid_type);
		if (IS_ERR(sgid_attr))
			return PTR_ERR(sgid_attr);

		flow_class = be32_to_cpu(grh->version_tclass_flow);
		rdma_move_grh_sgid_attr(ah_attr,
					&sgid,
					flow_class & 0xFFFFF,
					hoplimit,
					(flow_class >> 20) & 0xFF,
					sgid_attr);
842 | |
843 | ret = ib_resolve_unicast_gid_dmac(device, ah_attr); |
844 | if (ret) |
845 | rdma_destroy_ah_attr(ah_attr); |
846 | |
847 | return ret; |
848 | } else { |
		rdma_ah_set_dlid(ah_attr, wc->slid);
		rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);

		if ((wc->wc_flags & IB_WC_GRH) == 0)
			return 0;

		if (dgid.global.interface_id !=
		    cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
			sgid_attr = rdma_find_gid_by_port(
				device, &dgid, IB_GID_TYPE_IB, port_num, NULL);
		} else
			sgid_attr = rdma_get_gid_attr(device, port_num, 0);

		if (IS_ERR(sgid_attr))
			return PTR_ERR(sgid_attr);
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		rdma_move_grh_sgid_attr(ah_attr,
					&sgid,
					flow_class & 0xFFFFF,
					hoplimit,
					(flow_class >> 20) & 0xFF,
					sgid_attr);
871 | |
872 | return 0; |
873 | } |
874 | } |
875 | EXPORT_SYMBOL(ib_init_ah_attr_from_wc); |
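
/*
 * A minimal sketch (not part of this file): responding to a received UD
 * message by building reply attributes straight from its work completion
 * and GRH, then releasing the sgid_attr reference the helper took.
 *
 *	struct rdma_ah_attr ah_attr;
 *	int ret;
 *
 *	ret = ib_init_ah_attr_from_wc(device, port_num, wc, grh, &ah_attr);
 *	if (ret)
 *		return ret;
 *	... create an AH from ah_attr and post the reply ...
 *	rdma_destroy_ah_attr(&ah_attr);
 */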
876 | |
877 | /** |
878 | * rdma_move_grh_sgid_attr - Sets the sgid attribute of GRH, taking ownership |
879 | * of the reference |
880 | * |
881 | * @attr: Pointer to AH attribute structure |
882 | * @dgid: Destination GID |
883 | * @flow_label: Flow label |
884 | * @hop_limit: Hop limit |
885 | * @traffic_class: traffic class |
886 | * @sgid_attr: Pointer to SGID attribute |
887 | * |
888 | * This takes ownership of the sgid_attr reference. The caller must ensure |
889 | * rdma_destroy_ah_attr() is called before destroying the rdma_ah_attr after |
890 | * calling this function. |
891 | */ |
892 | void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid, |
893 | u32 flow_label, u8 hop_limit, u8 traffic_class, |
894 | const struct ib_gid_attr *sgid_attr) |
895 | { |
	rdma_ah_set_grh(attr, dgid, flow_label, sgid_attr->index, hop_limit,
897 | traffic_class); |
898 | attr->grh.sgid_attr = sgid_attr; |
899 | } |
900 | EXPORT_SYMBOL(rdma_move_grh_sgid_attr); |
901 | |
902 | /** |
903 | * rdma_destroy_ah_attr - Release reference to SGID attribute of |
904 | * ah attribute. |
905 | * @ah_attr: Pointer to ah attribute |
906 | * |
907 | * Release reference to the SGID attribute of the ah attribute if it is |
908 | * non NULL. It is safe to call this multiple times, and safe to call it on |
909 | * a zero initialized ah_attr. |
910 | */ |
911 | void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr) |
912 | { |
913 | if (ah_attr->grh.sgid_attr) { |
		rdma_put_gid_attr(ah_attr->grh.sgid_attr);
915 | ah_attr->grh.sgid_attr = NULL; |
916 | } |
917 | } |
918 | EXPORT_SYMBOL(rdma_destroy_ah_attr); |
919 | |
920 | struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, |
921 | const struct ib_grh *grh, u32 port_num) |
922 | { |
923 | struct rdma_ah_attr ah_attr; |
924 | struct ib_ah *ah; |
925 | int ret; |
926 | |
927 | ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr); |
928 | if (ret) |
		return ERR_PTR(ret);
930 | |
931 | ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE); |
932 | |
933 | rdma_destroy_ah_attr(&ah_attr); |
934 | return ah; |
935 | } |
936 | EXPORT_SYMBOL(ib_create_ah_from_wc); |
937 | |
938 | int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr) |
939 | { |
940 | const struct ib_gid_attr *old_sgid_attr; |
941 | int ret; |
942 | |
943 | if (ah->type != ah_attr->type) |
944 | return -EINVAL; |
945 | |
	ret = rdma_fill_sgid_attr(ah->device, ah_attr, &old_sgid_attr);
947 | if (ret) |
948 | return ret; |
949 | |
950 | ret = ah->device->ops.modify_ah ? |
951 | ah->device->ops.modify_ah(ah, ah_attr) : |
952 | -EOPNOTSUPP; |
953 | |
	ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr);
955 | rdma_unfill_sgid_attr(ah_attr, old_sgid_attr); |
956 | return ret; |
957 | } |
958 | EXPORT_SYMBOL(rdma_modify_ah); |
959 | |
960 | int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr) |
961 | { |
962 | ah_attr->grh.sgid_attr = NULL; |
963 | |
964 | return ah->device->ops.query_ah ? |
965 | ah->device->ops.query_ah(ah, ah_attr) : |
966 | -EOPNOTSUPP; |
967 | } |
968 | EXPORT_SYMBOL(rdma_query_ah); |
969 | |
970 | int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata) |
971 | { |
972 | const struct ib_gid_attr *sgid_attr = ah->sgid_attr; |
973 | struct ib_pd *pd; |
974 | int ret; |
975 | |
976 | might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE); |
977 | |
978 | pd = ah->pd; |
979 | |
980 | ret = ah->device->ops.destroy_ah(ah, flags); |
981 | if (ret) |
982 | return ret; |
983 | |
	atomic_dec(&pd->usecnt);
	if (sgid_attr)
		rdma_put_gid_attr(sgid_attr);

	kfree(ah);
989 | return ret; |
990 | } |
991 | EXPORT_SYMBOL(rdma_destroy_ah_user); |
992 | |
993 | /* Shared receive queues */ |
994 | |
995 | /** |
996 | * ib_create_srq_user - Creates a SRQ associated with the specified protection |
997 | * domain. |
998 | * @pd: The protection domain associated with the SRQ. |
999 | * @srq_init_attr: A list of initial attributes required to create the |
1000 | * SRQ. If SRQ creation succeeds, then the attributes are updated to |
1001 | * the actual capabilities of the created SRQ. |
1002 | * @uobject: uobject pointer if this is not a kernel SRQ |
1003 | * @udata: udata pointer if this is not a kernel SRQ |
1004 | * |
 * srq_init_attr->attr.max_wr and srq_init_attr->attr.max_sge are read to
 * determine the
1006 | * requested size of the SRQ, and set to the actual values allocated |
1007 | * on return. If ib_create_srq() succeeds, then max_wr and max_sge |
1008 | * will always be at least as large as the requested values. |
1009 | */ |
1010 | struct ib_srq *ib_create_srq_user(struct ib_pd *pd, |
1011 | struct ib_srq_init_attr *srq_init_attr, |
1012 | struct ib_usrq_object *uobject, |
1013 | struct ib_udata *udata) |
1014 | { |
1015 | struct ib_srq *srq; |
1016 | int ret; |
1017 | |
1018 | srq = rdma_zalloc_drv_obj(pd->device, ib_srq); |
1019 | if (!srq) |
		return ERR_PTR(-ENOMEM);
1021 | |
1022 | srq->device = pd->device; |
1023 | srq->pd = pd; |
1024 | srq->event_handler = srq_init_attr->event_handler; |
1025 | srq->srq_context = srq_init_attr->srq_context; |
1026 | srq->srq_type = srq_init_attr->srq_type; |
1027 | srq->uobject = uobject; |
1028 | |
	if (ib_srq_has_cq(srq->srq_type)) {
		srq->ext.cq = srq_init_attr->ext.cq;
		atomic_inc(&srq->ext.cq->usecnt);
	}
	if (srq->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
		if (srq->ext.xrc.xrcd)
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
	}
	atomic_inc(&pd->usecnt);

	rdma_restrack_new(&srq->res, RDMA_RESTRACK_SRQ);
	rdma_restrack_parent_name(&srq->res, &pd->res);
1042 | |
1043 | ret = pd->device->ops.create_srq(srq, srq_init_attr, udata); |
1044 | if (ret) { |
		rdma_restrack_put(&srq->res);
		atomic_dec(&pd->usecnt);
		if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd)
			atomic_dec(&srq->ext.xrc.xrcd->usecnt);
		if (ib_srq_has_cq(srq->srq_type))
			atomic_dec(&srq->ext.cq->usecnt);
		kfree(srq);
		return ERR_PTR(ret);
	}

	rdma_restrack_add(&srq->res);
1056 | |
1057 | return srq; |
1058 | } |
1059 | EXPORT_SYMBOL(ib_create_srq_user); |
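
/*
 * A minimal kernel-side sketch (not part of this file) using the
 * ib_create_srq() wrapper; the queue sizes are placeholders.
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.attr = { .max_wr = 128, .max_sge = 1 },
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq;
 *
 *	srq = ib_create_srq(pd, &srq_attr);
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 *	...
 *	ib_destroy_srq(srq);
 */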
1060 | |
1061 | int ib_modify_srq(struct ib_srq *srq, |
1062 | struct ib_srq_attr *srq_attr, |
1063 | enum ib_srq_attr_mask srq_attr_mask) |
1064 | { |
1065 | return srq->device->ops.modify_srq ? |
1066 | srq->device->ops.modify_srq(srq, srq_attr, srq_attr_mask, |
1067 | NULL) : -EOPNOTSUPP; |
1068 | } |
1069 | EXPORT_SYMBOL(ib_modify_srq); |
1070 | |
1071 | int ib_query_srq(struct ib_srq *srq, |
1072 | struct ib_srq_attr *srq_attr) |
1073 | { |
1074 | return srq->device->ops.query_srq ? |
1075 | srq->device->ops.query_srq(srq, srq_attr) : -EOPNOTSUPP; |
1076 | } |
1077 | EXPORT_SYMBOL(ib_query_srq); |
1078 | |
1079 | int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata) |
1080 | { |
1081 | int ret; |
1082 | |
	if (atomic_read(&srq->usecnt))
1084 | return -EBUSY; |
1085 | |
1086 | ret = srq->device->ops.destroy_srq(srq, udata); |
1087 | if (ret) |
1088 | return ret; |
1089 | |
	atomic_dec(&srq->pd->usecnt);
	if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd)
		atomic_dec(&srq->ext.xrc.xrcd->usecnt);
	if (ib_srq_has_cq(srq->srq_type))
		atomic_dec(&srq->ext.cq->usecnt);
	rdma_restrack_del(&srq->res);
	kfree(srq);
1097 | |
1098 | return ret; |
1099 | } |
1100 | EXPORT_SYMBOL(ib_destroy_srq_user); |
1101 | |
1102 | /* Queue pairs */ |
1103 | |
1104 | static void __ib_shared_qp_event_handler(struct ib_event *event, void *context) |
1105 | { |
1106 | struct ib_qp *qp = context; |
1107 | unsigned long flags; |
1108 | |
1109 | spin_lock_irqsave(&qp->device->qp_open_list_lock, flags); |
1110 | list_for_each_entry(event->element.qp, &qp->open_list, open_list) |
1111 | if (event->element.qp->event_handler) |
1112 | event->element.qp->event_handler(event, event->element.qp->qp_context); |
	spin_unlock_irqrestore(&qp->device->qp_open_list_lock, flags);
1114 | } |
1115 | |
1116 | static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp, |
1117 | void (*event_handler)(struct ib_event *, void *), |
1118 | void *qp_context) |
1119 | { |
1120 | struct ib_qp *qp; |
1121 | unsigned long flags; |
1122 | int err; |
1123 | |
	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	err = ib_open_shared_qp_security(qp, real_qp->device);
	if (err) {
		kfree(qp);
		return ERR_PTR(err);
	}

	qp->real_qp = real_qp;
	atomic_inc(&real_qp->usecnt);
1137 | qp->device = real_qp->device; |
1138 | qp->event_handler = event_handler; |
1139 | qp->qp_context = qp_context; |
1140 | qp->qp_num = real_qp->qp_num; |
1141 | qp->qp_type = real_qp->qp_type; |
1142 | |
1143 | spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags); |
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);
1146 | |
1147 | return qp; |
1148 | } |
1149 | |
1150 | struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, |
1151 | struct ib_qp_open_attr *qp_open_attr) |
1152 | { |
1153 | struct ib_qp *qp, *real_qp; |
1154 | |
1155 | if (qp_open_attr->qp_type != IB_QPT_XRC_TGT) |
		return ERR_PTR(-EINVAL);

	down_read(&xrcd->tgt_qps_rwsem);
	real_qp = xa_load(&xrcd->tgt_qps, qp_open_attr->qp_num);
	if (!real_qp) {
		up_read(&xrcd->tgt_qps_rwsem);
		return ERR_PTR(-EINVAL);
	}
	qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
			  qp_open_attr->qp_context);
	up_read(&xrcd->tgt_qps_rwsem);
1167 | return qp; |
1168 | } |
1169 | EXPORT_SYMBOL(ib_open_qp); |
1170 | |
1171 | static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp, |
1172 | struct ib_qp_init_attr *qp_init_attr) |
1173 | { |
1174 | struct ib_qp *real_qp = qp; |
1175 | int err; |
1176 | |
1177 | qp->event_handler = __ib_shared_qp_event_handler; |
1178 | qp->qp_context = qp; |
1179 | qp->pd = NULL; |
1180 | qp->send_cq = qp->recv_cq = NULL; |
1181 | qp->srq = NULL; |
1182 | qp->xrcd = qp_init_attr->xrcd; |
	atomic_inc(&qp_init_attr->xrcd->usecnt);
	INIT_LIST_HEAD(&qp->open_list);

	qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
			  qp_init_attr->qp_context);
	if (IS_ERR(qp))
		return qp;

	err = xa_err(xa_store(&qp_init_attr->xrcd->tgt_qps, real_qp->qp_num,
			      real_qp, GFP_KERNEL));
	if (err) {
		ib_close_qp(qp);
		return ERR_PTR(err);
1196 | } |
1197 | return qp; |
1198 | } |
1199 | |
1200 | static struct ib_qp *create_qp(struct ib_device *dev, struct ib_pd *pd, |
1201 | struct ib_qp_init_attr *attr, |
1202 | struct ib_udata *udata, |
1203 | struct ib_uqp_object *uobj, const char *caller) |
1204 | { |
1205 | struct ib_udata dummy = {}; |
1206 | struct ib_qp *qp; |
1207 | int ret; |
1208 | |
1209 | if (!dev->ops.create_qp) |
		return ERR_PTR(-EOPNOTSUPP);
1211 | |
1212 | qp = rdma_zalloc_drv_obj_numa(dev, ib_qp); |
1213 | if (!qp) |
		return ERR_PTR(-ENOMEM);
1215 | |
1216 | qp->device = dev; |
1217 | qp->pd = pd; |
1218 | qp->uobject = uobj; |
1219 | qp->real_qp = qp; |
1220 | |
1221 | qp->qp_type = attr->qp_type; |
1222 | qp->rwq_ind_tbl = attr->rwq_ind_tbl; |
1223 | qp->srq = attr->srq; |
1224 | qp->event_handler = attr->event_handler; |
1225 | qp->port = attr->port_num; |
1226 | qp->qp_context = attr->qp_context; |
1227 | |
1228 | spin_lock_init(&qp->mr_lock); |
	INIT_LIST_HEAD(&qp->rdma_mrs);
	INIT_LIST_HEAD(&qp->sig_mrs);
1231 | |
1232 | qp->send_cq = attr->send_cq; |
1233 | qp->recv_cq = attr->recv_cq; |
1234 | |
	rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP);
	WARN_ONCE(!udata && !caller, "Missing kernel QP owner");
	rdma_restrack_set_name(&qp->res, udata ? NULL : caller);
1238 | ret = dev->ops.create_qp(qp, attr, udata); |
1239 | if (ret) |
1240 | goto err_create; |
1241 | |
1242 | /* |
1243 | * TODO: The mlx4 internally overwrites send_cq and recv_cq. |
1244 | * Unfortunately, it is not an easy task to fix that driver. |
1245 | */ |
1246 | qp->send_cq = attr->send_cq; |
1247 | qp->recv_cq = attr->recv_cq; |
1248 | |
1249 | ret = ib_create_qp_security(qp, dev); |
1250 | if (ret) |
1251 | goto err_security; |
1252 | |
	rdma_restrack_add(&qp->res);
1254 | return qp; |
1255 | |
1256 | err_security: |
1257 | qp->device->ops.destroy_qp(qp, udata ? &dummy : NULL); |
1258 | err_create: |
	rdma_restrack_put(&qp->res);
	kfree(qp);
	return ERR_PTR(ret);
1262 | |
1263 | } |
1264 | |
1265 | /** |
1266 | * ib_create_qp_user - Creates a QP associated with the specified protection |
1267 | * domain. |
1268 | * @dev: IB device |
1269 | * @pd: The protection domain associated with the QP. |
1270 | * @attr: A list of initial attributes required to create the |
1271 | * QP. If QP creation succeeds, then the attributes are updated to |
1272 | * the actual capabilities of the created QP. |
1273 | * @udata: User data |
 * @uobj: uverbs object
1275 | * @caller: caller's build-time module name |
1276 | */ |
1277 | struct ib_qp *ib_create_qp_user(struct ib_device *dev, struct ib_pd *pd, |
1278 | struct ib_qp_init_attr *attr, |
1279 | struct ib_udata *udata, |
1280 | struct ib_uqp_object *uobj, const char *caller) |
1281 | { |
1282 | struct ib_qp *qp, *xrc_qp; |
1283 | |
1284 | if (attr->qp_type == IB_QPT_XRC_TGT) |
1285 | qp = create_qp(dev, pd, attr, NULL, NULL, caller); |
1286 | else |
1287 | qp = create_qp(dev, pd, attr, udata, uobj, NULL); |
	if (attr->qp_type != IB_QPT_XRC_TGT || IS_ERR(qp))
		return qp;

	xrc_qp = create_xrc_qp_user(qp, attr);
	if (IS_ERR(xrc_qp)) {
		ib_destroy_qp(qp);
1294 | return xrc_qp; |
1295 | } |
1296 | |
1297 | xrc_qp->uobject = uobj; |
1298 | return xrc_qp; |
1299 | } |
1300 | EXPORT_SYMBOL(ib_create_qp_user); |
1301 | |
1302 | void ib_qp_usecnt_inc(struct ib_qp *qp) |
1303 | { |
	if (qp->pd)
		atomic_inc(&qp->pd->usecnt);
	if (qp->send_cq)
		atomic_inc(&qp->send_cq->usecnt);
	if (qp->recv_cq)
		atomic_inc(&qp->recv_cq->usecnt);
	if (qp->srq)
		atomic_inc(&qp->srq->usecnt);
	if (qp->rwq_ind_tbl)
		atomic_inc(&qp->rwq_ind_tbl->usecnt);
1314 | } |
1315 | EXPORT_SYMBOL(ib_qp_usecnt_inc); |
1316 | |
1317 | void ib_qp_usecnt_dec(struct ib_qp *qp) |
1318 | { |
	if (qp->rwq_ind_tbl)
		atomic_dec(&qp->rwq_ind_tbl->usecnt);
	if (qp->srq)
		atomic_dec(&qp->srq->usecnt);
	if (qp->recv_cq)
		atomic_dec(&qp->recv_cq->usecnt);
	if (qp->send_cq)
		atomic_dec(&qp->send_cq->usecnt);
	if (qp->pd)
		atomic_dec(&qp->pd->usecnt);
1329 | } |
1330 | EXPORT_SYMBOL(ib_qp_usecnt_dec); |
1331 | |
1332 | struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd, |
1333 | struct ib_qp_init_attr *qp_init_attr, |
1334 | const char *caller) |
1335 | { |
1336 | struct ib_device *device = pd->device; |
1337 | struct ib_qp *qp; |
1338 | int ret; |
1339 | |
1340 | /* |
	 * If the caller is using the RDMA API, calculate the resources
	 * needed for the RDMA READ/WRITE operations.
1343 | * |
1344 | * Note that these callers need to pass in a port number. |
1345 | */ |
1346 | if (qp_init_attr->cap.max_rdma_ctxs) |
		rdma_rw_init_qp(device, qp_init_attr);

	qp = create_qp(device, pd, qp_init_attr, NULL, NULL, caller);
	if (IS_ERR(qp))
		return qp;

	ib_qp_usecnt_inc(qp);

	if (qp_init_attr->cap.max_rdma_ctxs) {
		ret = rdma_rw_init_mrs(qp, qp_init_attr);
1357 | if (ret) |
1358 | goto err; |
1359 | } |
1360 | |
1361 | /* |
1362 | * Note: all hw drivers guarantee that max_send_sge is lower than |
1363 | * the device RDMA WRITE SGE limit but not all hw drivers ensure that |
1364 | * max_send_sge <= max_sge_rd. |
1365 | */ |
1366 | qp->max_write_sge = qp_init_attr->cap.max_send_sge; |
1367 | qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge, |
1368 | device->attrs.max_sge_rd); |
1369 | if (qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) |
1370 | qp->integrity_en = true; |
1371 | |
1372 | return qp; |
1373 | |
1374 | err: |
1375 | ib_destroy_qp(qp); |
	return ERR_PTR(ret);
1377 | |
1378 | } |
1379 | EXPORT_SYMBOL(ib_create_qp_kernel); |
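
/*
 * A minimal kernel-side sketch (not part of this file) of creating an RC QP
 * with the ib_create_qp() wrapper; the CQs and capacities are placeholders.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq = send_cq,
 *		.recv_cq = recv_cq,
 *		.cap = { .max_send_wr = 64, .max_recv_wr = 64,
 *			 .max_send_sge = 1, .max_recv_sge = 1 },
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp;
 *
 *	qp = ib_create_qp(pd, &init_attr);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 *	...
 *	ib_destroy_qp(qp);
 */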
1380 | |
1381 | static const struct { |
1382 | int valid; |
1383 | enum ib_qp_attr_mask req_param[IB_QPT_MAX]; |
1384 | enum ib_qp_attr_mask opt_param[IB_QPT_MAX]; |
1385 | } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { |
1386 | [IB_QPS_RESET] = { |
1387 | [IB_QPS_RESET] = { .valid = 1 }, |
1388 | [IB_QPS_INIT] = { |
1389 | .valid = 1, |
1390 | .req_param = { |
1391 | [IB_QPT_UD] = (IB_QP_PKEY_INDEX | |
1392 | IB_QP_PORT | |
1393 | IB_QP_QKEY), |
1394 | [IB_QPT_RAW_PACKET] = IB_QP_PORT, |
1395 | [IB_QPT_UC] = (IB_QP_PKEY_INDEX | |
1396 | IB_QP_PORT | |
1397 | IB_QP_ACCESS_FLAGS), |
1398 | [IB_QPT_RC] = (IB_QP_PKEY_INDEX | |
1399 | IB_QP_PORT | |
1400 | IB_QP_ACCESS_FLAGS), |
1401 | [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | |
1402 | IB_QP_PORT | |
1403 | IB_QP_ACCESS_FLAGS), |
1404 | [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | |
1405 | IB_QP_PORT | |
1406 | IB_QP_ACCESS_FLAGS), |
1407 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
1408 | IB_QP_QKEY), |
1409 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | |
1410 | IB_QP_QKEY), |
1411 | } |
1412 | }, |
1413 | }, |
1414 | [IB_QPS_INIT] = { |
1415 | [IB_QPS_RESET] = { .valid = 1 }, |
1416 | [IB_QPS_ERR] = { .valid = 1 }, |
1417 | [IB_QPS_INIT] = { |
1418 | .valid = 1, |
1419 | .opt_param = { |
1420 | [IB_QPT_UD] = (IB_QP_PKEY_INDEX | |
1421 | IB_QP_PORT | |
1422 | IB_QP_QKEY), |
1423 | [IB_QPT_UC] = (IB_QP_PKEY_INDEX | |
1424 | IB_QP_PORT | |
1425 | IB_QP_ACCESS_FLAGS), |
1426 | [IB_QPT_RC] = (IB_QP_PKEY_INDEX | |
1427 | IB_QP_PORT | |
1428 | IB_QP_ACCESS_FLAGS), |
1429 | [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | |
1430 | IB_QP_PORT | |
1431 | IB_QP_ACCESS_FLAGS), |
1432 | [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | |
1433 | IB_QP_PORT | |
1434 | IB_QP_ACCESS_FLAGS), |
1435 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
1436 | IB_QP_QKEY), |
1437 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | |
1438 | IB_QP_QKEY), |
1439 | } |
1440 | }, |
1441 | [IB_QPS_RTR] = { |
1442 | .valid = 1, |
1443 | .req_param = { |
1444 | [IB_QPT_UC] = (IB_QP_AV | |
1445 | IB_QP_PATH_MTU | |
1446 | IB_QP_DEST_QPN | |
1447 | IB_QP_RQ_PSN), |
1448 | [IB_QPT_RC] = (IB_QP_AV | |
1449 | IB_QP_PATH_MTU | |
1450 | IB_QP_DEST_QPN | |
1451 | IB_QP_RQ_PSN | |
1452 | IB_QP_MAX_DEST_RD_ATOMIC | |
1453 | IB_QP_MIN_RNR_TIMER), |
1454 | [IB_QPT_XRC_INI] = (IB_QP_AV | |
1455 | IB_QP_PATH_MTU | |
1456 | IB_QP_DEST_QPN | |
1457 | IB_QP_RQ_PSN), |
1458 | [IB_QPT_XRC_TGT] = (IB_QP_AV | |
1459 | IB_QP_PATH_MTU | |
1460 | IB_QP_DEST_QPN | |
1461 | IB_QP_RQ_PSN | |
1462 | IB_QP_MAX_DEST_RD_ATOMIC | |
1463 | IB_QP_MIN_RNR_TIMER), |
1464 | }, |
1465 | .opt_param = { |
1466 | [IB_QPT_UD] = (IB_QP_PKEY_INDEX | |
1467 | IB_QP_QKEY), |
1468 | [IB_QPT_UC] = (IB_QP_ALT_PATH | |
1469 | IB_QP_ACCESS_FLAGS | |
1470 | IB_QP_PKEY_INDEX), |
1471 | [IB_QPT_RC] = (IB_QP_ALT_PATH | |
1472 | IB_QP_ACCESS_FLAGS | |
1473 | IB_QP_PKEY_INDEX), |
1474 | [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH | |
1475 | IB_QP_ACCESS_FLAGS | |
1476 | IB_QP_PKEY_INDEX), |
1477 | [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH | |
1478 | IB_QP_ACCESS_FLAGS | |
1479 | IB_QP_PKEY_INDEX), |
1480 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
1481 | IB_QP_QKEY), |
1482 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | |
1483 | IB_QP_QKEY), |
1484 | }, |
1485 | }, |
1486 | }, |
1487 | [IB_QPS_RTR] = { |
1488 | [IB_QPS_RESET] = { .valid = 1 }, |
1489 | [IB_QPS_ERR] = { .valid = 1 }, |
1490 | [IB_QPS_RTS] = { |
1491 | .valid = 1, |
1492 | .req_param = { |
1493 | [IB_QPT_UD] = IB_QP_SQ_PSN, |
1494 | [IB_QPT_UC] = IB_QP_SQ_PSN, |
1495 | [IB_QPT_RC] = (IB_QP_TIMEOUT | |
1496 | IB_QP_RETRY_CNT | |
1497 | IB_QP_RNR_RETRY | |
1498 | IB_QP_SQ_PSN | |
1499 | IB_QP_MAX_QP_RD_ATOMIC), |
1500 | [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT | |
1501 | IB_QP_RETRY_CNT | |
1502 | IB_QP_RNR_RETRY | |
1503 | IB_QP_SQ_PSN | |
1504 | IB_QP_MAX_QP_RD_ATOMIC), |
1505 | [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT | |
1506 | IB_QP_SQ_PSN), |
1507 | [IB_QPT_SMI] = IB_QP_SQ_PSN, |
1508 | [IB_QPT_GSI] = IB_QP_SQ_PSN, |
1509 | }, |
1510 | .opt_param = { |
1511 | [IB_QPT_UD] = (IB_QP_CUR_STATE | |
1512 | IB_QP_QKEY), |
1513 | [IB_QPT_UC] = (IB_QP_CUR_STATE | |
1514 | IB_QP_ALT_PATH | |
1515 | IB_QP_ACCESS_FLAGS | |
1516 | IB_QP_PATH_MIG_STATE), |
1517 | [IB_QPT_RC] = (IB_QP_CUR_STATE | |
1518 | IB_QP_ALT_PATH | |
1519 | IB_QP_ACCESS_FLAGS | |
1520 | IB_QP_MIN_RNR_TIMER | |
1521 | IB_QP_PATH_MIG_STATE), |
1522 | [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | |
1523 | IB_QP_ALT_PATH | |
1524 | IB_QP_ACCESS_FLAGS | |
1525 | IB_QP_PATH_MIG_STATE), |
1526 | [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | |
1527 | IB_QP_ALT_PATH | |
1528 | IB_QP_ACCESS_FLAGS | |
1529 | IB_QP_MIN_RNR_TIMER | |
1530 | IB_QP_PATH_MIG_STATE), |
1531 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
1532 | IB_QP_QKEY), |
1533 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | |
1534 | IB_QP_QKEY), |
1535 | [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT, |
1536 | } |
1537 | } |
1538 | }, |
1539 | [IB_QPS_RTS] = { |
1540 | [IB_QPS_RESET] = { .valid = 1 }, |
1541 | [IB_QPS_ERR] = { .valid = 1 }, |
1542 | [IB_QPS_RTS] = { |
1543 | .valid = 1, |
1544 | .opt_param = { |
1545 | [IB_QPT_UD] = (IB_QP_CUR_STATE | |
1546 | IB_QP_QKEY), |
1547 | [IB_QPT_UC] = (IB_QP_CUR_STATE | |
1548 | IB_QP_ACCESS_FLAGS | |
1549 | IB_QP_ALT_PATH | |
1550 | IB_QP_PATH_MIG_STATE), |
1551 | [IB_QPT_RC] = (IB_QP_CUR_STATE | |
1552 | IB_QP_ACCESS_FLAGS | |
1553 | IB_QP_ALT_PATH | |
1554 | IB_QP_PATH_MIG_STATE | |
1555 | IB_QP_MIN_RNR_TIMER), |
1556 | [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | |
1557 | IB_QP_ACCESS_FLAGS | |
1558 | IB_QP_ALT_PATH | |
1559 | IB_QP_PATH_MIG_STATE), |
1560 | [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | |
1561 | IB_QP_ACCESS_FLAGS | |
1562 | IB_QP_ALT_PATH | |
1563 | IB_QP_PATH_MIG_STATE | |
1564 | IB_QP_MIN_RNR_TIMER), |
1565 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
1566 | IB_QP_QKEY), |
1567 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | |
1568 | IB_QP_QKEY), |
1569 | [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT, |
1570 | } |
1571 | }, |
1572 | [IB_QPS_SQD] = { |
1573 | .valid = 1, |
1574 | .opt_param = { |
1575 | [IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
1576 | [IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
1577 | [IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
1578 | [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
1579 | [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */ |
1580 | [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
1581 | [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY |
1582 | } |
1583 | }, |
1584 | }, |
1585 | [IB_QPS_SQD] = { |
1586 | [IB_QPS_RESET] = { .valid = 1 }, |
1587 | [IB_QPS_ERR] = { .valid = 1 }, |
1588 | [IB_QPS_RTS] = { |
1589 | .valid = 1, |
1590 | .opt_param = { |
1591 | [IB_QPT_UD] = (IB_QP_CUR_STATE | |
1592 | IB_QP_QKEY), |
1593 | [IB_QPT_UC] = (IB_QP_CUR_STATE | |
1594 | IB_QP_ALT_PATH | |
1595 | IB_QP_ACCESS_FLAGS | |
1596 | IB_QP_PATH_MIG_STATE), |
1597 | [IB_QPT_RC] = (IB_QP_CUR_STATE | |
1598 | IB_QP_ALT_PATH | |
1599 | IB_QP_ACCESS_FLAGS | |
1600 | IB_QP_MIN_RNR_TIMER | |
1601 | IB_QP_PATH_MIG_STATE), |
1602 | [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | |
1603 | IB_QP_ALT_PATH | |
1604 | IB_QP_ACCESS_FLAGS | |
1605 | IB_QP_PATH_MIG_STATE), |
1606 | [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | |
1607 | IB_QP_ALT_PATH | |
1608 | IB_QP_ACCESS_FLAGS | |
1609 | IB_QP_MIN_RNR_TIMER | |
1610 | IB_QP_PATH_MIG_STATE), |
1611 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
1612 | IB_QP_QKEY), |
1613 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | |
1614 | IB_QP_QKEY), |
1615 | } |
1616 | }, |
1617 | [IB_QPS_SQD] = { |
1618 | .valid = 1, |
1619 | .opt_param = { |
1620 | [IB_QPT_UD] = (IB_QP_PKEY_INDEX | |
1621 | IB_QP_QKEY), |
1622 | [IB_QPT_UC] = (IB_QP_AV | |
1623 | IB_QP_ALT_PATH | |
1624 | IB_QP_ACCESS_FLAGS | |
1625 | IB_QP_PKEY_INDEX | |
1626 | IB_QP_PATH_MIG_STATE), |
1627 | [IB_QPT_RC] = (IB_QP_PORT | |
1628 | IB_QP_AV | |
1629 | IB_QP_TIMEOUT | |
1630 | IB_QP_RETRY_CNT | |
1631 | IB_QP_RNR_RETRY | |
1632 | IB_QP_MAX_QP_RD_ATOMIC | |
1633 | IB_QP_MAX_DEST_RD_ATOMIC | |
1634 | IB_QP_ALT_PATH | |
1635 | IB_QP_ACCESS_FLAGS | |
1636 | IB_QP_PKEY_INDEX | |
1637 | IB_QP_MIN_RNR_TIMER | |
1638 | IB_QP_PATH_MIG_STATE), |
1639 | [IB_QPT_XRC_INI] = (IB_QP_PORT | |
1640 | IB_QP_AV | |
1641 | IB_QP_TIMEOUT | |
1642 | IB_QP_RETRY_CNT | |
1643 | IB_QP_RNR_RETRY | |
1644 | IB_QP_MAX_QP_RD_ATOMIC | |
1645 | IB_QP_ALT_PATH | |
1646 | IB_QP_ACCESS_FLAGS | |
1647 | IB_QP_PKEY_INDEX | |
1648 | IB_QP_PATH_MIG_STATE), |
1649 | [IB_QPT_XRC_TGT] = (IB_QP_PORT | |
1650 | IB_QP_AV | |
1651 | IB_QP_TIMEOUT | |
1652 | IB_QP_MAX_DEST_RD_ATOMIC | |
1653 | IB_QP_ALT_PATH | |
1654 | IB_QP_ACCESS_FLAGS | |
1655 | IB_QP_PKEY_INDEX | |
1656 | IB_QP_MIN_RNR_TIMER | |
1657 | IB_QP_PATH_MIG_STATE), |
1658 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
1659 | IB_QP_QKEY), |
1660 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | |
1661 | IB_QP_QKEY), |
1662 | } |
1663 | } |
1664 | }, |
1665 | [IB_QPS_SQE] = { |
1666 | [IB_QPS_RESET] = { .valid = 1 }, |
1667 | [IB_QPS_ERR] = { .valid = 1 }, |
1668 | [IB_QPS_RTS] = { |
1669 | .valid = 1, |
1670 | .opt_param = { |
1671 | [IB_QPT_UD] = (IB_QP_CUR_STATE | |
1672 | IB_QP_QKEY), |
1673 | [IB_QPT_UC] = (IB_QP_CUR_STATE | |
1674 | IB_QP_ACCESS_FLAGS), |
1675 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
1676 | IB_QP_QKEY), |
1677 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | |
1678 | IB_QP_QKEY), |
1679 | } |
1680 | } |
1681 | }, |
1682 | [IB_QPS_ERR] = { |
1683 | [IB_QPS_RESET] = { .valid = 1 }, |
1684 | [IB_QPS_ERR] = { .valid = 1 } |
1685 | } |
1686 | }; |
1687 | |
1688 | bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, |
1689 | enum ib_qp_type type, enum ib_qp_attr_mask mask) |
1690 | { |
1691 | enum ib_qp_attr_mask req_param, opt_param; |
1692 | |
1693 | if (mask & IB_QP_CUR_STATE && |
1694 | cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS && |
1695 | cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE) |
1696 | return false; |
1697 | |
1698 | if (!qp_state_table[cur_state][next_state].valid) |
1699 | return false; |
1700 | |
1701 | req_param = qp_state_table[cur_state][next_state].req_param[type]; |
1702 | opt_param = qp_state_table[cur_state][next_state].opt_param[type]; |
1703 | |
1704 | if ((mask & req_param) != req_param) |
1705 | return false; |
1706 | |
1707 | if (mask & ~(req_param | opt_param | IB_QP_STATE)) |
1708 | return false; |
1709 | |
1710 | return true; |
1711 | } |
1712 | EXPORT_SYMBOL(ib_modify_qp_is_ok); |
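
/*
 * Illustrative sketch only: a driver's modify_qp handler would typically
 * validate the caller's attribute mask with ib_modify_qp_is_ok() before
 * programming hardware. The my_* helpers and the way the current/new states
 * are derived below are assumptions, not core API.
 *
 *	static int my_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 *				int attr_mask, struct ib_udata *udata)
 *	{
 *		enum ib_qp_state cur = my_qp_hw_state(ibqp);
 *		enum ib_qp_state new = (attr_mask & IB_QP_STATE) ?
 *				       attr->qp_state : cur;
 *
 *		if (!ib_modify_qp_is_ok(cur, new, ibqp->qp_type, attr_mask))
 *			return -EINVAL;
 *
 *		return my_program_qp(ibqp, attr, attr_mask, cur, new);
 *	}
 */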
1713 | |
1714 | /** |
1715 | * ib_resolve_eth_dmac - Resolve destination mac address |
1716 | * @device: Device to consider |
1717 | * @ah_attr: address handle attribute which describes the |
1718 | * source and destination parameters |
 * ib_resolve_eth_dmac() resolves the destination MAC address and the L3 hop
 * limit. It returns 0 on success or an appropriate error code, and it
 * initializes the necessary ah_attr fields when the call is successful.
1722 | */ |
1723 | static int ib_resolve_eth_dmac(struct ib_device *device, |
1724 | struct rdma_ah_attr *ah_attr) |
1725 | { |
1726 | int ret = 0; |
1727 | |
1728 | if (rdma_is_multicast_addr(addr: (struct in6_addr *)ah_attr->grh.dgid.raw)) { |
1729 | if (ipv6_addr_v4mapped(a: (struct in6_addr *)ah_attr->grh.dgid.raw)) { |
1730 | __be32 addr = 0; |
1731 | |
1732 | memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4); |
1733 | ip_eth_mc_map(naddr: addr, buf: (char *)ah_attr->roce.dmac); |
1734 | } else { |
1735 | ipv6_eth_mc_map(addr: (struct in6_addr *)ah_attr->grh.dgid.raw, |
1736 | buf: (char *)ah_attr->roce.dmac); |
1737 | } |
1738 | } else { |
1739 | ret = ib_resolve_unicast_gid_dmac(device, ah_attr); |
1740 | } |
1741 | return ret; |
1742 | } |
1743 | |
1744 | static bool is_qp_type_connected(const struct ib_qp *qp) |
1745 | { |
1746 | return (qp->qp_type == IB_QPT_UC || |
1747 | qp->qp_type == IB_QPT_RC || |
1748 | qp->qp_type == IB_QPT_XRC_INI || |
1749 | qp->qp_type == IB_QPT_XRC_TGT); |
1750 | } |
1751 | |
1752 | /* |
1753 | * IB core internal function to perform QP attributes modification. |
1754 | */ |
1755 | static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr, |
1756 | int attr_mask, struct ib_udata *udata) |
1757 | { |
1758 | u32 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; |
1759 | const struct ib_gid_attr *old_sgid_attr_av; |
1760 | const struct ib_gid_attr *old_sgid_attr_alt_av; |
1761 | int ret; |
1762 | |
1763 | attr->xmit_slave = NULL; |
1764 | if (attr_mask & IB_QP_AV) { |
1765 | ret = rdma_fill_sgid_attr(device: qp->device, ah_attr: &attr->ah_attr, |
1766 | old_sgid_attr: &old_sgid_attr_av); |
1767 | if (ret) |
1768 | return ret; |
1769 | |
1770 | if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE && |
1771 | is_qp_type_connected(qp)) { |
1772 | struct net_device *slave; |
1773 | |
1774 | /* |
1775 | * If the user provided the qp_attr then we have to |
			 * resolve it. Kernel users have to provide already
1777 | * resolved rdma_ah_attr's. |
1778 | */ |
1779 | if (udata) { |
1780 | ret = ib_resolve_eth_dmac(device: qp->device, |
1781 | ah_attr: &attr->ah_attr); |
1782 | if (ret) |
1783 | goto out_av; |
1784 | } |
1785 | slave = rdma_lag_get_ah_roce_slave(device: qp->device, |
1786 | ah_attr: &attr->ah_attr, |
1787 | GFP_KERNEL); |
1788 | if (IS_ERR(ptr: slave)) { |
1789 | ret = PTR_ERR(ptr: slave); |
1790 | goto out_av; |
1791 | } |
1792 | attr->xmit_slave = slave; |
1793 | } |
1794 | } |
1795 | if (attr_mask & IB_QP_ALT_PATH) { |
1796 | /* |
1797 | * FIXME: This does not track the migration state, so if the |
1798 | * user loads a new alternate path after the HW has migrated |
1799 | * from primary->alternate we will keep the wrong |
1800 | * references. This is OK for IB because the reference |
1801 | * counting does not serve any functional purpose. |
1802 | */ |
1803 | ret = rdma_fill_sgid_attr(device: qp->device, ah_attr: &attr->alt_ah_attr, |
1804 | old_sgid_attr: &old_sgid_attr_alt_av); |
1805 | if (ret) |
1806 | goto out_av; |
1807 | |
1808 | /* |
1809 | * Today the core code can only handle alternate paths and APM |
		 * for IB. Ban them in RoCE mode.
1811 | */ |
1812 | if (!(rdma_protocol_ib(device: qp->device, |
1813 | port_num: attr->alt_ah_attr.port_num) && |
1814 | rdma_protocol_ib(device: qp->device, port_num: port))) { |
1815 | ret = -EINVAL; |
1816 | goto out; |
1817 | } |
1818 | } |
1819 | |
1820 | if (rdma_ib_or_roce(device: qp->device, port_num: port)) { |
1821 | if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) { |
1822 | dev_warn(&qp->device->dev, |
1823 | "%s rq_psn overflow, masking to 24 bits\n" , |
1824 | __func__); |
1825 | attr->rq_psn &= 0xffffff; |
1826 | } |
1827 | |
1828 | if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) { |
1829 | dev_warn(&qp->device->dev, |
1830 | " %s sq_psn overflow, masking to 24 bits\n" , |
1831 | __func__); |
1832 | attr->sq_psn &= 0xffffff; |
1833 | } |
1834 | } |
1835 | |
1836 | /* |
1837 | * Bind this qp to a counter automatically based on the rdma counter |
	 * rules. This is only done on the RST2INIT transition when a port is
	 * specified.
1839 | */ |
1840 | if (!qp->counter && (attr_mask & IB_QP_PORT) && |
1841 | ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT)) |
1842 | rdma_counter_bind_qp_auto(qp, port: attr->port_num); |
1843 | |
1844 | ret = ib_security_modify_qp(qp, qp_attr: attr, qp_attr_mask: attr_mask, udata); |
1845 | if (ret) |
1846 | goto out; |
1847 | |
1848 | if (attr_mask & IB_QP_PORT) |
1849 | qp->port = attr->port_num; |
1850 | if (attr_mask & IB_QP_AV) |
1851 | qp->av_sgid_attr = |
1852 | rdma_update_sgid_attr(ah_attr: &attr->ah_attr, old_attr: qp->av_sgid_attr); |
1853 | if (attr_mask & IB_QP_ALT_PATH) |
1854 | qp->alt_path_sgid_attr = rdma_update_sgid_attr( |
1855 | ah_attr: &attr->alt_ah_attr, old_attr: qp->alt_path_sgid_attr); |
1856 | |
1857 | out: |
1858 | if (attr_mask & IB_QP_ALT_PATH) |
1859 | rdma_unfill_sgid_attr(ah_attr: &attr->alt_ah_attr, old_sgid_attr: old_sgid_attr_alt_av); |
1860 | out_av: |
1861 | if (attr_mask & IB_QP_AV) { |
1862 | rdma_lag_put_ah_roce_slave(xmit_slave: attr->xmit_slave); |
1863 | rdma_unfill_sgid_attr(ah_attr: &attr->ah_attr, old_sgid_attr: old_sgid_attr_av); |
1864 | } |
1865 | return ret; |
1866 | } |
1867 | |
1868 | /** |
1869 | * ib_modify_qp_with_udata - Modifies the attributes for the specified QP. |
1870 | * @ib_qp: The QP to modify. |
1871 | * @attr: On input, specifies the QP attributes to modify. On output, |
1872 | * the current values of selected QP attributes are returned. |
1873 | * @attr_mask: A bit-mask used to specify which attributes of the QP |
1874 | * are being modified. |
 * @udata: pointer to the user's input/output buffer information
 *
 * It returns 0 on success and an appropriate error code on error.
1878 | */ |
1879 | int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr, |
1880 | int attr_mask, struct ib_udata *udata) |
1881 | { |
1882 | return _ib_modify_qp(qp: ib_qp->real_qp, attr, attr_mask, udata); |
1883 | } |
1884 | EXPORT_SYMBOL(ib_modify_qp_with_udata); |
1885 | |
1886 | static void ib_get_width_and_speed(u32 netdev_speed, u32 lanes, |
1887 | u16 *speed, u8 *width) |
1888 | { |
1889 | if (!lanes) { |
1890 | if (netdev_speed <= SPEED_1000) { |
1891 | *width = IB_WIDTH_1X; |
1892 | *speed = IB_SPEED_SDR; |
1893 | } else if (netdev_speed <= SPEED_10000) { |
1894 | *width = IB_WIDTH_1X; |
1895 | *speed = IB_SPEED_FDR10; |
1896 | } else if (netdev_speed <= SPEED_20000) { |
1897 | *width = IB_WIDTH_4X; |
1898 | *speed = IB_SPEED_DDR; |
1899 | } else if (netdev_speed <= SPEED_25000) { |
1900 | *width = IB_WIDTH_1X; |
1901 | *speed = IB_SPEED_EDR; |
1902 | } else if (netdev_speed <= SPEED_40000) { |
1903 | *width = IB_WIDTH_4X; |
1904 | *speed = IB_SPEED_FDR10; |
1905 | } else if (netdev_speed <= SPEED_50000) { |
1906 | *width = IB_WIDTH_2X; |
1907 | *speed = IB_SPEED_EDR; |
1908 | } else if (netdev_speed <= SPEED_100000) { |
1909 | *width = IB_WIDTH_4X; |
1910 | *speed = IB_SPEED_EDR; |
1911 | } else if (netdev_speed <= SPEED_200000) { |
1912 | *width = IB_WIDTH_4X; |
1913 | *speed = IB_SPEED_HDR; |
1914 | } else { |
1915 | *width = IB_WIDTH_4X; |
1916 | *speed = IB_SPEED_NDR; |
1917 | } |
1918 | |
1919 | return; |
1920 | } |
1921 | |
1922 | switch (lanes) { |
1923 | case 1: |
1924 | *width = IB_WIDTH_1X; |
1925 | break; |
1926 | case 2: |
1927 | *width = IB_WIDTH_2X; |
1928 | break; |
1929 | case 4: |
1930 | *width = IB_WIDTH_4X; |
1931 | break; |
1932 | case 8: |
1933 | *width = IB_WIDTH_8X; |
1934 | break; |
1935 | case 12: |
1936 | *width = IB_WIDTH_12X; |
1937 | break; |
1938 | default: |
1939 | *width = IB_WIDTH_1X; |
1940 | } |
1941 | |
1942 | switch (netdev_speed / lanes) { |
1943 | case SPEED_2500: |
1944 | *speed = IB_SPEED_SDR; |
1945 | break; |
1946 | case SPEED_5000: |
1947 | *speed = IB_SPEED_DDR; |
1948 | break; |
1949 | case SPEED_10000: |
1950 | *speed = IB_SPEED_FDR10; |
1951 | break; |
1952 | case SPEED_14000: |
1953 | *speed = IB_SPEED_FDR; |
1954 | break; |
1955 | case SPEED_25000: |
1956 | *speed = IB_SPEED_EDR; |
1957 | break; |
1958 | case SPEED_50000: |
1959 | *speed = IB_SPEED_HDR; |
1960 | break; |
1961 | case SPEED_100000: |
1962 | *speed = IB_SPEED_NDR; |
1963 | break; |
1964 | default: |
1965 | *speed = IB_SPEED_SDR; |
1966 | } |
1967 | } |
1968 | |
1969 | int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width) |
1970 | { |
1971 | int rc; |
1972 | u32 netdev_speed; |
1973 | struct net_device *netdev; |
1974 | struct ethtool_link_ksettings lksettings = {}; |
1975 | |
1976 | if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET) |
1977 | return -EINVAL; |
1978 | |
1979 | netdev = ib_device_get_netdev(ib_dev: dev, port: port_num); |
1980 | if (!netdev) |
1981 | return -ENODEV; |
1982 | |
1983 | rtnl_lock(); |
1984 | rc = __ethtool_get_link_ksettings(dev: netdev, link_ksettings: &lksettings); |
1985 | rtnl_unlock(); |
1986 | |
1987 | dev_put(dev: netdev); |
1988 | |
1989 | if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) { |
1990 | netdev_speed = lksettings.base.speed; |
1991 | } else { |
1992 | netdev_speed = SPEED_1000; |
1993 | if (rc) |
1994 | pr_warn("%s speed is unknown, defaulting to %u\n" , |
1995 | netdev->name, netdev_speed); |
1996 | } |
1997 | |
1998 | ib_get_width_and_speed(netdev_speed, lanes: lksettings.lanes, |
1999 | speed, width); |
2000 | |
2001 | return 0; |
2002 | } |
2003 | EXPORT_SYMBOL(ib_get_eth_speed); |
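
/*
 * Illustrative sketch only: RoCE drivers commonly call ib_get_eth_speed()
 * from their query_port handler to fill in the active speed and width of
 * struct ib_port_attr. The handler name below is hypothetical.
 *
 *	static int my_query_port(struct ib_device *ibdev, u32 port_num,
 *				 struct ib_port_attr *props)
 *	{
 *		return ib_get_eth_speed(ibdev, port_num, &props->active_speed,
 *					&props->active_width);
 *	}
 */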
2004 | |
2005 | int ib_modify_qp(struct ib_qp *qp, |
2006 | struct ib_qp_attr *qp_attr, |
2007 | int qp_attr_mask) |
2008 | { |
2009 | return _ib_modify_qp(qp: qp->real_qp, attr: qp_attr, attr_mask: qp_attr_mask, NULL); |
2010 | } |
2011 | EXPORT_SYMBOL(ib_modify_qp); |
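
/*
 * Illustrative sketch only: the most common kernel-ULP use of ib_modify_qp()
 * is moving a QP into the error state before tearing it down, so that all
 * outstanding work requests are flushed with completions.
 *
 *	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
 *	int err = ib_modify_qp(qp, &attr, IB_QP_STATE);
 */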
2012 | |
2013 | int ib_query_qp(struct ib_qp *qp, |
2014 | struct ib_qp_attr *qp_attr, |
2015 | int qp_attr_mask, |
2016 | struct ib_qp_init_attr *qp_init_attr) |
2017 | { |
2018 | qp_attr->ah_attr.grh.sgid_attr = NULL; |
2019 | qp_attr->alt_ah_attr.grh.sgid_attr = NULL; |
2020 | |
2021 | return qp->device->ops.query_qp ? |
2022 | qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask, |
2023 | qp_init_attr) : -EOPNOTSUPP; |
2024 | } |
2025 | EXPORT_SYMBOL(ib_query_qp); |
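
/*
 * Illustrative sketch only: querying just the current QP state; only the
 * attributes selected by qp_attr_mask are guaranteed to be meaningful on
 * return. handle_error_qp() is a hypothetical caller-side helper.
 *
 *	struct ib_qp_attr attr;
 *	struct ib_qp_init_attr init_attr;
 *
 *	if (!ib_query_qp(qp, &attr, IB_QP_STATE, &init_attr) &&
 *	    attr.qp_state == IB_QPS_ERR)
 *		handle_error_qp(qp);
 */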
2026 | |
2027 | int ib_close_qp(struct ib_qp *qp) |
2028 | { |
2029 | struct ib_qp *real_qp; |
2030 | unsigned long flags; |
2031 | |
2032 | real_qp = qp->real_qp; |
2033 | if (real_qp == qp) |
2034 | return -EINVAL; |
2035 | |
2036 | spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags); |
2037 | list_del(entry: &qp->open_list); |
2038 | spin_unlock_irqrestore(lock: &real_qp->device->qp_open_list_lock, flags); |
2039 | |
2040 | atomic_dec(v: &real_qp->usecnt); |
2041 | if (qp->qp_sec) |
2042 | ib_close_shared_qp_security(sec: qp->qp_sec); |
2043 | kfree(objp: qp); |
2044 | |
2045 | return 0; |
2046 | } |
2047 | EXPORT_SYMBOL(ib_close_qp); |
2048 | |
2049 | static int __ib_destroy_shared_qp(struct ib_qp *qp) |
2050 | { |
2051 | struct ib_xrcd *xrcd; |
2052 | struct ib_qp *real_qp; |
2053 | int ret; |
2054 | |
2055 | real_qp = qp->real_qp; |
2056 | xrcd = real_qp->xrcd; |
2057 | down_write(sem: &xrcd->tgt_qps_rwsem); |
2058 | ib_close_qp(qp); |
2059 | if (atomic_read(v: &real_qp->usecnt) == 0) |
2060 | xa_erase(&xrcd->tgt_qps, index: real_qp->qp_num); |
2061 | else |
2062 | real_qp = NULL; |
2063 | up_write(sem: &xrcd->tgt_qps_rwsem); |
2064 | |
2065 | if (real_qp) { |
2066 | ret = ib_destroy_qp(qp: real_qp); |
2067 | if (!ret) |
2068 | atomic_dec(v: &xrcd->usecnt); |
2069 | } |
2070 | |
2071 | return 0; |
2072 | } |
2073 | |
2074 | int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata) |
2075 | { |
2076 | const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr; |
2077 | const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr; |
2078 | struct ib_qp_security *sec; |
2079 | int ret; |
2080 | |
2081 | WARN_ON_ONCE(qp->mrs_used > 0); |
2082 | |
2083 | if (atomic_read(v: &qp->usecnt)) |
2084 | return -EBUSY; |
2085 | |
2086 | if (qp->real_qp != qp) |
2087 | return __ib_destroy_shared_qp(qp); |
2088 | |
2089 | sec = qp->qp_sec; |
2090 | if (sec) |
2091 | ib_destroy_qp_security_begin(sec); |
2092 | |
2093 | if (!qp->uobject) |
2094 | rdma_rw_cleanup_mrs(qp); |
2095 | |
2096 | rdma_counter_unbind_qp(qp, force: true); |
2097 | ret = qp->device->ops.destroy_qp(qp, udata); |
2098 | if (ret) { |
2099 | if (sec) |
2100 | ib_destroy_qp_security_abort(sec); |
2101 | return ret; |
2102 | } |
2103 | |
2104 | if (alt_path_sgid_attr) |
2105 | rdma_put_gid_attr(attr: alt_path_sgid_attr); |
2106 | if (av_sgid_attr) |
2107 | rdma_put_gid_attr(attr: av_sgid_attr); |
2108 | |
2109 | ib_qp_usecnt_dec(qp); |
2110 | if (sec) |
2111 | ib_destroy_qp_security_end(sec); |
2112 | |
2113 | rdma_restrack_del(res: &qp->res); |
2114 | kfree(objp: qp); |
2115 | return ret; |
2116 | } |
2117 | EXPORT_SYMBOL(ib_destroy_qp_user); |
2118 | |
2119 | /* Completion queues */ |
2120 | |
2121 | struct ib_cq *__ib_create_cq(struct ib_device *device, |
2122 | ib_comp_handler comp_handler, |
2123 | void (*event_handler)(struct ib_event *, void *), |
2124 | void *cq_context, |
2125 | const struct ib_cq_init_attr *cq_attr, |
2126 | const char *caller) |
2127 | { |
2128 | struct ib_cq *cq; |
2129 | int ret; |
2130 | |
2131 | cq = rdma_zalloc_drv_obj(device, ib_cq); |
2132 | if (!cq) |
2133 | return ERR_PTR(error: -ENOMEM); |
2134 | |
2135 | cq->device = device; |
2136 | cq->uobject = NULL; |
2137 | cq->comp_handler = comp_handler; |
2138 | cq->event_handler = event_handler; |
2139 | cq->cq_context = cq_context; |
2140 | atomic_set(v: &cq->usecnt, i: 0); |
2141 | |
2142 | rdma_restrack_new(res: &cq->res, type: RDMA_RESTRACK_CQ); |
2143 | rdma_restrack_set_name(res: &cq->res, caller); |
2144 | |
2145 | ret = device->ops.create_cq(cq, cq_attr, NULL); |
2146 | if (ret) { |
2147 | rdma_restrack_put(res: &cq->res); |
2148 | kfree(objp: cq); |
2149 | return ERR_PTR(error: ret); |
2150 | } |
2151 | |
2152 | rdma_restrack_add(res: &cq->res); |
2153 | return cq; |
2154 | } |
2155 | EXPORT_SYMBOL(__ib_create_cq); |
2156 | |
2157 | int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period) |
2158 | { |
2159 | if (cq->shared) |
2160 | return -EOPNOTSUPP; |
2161 | |
2162 | return cq->device->ops.modify_cq ? |
2163 | cq->device->ops.modify_cq(cq, cq_count, |
2164 | cq_period) : -EOPNOTSUPP; |
2165 | } |
2166 | EXPORT_SYMBOL(rdma_set_cq_moderation); |
2167 | |
2168 | int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata) |
2169 | { |
2170 | int ret; |
2171 | |
2172 | if (WARN_ON_ONCE(cq->shared)) |
2173 | return -EOPNOTSUPP; |
2174 | |
2175 | if (atomic_read(v: &cq->usecnt)) |
2176 | return -EBUSY; |
2177 | |
2178 | ret = cq->device->ops.destroy_cq(cq, udata); |
2179 | if (ret) |
2180 | return ret; |
2181 | |
2182 | rdma_restrack_del(res: &cq->res); |
2183 | kfree(objp: cq); |
2184 | return ret; |
2185 | } |
2186 | EXPORT_SYMBOL(ib_destroy_cq_user); |
2187 | |
2188 | int ib_resize_cq(struct ib_cq *cq, int cqe) |
2189 | { |
2190 | if (cq->shared) |
2191 | return -EOPNOTSUPP; |
2192 | |
2193 | return cq->device->ops.resize_cq ? |
2194 | cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP; |
2195 | } |
2196 | EXPORT_SYMBOL(ib_resize_cq); |
2197 | |
2198 | /* Memory regions */ |
2199 | |
2200 | struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, |
2201 | u64 virt_addr, int access_flags) |
2202 | { |
2203 | struct ib_mr *mr; |
2204 | |
2205 | if (access_flags & IB_ACCESS_ON_DEMAND) { |
2206 | if (!(pd->device->attrs.kernel_cap_flags & |
2207 | IBK_ON_DEMAND_PAGING)) { |
2208 | pr_debug("ODP support not available\n" ); |
2209 | return ERR_PTR(error: -EINVAL); |
2210 | } |
2211 | } |
2212 | |
2213 | mr = pd->device->ops.reg_user_mr(pd, start, length, virt_addr, |
2214 | access_flags, NULL); |
2215 | |
2216 | if (IS_ERR(ptr: mr)) |
2217 | return mr; |
2218 | |
2219 | mr->device = pd->device; |
2220 | mr->type = IB_MR_TYPE_USER; |
2221 | mr->pd = pd; |
2222 | mr->dm = NULL; |
2223 | atomic_inc(v: &pd->usecnt); |
2224 | mr->iova = virt_addr; |
2225 | mr->length = length; |
2226 | |
2227 | rdma_restrack_new(res: &mr->res, type: RDMA_RESTRACK_MR); |
2228 | rdma_restrack_parent_name(dst: &mr->res, parent: &pd->res); |
2229 | rdma_restrack_add(res: &mr->res); |
2230 | |
2231 | return mr; |
2232 | } |
2233 | EXPORT_SYMBOL(ib_reg_user_mr); |
2234 | |
2235 | int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice, |
2236 | u32 flags, struct ib_sge *sg_list, u32 num_sge) |
2237 | { |
2238 | if (!pd->device->ops.advise_mr) |
2239 | return -EOPNOTSUPP; |
2240 | |
2241 | if (!num_sge) |
2242 | return 0; |
2243 | |
2244 | return pd->device->ops.advise_mr(pd, advice, flags, sg_list, num_sge, |
2245 | NULL); |
2246 | } |
2247 | EXPORT_SYMBOL(ib_advise_mr); |
2248 | |
2249 | int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata) |
2250 | { |
2251 | struct ib_pd *pd = mr->pd; |
2252 | struct ib_dm *dm = mr->dm; |
2253 | struct ib_sig_attrs *sig_attrs = mr->sig_attrs; |
2254 | int ret; |
2255 | |
2256 | trace_mr_dereg(mr); |
2257 | rdma_restrack_del(res: &mr->res); |
2258 | ret = mr->device->ops.dereg_mr(mr, udata); |
2259 | if (!ret) { |
2260 | atomic_dec(v: &pd->usecnt); |
2261 | if (dm) |
2262 | atomic_dec(v: &dm->usecnt); |
2263 | kfree(objp: sig_attrs); |
2264 | } |
2265 | |
2266 | return ret; |
2267 | } |
2268 | EXPORT_SYMBOL(ib_dereg_mr_user); |
2269 | |
2270 | /** |
2271 | * ib_alloc_mr() - Allocates a memory region |
2272 | * @pd: protection domain associated with the region |
2273 | * @mr_type: memory region type |
2274 | * @max_num_sg: maximum sg entries available for registration. |
2275 | * |
2276 | * Notes: |
 * Memory registration page/sg lists must not exceed max_num_sg.
2278 | * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed |
2279 | * max_num_sg * used_page_size. |
2280 | * |
2281 | */ |
2282 | struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, |
2283 | u32 max_num_sg) |
2284 | { |
2285 | struct ib_mr *mr; |
2286 | |
2287 | if (!pd->device->ops.alloc_mr) { |
2288 | mr = ERR_PTR(error: -EOPNOTSUPP); |
2289 | goto out; |
2290 | } |
2291 | |
2292 | if (mr_type == IB_MR_TYPE_INTEGRITY) { |
2293 | WARN_ON_ONCE(1); |
2294 | mr = ERR_PTR(error: -EINVAL); |
2295 | goto out; |
2296 | } |
2297 | |
2298 | mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg); |
2299 | if (IS_ERR(ptr: mr)) |
2300 | goto out; |
2301 | |
2302 | mr->device = pd->device; |
2303 | mr->pd = pd; |
2304 | mr->dm = NULL; |
2305 | mr->uobject = NULL; |
2306 | atomic_inc(v: &pd->usecnt); |
2307 | mr->need_inval = false; |
2308 | mr->type = mr_type; |
2309 | mr->sig_attrs = NULL; |
2310 | |
2311 | rdma_restrack_new(res: &mr->res, type: RDMA_RESTRACK_MR); |
2312 | rdma_restrack_parent_name(dst: &mr->res, parent: &pd->res); |
2313 | rdma_restrack_add(res: &mr->res); |
2314 | out: |
2315 | trace_mr_alloc(pd, mr_type, max_num_sg, mr); |
2316 | return mr; |
2317 | } |
2318 | EXPORT_SYMBOL(ib_alloc_mr); |
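
/*
 * Illustrative sketch only: a kernel ULP that wants to fast-register a
 * scatterlist typically allocates an IB_MR_TYPE_MEM_REG MR sized for the
 * largest expected page list, maps the pages with ib_map_mr_sg() and posts
 * an IB_WR_REG_MR work request. nr_pages below is an assumed caller value.
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nr_pages);
 *
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 */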
2319 | |
2320 | /** |
2321 | * ib_alloc_mr_integrity() - Allocates an integrity memory region |
2322 | * @pd: protection domain associated with the region |
2323 | * @max_num_data_sg: maximum data sg entries available for registration |
2324 | * @max_num_meta_sg: maximum metadata sg entries available for |
2325 | * registration |
2326 | * |
2327 | * Notes: |
 * Memory registration page/sg lists must not exceed max_num_data_sg,
2329 | * also the integrity page/sg lists must not exceed max_num_meta_sg. |
2330 | * |
2331 | */ |
2332 | struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd, |
2333 | u32 max_num_data_sg, |
2334 | u32 max_num_meta_sg) |
2335 | { |
2336 | struct ib_mr *mr; |
2337 | struct ib_sig_attrs *sig_attrs; |
2338 | |
2339 | if (!pd->device->ops.alloc_mr_integrity || |
2340 | !pd->device->ops.map_mr_sg_pi) { |
2341 | mr = ERR_PTR(error: -EOPNOTSUPP); |
2342 | goto out; |
2343 | } |
2344 | |
2345 | if (!max_num_meta_sg) { |
2346 | mr = ERR_PTR(error: -EINVAL); |
2347 | goto out; |
2348 | } |
2349 | |
2350 | sig_attrs = kzalloc(size: sizeof(struct ib_sig_attrs), GFP_KERNEL); |
2351 | if (!sig_attrs) { |
2352 | mr = ERR_PTR(error: -ENOMEM); |
2353 | goto out; |
2354 | } |
2355 | |
2356 | mr = pd->device->ops.alloc_mr_integrity(pd, max_num_data_sg, |
2357 | max_num_meta_sg); |
2358 | if (IS_ERR(ptr: mr)) { |
2359 | kfree(objp: sig_attrs); |
2360 | goto out; |
2361 | } |
2362 | |
2363 | mr->device = pd->device; |
2364 | mr->pd = pd; |
2365 | mr->dm = NULL; |
2366 | mr->uobject = NULL; |
2367 | atomic_inc(v: &pd->usecnt); |
2368 | mr->need_inval = false; |
2369 | mr->type = IB_MR_TYPE_INTEGRITY; |
2370 | mr->sig_attrs = sig_attrs; |
2371 | |
2372 | rdma_restrack_new(res: &mr->res, type: RDMA_RESTRACK_MR); |
2373 | rdma_restrack_parent_name(dst: &mr->res, parent: &pd->res); |
2374 | rdma_restrack_add(res: &mr->res); |
2375 | out: |
2376 | trace_mr_integ_alloc(pd, max_num_data_sg, max_num_meta_sg, mr); |
2377 | return mr; |
2378 | } |
2379 | EXPORT_SYMBOL(ib_alloc_mr_integrity); |
2380 | |
2381 | /* Multicast groups */ |
2382 | |
2383 | static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid) |
2384 | { |
2385 | struct ib_qp_init_attr init_attr = {}; |
2386 | struct ib_qp_attr attr = {}; |
2387 | int num_eth_ports = 0; |
2388 | unsigned int port; |
2389 | |
2390 | /* If QP state >= init, it is assigned to a port and we can check this |
2391 | * port only. |
2392 | */ |
2393 | if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) { |
2394 | if (attr.qp_state >= IB_QPS_INIT) { |
2395 | if (rdma_port_get_link_layer(qp->device, attr.port_num) != |
2396 | IB_LINK_LAYER_INFINIBAND) |
2397 | return true; |
2398 | goto lid_check; |
2399 | } |
2400 | } |
2401 | |
2402 | /* Can't get a quick answer, iterate over all ports */ |
2403 | rdma_for_each_port(qp->device, port) |
2404 | if (rdma_port_get_link_layer(qp->device, port) != |
2405 | IB_LINK_LAYER_INFINIBAND) |
2406 | num_eth_ports++; |
2407 | |
	 * If we have at least one Ethernet port, the RoCE annex declares that
2409 | * multicast LID should be ignored. We can't tell at this step if the |
2410 | * QP belongs to an IB or Ethernet port. |
2411 | */ |
2412 | if (num_eth_ports) |
2413 | return true; |
2414 | |
2415 | /* If all the ports are IB, we can check according to IB spec. */ |
2416 | lid_check: |
2417 | return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) || |
2418 | lid == be16_to_cpu(IB_LID_PERMISSIVE)); |
2419 | } |
2420 | |
2421 | int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) |
2422 | { |
2423 | int ret; |
2424 | |
2425 | if (!qp->device->ops.attach_mcast) |
2426 | return -EOPNOTSUPP; |
2427 | |
2428 | if (!rdma_is_multicast_addr(addr: (struct in6_addr *)gid->raw) || |
2429 | qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid)) |
2430 | return -EINVAL; |
2431 | |
2432 | ret = qp->device->ops.attach_mcast(qp, gid, lid); |
2433 | if (!ret) |
2434 | atomic_inc(v: &qp->usecnt); |
2435 | return ret; |
2436 | } |
2437 | EXPORT_SYMBOL(ib_attach_mcast); |
2438 | |
2439 | int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) |
2440 | { |
2441 | int ret; |
2442 | |
2443 | if (!qp->device->ops.detach_mcast) |
2444 | return -EOPNOTSUPP; |
2445 | |
2446 | if (!rdma_is_multicast_addr(addr: (struct in6_addr *)gid->raw) || |
2447 | qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid)) |
2448 | return -EINVAL; |
2449 | |
2450 | ret = qp->device->ops.detach_mcast(qp, gid, lid); |
2451 | if (!ret) |
2452 | atomic_dec(v: &qp->usecnt); |
2453 | return ret; |
2454 | } |
2455 | EXPORT_SYMBOL(ib_detach_mcast); |
2456 | |
2457 | /** |
2458 | * ib_alloc_xrcd_user - Allocates an XRC domain. |
2459 | * @device: The device on which to allocate the XRC domain. |
2460 | * @inode: inode to connect XRCD |
2461 | * @udata: Valid user data or NULL for kernel object |
2462 | */ |
2463 | struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device, |
2464 | struct inode *inode, struct ib_udata *udata) |
2465 | { |
2466 | struct ib_xrcd *xrcd; |
2467 | int ret; |
2468 | |
2469 | if (!device->ops.alloc_xrcd) |
2470 | return ERR_PTR(error: -EOPNOTSUPP); |
2471 | |
2472 | xrcd = rdma_zalloc_drv_obj(device, ib_xrcd); |
2473 | if (!xrcd) |
2474 | return ERR_PTR(error: -ENOMEM); |
2475 | |
2476 | xrcd->device = device; |
2477 | xrcd->inode = inode; |
2478 | atomic_set(v: &xrcd->usecnt, i: 0); |
2479 | init_rwsem(&xrcd->tgt_qps_rwsem); |
2480 | xa_init(xa: &xrcd->tgt_qps); |
2481 | |
2482 | ret = device->ops.alloc_xrcd(xrcd, udata); |
2483 | if (ret) |
2484 | goto err; |
2485 | return xrcd; |
2486 | err: |
2487 | kfree(objp: xrcd); |
2488 | return ERR_PTR(error: ret); |
2489 | } |
2490 | EXPORT_SYMBOL(ib_alloc_xrcd_user); |
2491 | |
2492 | /** |
2493 | * ib_dealloc_xrcd_user - Deallocates an XRC domain. |
2494 | * @xrcd: The XRC domain to deallocate. |
2495 | * @udata: Valid user data or NULL for kernel object |
2496 | */ |
2497 | int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata) |
2498 | { |
2499 | int ret; |
2500 | |
2501 | if (atomic_read(v: &xrcd->usecnt)) |
2502 | return -EBUSY; |
2503 | |
2504 | WARN_ON(!xa_empty(&xrcd->tgt_qps)); |
2505 | ret = xrcd->device->ops.dealloc_xrcd(xrcd, udata); |
2506 | if (ret) |
2507 | return ret; |
2508 | kfree(objp: xrcd); |
2509 | return ret; |
2510 | } |
2511 | EXPORT_SYMBOL(ib_dealloc_xrcd_user); |
2512 | |
2513 | /** |
2514 | * ib_create_wq - Creates a WQ associated with the specified protection |
2515 | * domain. |
2516 | * @pd: The protection domain associated with the WQ. |
2517 | * @wq_attr: A list of initial attributes required to create the |
2518 | * WQ. If WQ creation succeeds, then the attributes are updated to |
2519 | * the actual capabilities of the created WQ. |
2520 | * |
2521 | * wq_attr->max_wr and wq_attr->max_sge determine |
 * the requested size of the WQ, and are set to the actual values allocated
2523 | * on return. |
2524 | * If ib_create_wq() succeeds, then max_wr and max_sge will always be |
2525 | * at least as large as the requested values. |
2526 | */ |
2527 | struct ib_wq *ib_create_wq(struct ib_pd *pd, |
2528 | struct ib_wq_init_attr *wq_attr) |
2529 | { |
2530 | struct ib_wq *wq; |
2531 | |
2532 | if (!pd->device->ops.create_wq) |
2533 | return ERR_PTR(error: -EOPNOTSUPP); |
2534 | |
2535 | wq = pd->device->ops.create_wq(pd, wq_attr, NULL); |
2536 | if (!IS_ERR(ptr: wq)) { |
2537 | wq->event_handler = wq_attr->event_handler; |
2538 | wq->wq_context = wq_attr->wq_context; |
2539 | wq->wq_type = wq_attr->wq_type; |
2540 | wq->cq = wq_attr->cq; |
2541 | wq->device = pd->device; |
2542 | wq->pd = pd; |
2543 | wq->uobject = NULL; |
2544 | atomic_inc(v: &pd->usecnt); |
2545 | atomic_inc(v: &wq_attr->cq->usecnt); |
2546 | atomic_set(v: &wq->usecnt, i: 0); |
2547 | } |
2548 | return wq; |
2549 | } |
2550 | EXPORT_SYMBOL(ib_create_wq); |
2551 | |
2552 | /** |
2553 | * ib_destroy_wq_user - Destroys the specified user WQ. |
2554 | * @wq: The WQ to destroy. |
2555 | * @udata: Valid user data |
2556 | */ |
2557 | int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata) |
2558 | { |
2559 | struct ib_cq *cq = wq->cq; |
2560 | struct ib_pd *pd = wq->pd; |
2561 | int ret; |
2562 | |
2563 | if (atomic_read(v: &wq->usecnt)) |
2564 | return -EBUSY; |
2565 | |
2566 | ret = wq->device->ops.destroy_wq(wq, udata); |
2567 | if (ret) |
2568 | return ret; |
2569 | |
2570 | atomic_dec(v: &pd->usecnt); |
2571 | atomic_dec(v: &cq->usecnt); |
2572 | return ret; |
2573 | } |
2574 | EXPORT_SYMBOL(ib_destroy_wq_user); |
2575 | |
2576 | int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, |
2577 | struct ib_mr_status *mr_status) |
2578 | { |
2579 | if (!mr->device->ops.check_mr_status) |
2580 | return -EOPNOTSUPP; |
2581 | |
2582 | return mr->device->ops.check_mr_status(mr, check_mask, mr_status); |
2583 | } |
2584 | EXPORT_SYMBOL(ib_check_mr_status); |
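
/*
 * Illustrative sketch only: after an integrity (signature) operation
 * completes, a ULP can ask whether the device detected a protection
 * information error. Storage ULPs follow roughly this pattern.
 *
 *	struct ib_mr_status mr_status;
 *	int err = ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
 *
 *	if (!err && (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS))
 *		pr_err("PI error at offset %llu\n",
 *		       mr_status.sig_err.sig_err_offset);
 */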
2585 | |
2586 | int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port, |
2587 | int state) |
2588 | { |
2589 | if (!device->ops.set_vf_link_state) |
2590 | return -EOPNOTSUPP; |
2591 | |
2592 | return device->ops.set_vf_link_state(device, vf, port, state); |
2593 | } |
2594 | EXPORT_SYMBOL(ib_set_vf_link_state); |
2595 | |
2596 | int ib_get_vf_config(struct ib_device *device, int vf, u32 port, |
2597 | struct ifla_vf_info *info) |
2598 | { |
2599 | if (!device->ops.get_vf_config) |
2600 | return -EOPNOTSUPP; |
2601 | |
2602 | return device->ops.get_vf_config(device, vf, port, info); |
2603 | } |
2604 | EXPORT_SYMBOL(ib_get_vf_config); |
2605 | |
2606 | int ib_get_vf_stats(struct ib_device *device, int vf, u32 port, |
2607 | struct ifla_vf_stats *stats) |
2608 | { |
2609 | if (!device->ops.get_vf_stats) |
2610 | return -EOPNOTSUPP; |
2611 | |
2612 | return device->ops.get_vf_stats(device, vf, port, stats); |
2613 | } |
2614 | EXPORT_SYMBOL(ib_get_vf_stats); |
2615 | |
2616 | int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid, |
2617 | int type) |
2618 | { |
2619 | if (!device->ops.set_vf_guid) |
2620 | return -EOPNOTSUPP; |
2621 | |
2622 | return device->ops.set_vf_guid(device, vf, port, guid, type); |
2623 | } |
2624 | EXPORT_SYMBOL(ib_set_vf_guid); |
2625 | |
2626 | int ib_get_vf_guid(struct ib_device *device, int vf, u32 port, |
2627 | struct ifla_vf_guid *node_guid, |
2628 | struct ifla_vf_guid *port_guid) |
2629 | { |
2630 | if (!device->ops.get_vf_guid) |
2631 | return -EOPNOTSUPP; |
2632 | |
2633 | return device->ops.get_vf_guid(device, vf, port, node_guid, port_guid); |
2634 | } |
2635 | EXPORT_SYMBOL(ib_get_vf_guid); |
2636 | /** |
2637 | * ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection |
2638 | * information) and set an appropriate memory region for registration. |
2639 | * @mr: memory region |
2640 | * @data_sg: dma mapped scatterlist for data |
2641 | * @data_sg_nents: number of entries in data_sg |
2642 | * @data_sg_offset: offset in bytes into data_sg |
2643 | * @meta_sg: dma mapped scatterlist for metadata |
2644 | * @meta_sg_nents: number of entries in meta_sg |
2645 | * @meta_sg_offset: offset in bytes into meta_sg |
2646 | * @page_size: page vector desired page size |
2647 | * |
2648 | * Constraints: |
2649 | * - The MR must be allocated with type IB_MR_TYPE_INTEGRITY. |
2650 | * |
2651 | * Return: 0 on success. |
2652 | * |
2653 | * After this completes successfully, the memory region |
2654 | * is ready for registration. |
2655 | */ |
2656 | int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg, |
2657 | int data_sg_nents, unsigned int *data_sg_offset, |
2658 | struct scatterlist *meta_sg, int meta_sg_nents, |
2659 | unsigned int *meta_sg_offset, unsigned int page_size) |
2660 | { |
2661 | if (unlikely(!mr->device->ops.map_mr_sg_pi || |
2662 | WARN_ON_ONCE(mr->type != IB_MR_TYPE_INTEGRITY))) |
2663 | return -EOPNOTSUPP; |
2664 | |
2665 | mr->page_size = page_size; |
2666 | |
2667 | return mr->device->ops.map_mr_sg_pi(mr, data_sg, data_sg_nents, |
2668 | data_sg_offset, meta_sg, |
2669 | meta_sg_nents, meta_sg_offset); |
2670 | } |
2671 | EXPORT_SYMBOL(ib_map_mr_sg_pi); |
2672 | |
2673 | /** |
2674 | * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list |
 * and set it on the memory region.
2676 | * @mr: memory region |
2677 | * @sg: dma mapped scatterlist |
2678 | * @sg_nents: number of entries in sg |
2679 | * @sg_offset: offset in bytes into sg |
2680 | * @page_size: page vector desired page size |
2681 | * |
2682 | * Constraints: |
2683 | * |
2684 | * - The first sg element is allowed to have an offset. |
2685 | * - Each sg element must either be aligned to page_size or virtually |
2686 | * contiguous to the previous element. In case an sg element has a |
2687 | * non-contiguous offset, the mapping prefix will not include it. |
2688 | * - The last sg element is allowed to have length less than page_size. |
 * - If the sg_nents total byte length exceeds the MR's max_num_sg * page_size,
2690 | * then only max_num_sg entries will be mapped. |
2691 | * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these |
2692 | * constraints holds and the page_size argument is ignored. |
2693 | * |
2694 | * Returns the number of sg elements that were mapped to the memory region. |
2695 | * |
2696 | * After this completes successfully, the memory region |
2697 | * is ready for registration. |
2698 | */ |
2699 | int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, |
2700 | unsigned int *sg_offset, unsigned int page_size) |
2701 | { |
2702 | if (unlikely(!mr->device->ops.map_mr_sg)) |
2703 | return -EOPNOTSUPP; |
2704 | |
2705 | mr->page_size = page_size; |
2706 | |
2707 | return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset); |
2708 | } |
2709 | EXPORT_SYMBOL(ib_map_mr_sg); |
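
/*
 * Illustrative sketch only: the usual fast-registration flow is to DMA-map
 * the scatterlist, map it into the MR page vector, and then post an
 * IB_WR_REG_MR work request built from the returned mapping. sg and
 * sg_nents are assumed to come from the caller.
 *
 *	int n = ib_dma_map_sg(qp->device, sg, sg_nents, DMA_TO_DEVICE);
 *
 *	if (!n)
 *		return -ENOMEM;
 *	n = ib_map_mr_sg(mr, sg, n, NULL, PAGE_SIZE);
 *	if (n < 0)
 *		return n;
 */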
2710 | |
2711 | /** |
2712 | * ib_sg_to_pages() - Convert the largest prefix of a sg list |
2713 | * to a page vector |
2714 | * @mr: memory region |
2715 | * @sgl: dma mapped scatterlist |
2716 | * @sg_nents: number of entries in sg |
2717 | * @sg_offset_p: ==== ======================================================= |
2718 | * IN start offset in bytes into sg |
2719 | * OUT offset in bytes for element n of the sg of the first |
2720 | * byte that has not been processed where n is the return |
2721 | * value of this function. |
2722 | * ==== ======================================================= |
2723 | * @set_page: driver page assignment function pointer |
2724 | * |
2725 | * Core service helper for drivers to convert the largest |
2726 | * prefix of given sg list to a page vector. The sg list |
 * prefix converted is the prefix that meets the requirements
2728 | * of ib_map_mr_sg. |
2729 | * |
2730 | * Returns the number of sg elements that were assigned to |
2731 | * a page vector. |
2732 | */ |
2733 | int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents, |
2734 | unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64)) |
2735 | { |
2736 | struct scatterlist *sg; |
2737 | u64 last_end_dma_addr = 0; |
2738 | unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0; |
2739 | unsigned int last_page_off = 0; |
2740 | u64 page_mask = ~((u64)mr->page_size - 1); |
2741 | int i, ret; |
2742 | |
2743 | if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0]))) |
2744 | return -EINVAL; |
2745 | |
2746 | mr->iova = sg_dma_address(&sgl[0]) + sg_offset; |
2747 | mr->length = 0; |
2748 | |
2749 | for_each_sg(sgl, sg, sg_nents, i) { |
2750 | u64 dma_addr = sg_dma_address(sg) + sg_offset; |
2751 | u64 prev_addr = dma_addr; |
2752 | unsigned int dma_len = sg_dma_len(sg) - sg_offset; |
2753 | u64 end_dma_addr = dma_addr + dma_len; |
2754 | u64 page_addr = dma_addr & page_mask; |
2755 | |
2756 | /* |
2757 | * For the second and later elements, check whether either the |
2758 | * end of element i-1 or the start of element i is not aligned |
2759 | * on a page boundary. |
2760 | */ |
2761 | if (i && (last_page_off != 0 || page_addr != dma_addr)) { |
2762 | /* Stop mapping if there is a gap. */ |
2763 | if (last_end_dma_addr != dma_addr) |
2764 | break; |
2765 | |
2766 | /* |
2767 | * Coalesce this element with the last. If it is small |
2768 | * enough just update mr->length. Otherwise start |
2769 | * mapping from the next page. |
2770 | */ |
2771 | goto next_page; |
2772 | } |
2773 | |
2774 | do { |
2775 | ret = set_page(mr, page_addr); |
2776 | if (unlikely(ret < 0)) { |
2777 | sg_offset = prev_addr - sg_dma_address(sg); |
2778 | mr->length += prev_addr - dma_addr; |
2779 | if (sg_offset_p) |
2780 | *sg_offset_p = sg_offset; |
2781 | return i || sg_offset ? i : ret; |
2782 | } |
2783 | prev_addr = page_addr; |
2784 | next_page: |
2785 | page_addr += mr->page_size; |
2786 | } while (page_addr < end_dma_addr); |
2787 | |
2788 | mr->length += dma_len; |
2789 | last_end_dma_addr = end_dma_addr; |
2790 | last_page_off = end_dma_addr & ~page_mask; |
2791 | |
2792 | sg_offset = 0; |
2793 | } |
2794 | |
2795 | if (sg_offset_p) |
2796 | *sg_offset_p = 0; |
2797 | return i; |
2798 | } |
2799 | EXPORT_SYMBOL(ib_sg_to_pages); |
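
/*
 * Illustrative sketch only: drivers typically implement their map_mr_sg
 * handler on top of ib_sg_to_pages() by supplying a set_page callback that
 * stores each page address in a driver-private page list. The my_* names
 * and the pages[]/npages/max_pages fields are hypothetical driver state.
 *
 *	static int my_set_page(struct ib_mr *ibmr, u64 addr)
 *	{
 *		struct my_mr *mr = to_my_mr(ibmr);
 *
 *		if (mr->npages == mr->max_pages)
 *			return -ENOMEM;
 *		mr->pages[mr->npages++] = addr;
 *		return 0;
 *	}
 *
 *	static int my_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 *				int sg_nents, unsigned int *sg_offset)
 *	{
 *		to_my_mr(ibmr)->npages = 0;
 *		return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
 *				      my_set_page);
 *	}
 */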
2800 | |
2801 | struct ib_drain_cqe { |
2802 | struct ib_cqe cqe; |
2803 | struct completion done; |
2804 | }; |
2805 | |
2806 | static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc) |
2807 | { |
2808 | struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe, |
2809 | cqe); |
2810 | |
2811 | complete(&cqe->done); |
2812 | } |
2813 | |
2814 | /* |
2815 | * Post a WR and block until its completion is reaped for the SQ. |
2816 | */ |
2817 | static void __ib_drain_sq(struct ib_qp *qp) |
2818 | { |
2819 | struct ib_cq *cq = qp->send_cq; |
2820 | struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; |
2821 | struct ib_drain_cqe sdrain; |
2822 | struct ib_rdma_wr swr = { |
2823 | .wr = { |
2824 | .next = NULL, |
2825 | { .wr_cqe = &sdrain.cqe, }, |
2826 | .opcode = IB_WR_RDMA_WRITE, |
2827 | }, |
2828 | }; |
2829 | int ret; |
2830 | |
2831 | ret = ib_modify_qp(qp, &attr, IB_QP_STATE); |
2832 | if (ret) { |
2833 | WARN_ONCE(ret, "failed to drain send queue: %d\n" , ret); |
2834 | return; |
2835 | } |
2836 | |
2837 | sdrain.cqe.done = ib_drain_qp_done; |
2838 | init_completion(x: &sdrain.done); |
2839 | |
2840 | ret = ib_post_send(qp, send_wr: &swr.wr, NULL); |
2841 | if (ret) { |
2842 | WARN_ONCE(ret, "failed to drain send queue: %d\n" , ret); |
2843 | return; |
2844 | } |
2845 | |
2846 | if (cq->poll_ctx == IB_POLL_DIRECT) |
2847 | while (wait_for_completion_timeout(x: &sdrain.done, HZ / 10) <= 0) |
2848 | ib_process_cq_direct(cq, budget: -1); |
2849 | else |
2850 | wait_for_completion(&sdrain.done); |
2851 | } |
2852 | |
2853 | /* |
2854 | * Post a WR and block until its completion is reaped for the RQ. |
2855 | */ |
2856 | static void __ib_drain_rq(struct ib_qp *qp) |
2857 | { |
2858 | struct ib_cq *cq = qp->recv_cq; |
2859 | struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; |
2860 | struct ib_drain_cqe rdrain; |
2861 | struct ib_recv_wr rwr = {}; |
2862 | int ret; |
2863 | |
2864 | ret = ib_modify_qp(qp, &attr, IB_QP_STATE); |
2865 | if (ret) { |
2866 | WARN_ONCE(ret, "failed to drain recv queue: %d\n" , ret); |
2867 | return; |
2868 | } |
2869 | |
2870 | rwr.wr_cqe = &rdrain.cqe; |
2871 | rdrain.cqe.done = ib_drain_qp_done; |
2872 | init_completion(x: &rdrain.done); |
2873 | |
2874 | ret = ib_post_recv(qp, recv_wr: &rwr, NULL); |
2875 | if (ret) { |
2876 | WARN_ONCE(ret, "failed to drain recv queue: %d\n" , ret); |
2877 | return; |
2878 | } |
2879 | |
2880 | if (cq->poll_ctx == IB_POLL_DIRECT) |
2881 | while (wait_for_completion_timeout(x: &rdrain.done, HZ / 10) <= 0) |
2882 | ib_process_cq_direct(cq, budget: -1); |
2883 | else |
2884 | wait_for_completion(&rdrain.done); |
2885 | } |
2886 | |
2887 | /** |
2888 | * ib_drain_sq() - Block until all SQ CQEs have been consumed by the |
2889 | * application. |
2890 | * @qp: queue pair to drain |
2891 | * |
2892 | * If the device has a provider-specific drain function, then |
2893 | * call that. Otherwise call the generic drain function |
2894 | * __ib_drain_sq(). |
2895 | * |
2896 | * The caller must: |
2897 | * |
2898 | * ensure there is room in the CQ and SQ for the drain work request and |
2899 | * completion. |
2900 | * |
2901 | * allocate the CQ using ib_alloc_cq(). |
2902 | * |
2903 | * ensure that there are no other contexts that are posting WRs concurrently. |
2904 | * Otherwise the drain is not guaranteed. |
2905 | */ |
2906 | void ib_drain_sq(struct ib_qp *qp) |
2907 | { |
2908 | if (qp->device->ops.drain_sq) |
2909 | qp->device->ops.drain_sq(qp); |
2910 | else |
2911 | __ib_drain_sq(qp); |
2912 | trace_cq_drain_complete(cq: qp->send_cq); |
2913 | } |
2914 | EXPORT_SYMBOL(ib_drain_sq); |
2915 | |
2916 | /** |
2917 | * ib_drain_rq() - Block until all RQ CQEs have been consumed by the |
2918 | * application. |
2919 | * @qp: queue pair to drain |
2920 | * |
2921 | * If the device has a provider-specific drain function, then |
2922 | * call that. Otherwise call the generic drain function |
2923 | * __ib_drain_rq(). |
2924 | * |
2925 | * The caller must: |
2926 | * |
2927 | * ensure there is room in the CQ and RQ for the drain work request and |
2928 | * completion. |
2929 | * |
2930 | * allocate the CQ using ib_alloc_cq(). |
2931 | * |
2932 | * ensure that there are no other contexts that are posting WRs concurrently. |
2933 | * Otherwise the drain is not guaranteed. |
2934 | */ |
2935 | void ib_drain_rq(struct ib_qp *qp) |
2936 | { |
2937 | if (qp->device->ops.drain_rq) |
2938 | qp->device->ops.drain_rq(qp); |
2939 | else |
2940 | __ib_drain_rq(qp); |
2941 | trace_cq_drain_complete(cq: qp->recv_cq); |
2942 | } |
2943 | EXPORT_SYMBOL(ib_drain_rq); |
2944 | |
2945 | /** |
2946 | * ib_drain_qp() - Block until all CQEs have been consumed by the |
2947 | * application on both the RQ and SQ. |
2948 | * @qp: queue pair to drain |
2949 | * |
2950 | * The caller must: |
2951 | * |
2952 | * ensure there is room in the CQ(s), SQ, and RQ for drain work requests |
2953 | * and completions. |
2954 | * |
2955 | * allocate the CQs using ib_alloc_cq(). |
2956 | * |
2957 | * ensure that there are no other contexts that are posting WRs concurrently. |
2958 | * Otherwise the drain is not guaranteed. |
2959 | */ |
2960 | void ib_drain_qp(struct ib_qp *qp) |
2961 | { |
2962 | ib_drain_sq(qp); |
2963 | if (!qp->srq) |
2964 | ib_drain_rq(qp); |
2965 | } |
2966 | EXPORT_SYMBOL(ib_drain_qp); |
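
/*
 * Illustrative sketch only: a ULP that owns its CQs typically drains a QP
 * before freeing resources tied to outstanding work requests, and only then
 * destroys it.
 *
 *	ib_drain_qp(qp);
 *	ib_destroy_qp(qp);
 */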
2967 | |
2968 | struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num, |
2969 | enum rdma_netdev_t type, const char *name, |
2970 | unsigned char name_assign_type, |
2971 | void (*setup)(struct net_device *)) |
2972 | { |
2973 | struct rdma_netdev_alloc_params params; |
2974 | struct net_device *netdev; |
2975 | int rc; |
2976 | |
2977 | if (!device->ops.rdma_netdev_get_params) |
2978 | return ERR_PTR(error: -EOPNOTSUPP); |
2979 | |
2980 | rc = device->ops.rdma_netdev_get_params(device, port_num, type, |
2981 | ¶ms); |
2982 | if (rc) |
2983 | return ERR_PTR(error: rc); |
2984 | |
2985 | netdev = alloc_netdev_mqs(sizeof_priv: params.sizeof_priv, name, name_assign_type, |
2986 | setup, txqs: params.txqs, rxqs: params.rxqs); |
2987 | if (!netdev) |
2988 | return ERR_PTR(error: -ENOMEM); |
2989 | |
2990 | return netdev; |
2991 | } |
2992 | EXPORT_SYMBOL(rdma_alloc_netdev); |
2993 | |
2994 | int rdma_init_netdev(struct ib_device *device, u32 port_num, |
2995 | enum rdma_netdev_t type, const char *name, |
2996 | unsigned char name_assign_type, |
2997 | void (*setup)(struct net_device *), |
2998 | struct net_device *netdev) |
2999 | { |
3000 | struct rdma_netdev_alloc_params params; |
3001 | int rc; |
3002 | |
3003 | if (!device->ops.rdma_netdev_get_params) |
3004 | return -EOPNOTSUPP; |
3005 | |
3006 | rc = device->ops.rdma_netdev_get_params(device, port_num, type, |
3007 | ¶ms); |
3008 | if (rc) |
3009 | return rc; |
3010 | |
3011 | return params.initialize_rdma_netdev(device, port_num, |
3012 | netdev, params.param); |
3013 | } |
3014 | EXPORT_SYMBOL(rdma_init_netdev); |
3015 | |
3016 | void __rdma_block_iter_start(struct ib_block_iter *biter, |
3017 | struct scatterlist *sglist, unsigned int nents, |
3018 | unsigned long pgsz) |
3019 | { |
3020 | memset(biter, 0, sizeof(struct ib_block_iter)); |
3021 | biter->__sg = sglist; |
3022 | biter->__sg_nents = nents; |
3023 | |
3024 | /* Driver provides best block size to use */ |
3025 | biter->__pg_bit = __fls(word: pgsz); |
3026 | } |
3027 | EXPORT_SYMBOL(__rdma_block_iter_start); |
3028 | |
3029 | bool __rdma_block_iter_next(struct ib_block_iter *biter) |
3030 | { |
3031 | unsigned int block_offset; |
3032 | unsigned int sg_delta; |
3033 | |
3034 | if (!biter->__sg_nents || !biter->__sg) |
3035 | return false; |
3036 | |
3037 | biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance; |
3038 | block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1); |
3039 | sg_delta = BIT_ULL(biter->__pg_bit) - block_offset; |
3040 | |
3041 | if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) { |
3042 | biter->__sg_advance += sg_delta; |
3043 | } else { |
3044 | biter->__sg_advance = 0; |
3045 | biter->__sg = sg_next(biter->__sg); |
3046 | biter->__sg_nents--; |
3047 | } |
3048 | |
3049 | return true; |
3050 | } |
3051 | EXPORT_SYMBOL(__rdma_block_iter_next); |
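
/*
 * Illustrative sketch only: drivers normally consume these two helpers via
 * the rdma_for_each_block() macro to walk a DMA-mapped scatterlist in
 * device-page-size blocks. sglist, nents, page_size and
 * program_mtt_entry() are assumed caller-side names.
 *
 *	struct ib_block_iter biter;
 *
 *	rdma_for_each_block(sglist, &biter, nents, page_size)
 *		program_mtt_entry(rdma_block_iter_dma_address(&biter));
 */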
3052 | |
3053 | /** |
3054 | * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct |
3055 | * for the drivers. |
3056 | * @descs: array of static descriptors |
3057 | * @num_counters: number of elements in array |
3058 | * @lifespan: milliseconds between updates |
3059 | */ |
3060 | struct rdma_hw_stats *rdma_alloc_hw_stats_struct( |
3061 | const struct rdma_stat_desc *descs, int num_counters, |
3062 | unsigned long lifespan) |
3063 | { |
3064 | struct rdma_hw_stats *stats; |
3065 | |
3066 | stats = kzalloc(struct_size(stats, value, num_counters), GFP_KERNEL); |
3067 | if (!stats) |
3068 | return NULL; |
3069 | |
3070 | stats->is_disabled = kcalloc(BITS_TO_LONGS(num_counters), |
3071 | size: sizeof(*stats->is_disabled), GFP_KERNEL); |
3072 | if (!stats->is_disabled) |
3073 | goto err; |
3074 | |
3075 | stats->descs = descs; |
3076 | stats->num_counters = num_counters; |
3077 | stats->lifespan = msecs_to_jiffies(m: lifespan); |
3078 | mutex_init(&stats->lock); |
3079 | |
3080 | return stats; |
3081 | |
3082 | err: |
3083 | kfree(objp: stats); |
3084 | return NULL; |
3085 | } |
3086 | EXPORT_SYMBOL(rdma_alloc_hw_stats_struct); |
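
/*
 * Illustrative sketch only: a driver declares a static descriptor array and
 * allocates its per-port stats from it, typically in its alloc_hw_port_stats
 * callback. The my_* names and counter names below are made up.
 *
 *	static const struct rdma_stat_desc my_port_stat_descs[] = {
 *		{ .name = "rx_packets" },
 *		{ .name = "tx_packets" },
 *	};
 *
 *	static struct rdma_hw_stats *
 *	my_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num)
 *	{
 *		return rdma_alloc_hw_stats_struct(my_port_stat_descs,
 *					ARRAY_SIZE(my_port_stat_descs),
 *					RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *	}
 */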
3087 | |
3088 | /** |
3089 | * rdma_free_hw_stats_struct - Helper function to release rdma_hw_stats |
3090 | * @stats: statistics to release |
3091 | */ |
3092 | void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats) |
3093 | { |
3094 | if (!stats) |
3095 | return; |
3096 | |
3097 | kfree(objp: stats->is_disabled); |
3098 | kfree(objp: stats); |
3099 | } |
3100 | EXPORT_SYMBOL(rdma_free_hw_stats_struct); |
3101 | |