/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: IB Verbs interpreter
 */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <net/addrconf.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pma.h>
#include <rdma/uverbs_ioctl.h>
#include <linux/hashtable.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"

#include "bnxt_re.h"
#include "ib_verbs.h"
#include "debugfs.h"

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>

#include <rdma/ib_user_ioctl_cmds.h>

#define UVERBS_MODULE_NAME bnxt_re
#include <rdma/uverbs_named_ioctl.h>

#include <rdma/bnxt_re-abi.h>

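/* Translate IB verbs access flags into the BNXT_QPLIB_ACCESS_* bitmask
 * and back. The Gen P5/P7 QP-modify path uses the CMDQ_* variants below.
 */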
static int __from_ib_access_flags(int iflags)
{
	int qflags = 0;

	if (iflags & IB_ACCESS_LOCAL_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
	if (iflags & IB_ACCESS_REMOTE_READ)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
	if (iflags & IB_ACCESS_REMOTE_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
	if (iflags & IB_ACCESS_MW_BIND)
		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
	if (iflags & IB_ZERO_BASED)
		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
	if (iflags & IB_ACCESS_ON_DEMAND)
		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
	return qflags;
};

static int __to_ib_access_flags(int qflags)
{
	int iflags = 0;

	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
		iflags |= IB_ACCESS_LOCAL_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
		iflags |= IB_ACCESS_REMOTE_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
		iflags |= IB_ACCESS_REMOTE_READ;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
		iflags |= IB_ACCESS_REMOTE_ATOMIC;
	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
		iflags |= IB_ACCESS_MW_BIND;
	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
		iflags |= IB_ZERO_BASED;
	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
		iflags |= IB_ACCESS_ON_DEMAND;
	return iflags;
}

static u8 __qp_access_flags_from_ib(struct bnxt_qplib_chip_ctx *cctx, int iflags)
{
	u8 qflags = 0;

	if (!bnxt_qplib_is_chip_gen_p5_p7(cctx))
		/* For Wh+ */
		return (u8)__from_ib_access_flags(iflags);

	/* For P5, P7 and later chips */
	if (iflags & IB_ACCESS_LOCAL_WRITE)
		qflags |= CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE;
	if (iflags & IB_ACCESS_REMOTE_WRITE)
		qflags |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
	if (iflags & IB_ACCESS_REMOTE_READ)
		qflags |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
		qflags |= CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC;

	return qflags;
}

static int __qp_access_flags_to_ib(struct bnxt_qplib_chip_ctx *cctx, u8 qflags)
{
	int iflags = 0;

	if (!bnxt_qplib_is_chip_gen_p5_p7(cctx))
		/* For Wh+ */
		return __to_ib_access_flags(qflags);

	/* For P5, P7 and later chips */
	if (qflags & CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE)
		iflags |= IB_ACCESS_LOCAL_WRITE;
	if (qflags & CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE)
		iflags |= IB_ACCESS_REMOTE_WRITE;
	if (qflags & CMDQ_MODIFY_QP_ACCESS_REMOTE_READ)
		iflags |= IB_ACCESS_REMOTE_READ;
	if (qflags & CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC)
		iflags |= IB_ACCESS_REMOTE_ATOMIC;

	return iflags;
}

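/* Enable relaxed ordering for an MR only when both the device capability
 * and PCIe relaxed ordering on the PCI device are available.
 */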
static void bnxt_re_check_and_set_relaxed_ordering(struct bnxt_re_dev *rdev,
						   struct bnxt_qplib_mrw *qplib_mr)
{
	if (_is_relaxed_ordering_supported(rdev->dev_attr->dev_cap_flags2) &&
	    pcie_relaxed_ordering_enabled(rdev->en_dev->pdev))
		qplib_mr->flags |= CMDQ_REGISTER_MR_FLAGS_ENABLE_RO;
}

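/* Copy an IB SGE list into the qplib SGE format and return the total
 * payload length in bytes.
 */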
static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
			     struct bnxt_qplib_sge *sg_list, int num)
{
	int i, total = 0;

	for (i = 0; i < num; i++) {
		sg_list[i].addr = ib_sg_list[i].addr;
		sg_list[i].lkey = ib_sg_list[i].lkey;
		sg_list[i].size = ib_sg_list[i].length;
		total += sg_list[i].size;
	}
	return total;
}

/* Device */
int bnxt_re_query_device(struct ib_device *ibdev,
			 struct ib_device_attr *ib_attr,
			 struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;

	memset(ib_attr, 0, sizeof(*ib_attr));
	memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
	       min(sizeof(dev_attr->fw_ver),
		   sizeof(ib_attr->fw_ver)));
	addrconf_addr_eui48((u8 *)&ib_attr->sys_image_guid,
			    rdev->netdev->dev_addr);
	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_SUPPORTED;

	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
	ib_attr->hw_ver = rdev->en_dev->pdev->revision;
	ib_attr->max_qp = dev_attr->max_qp;
	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
	ib_attr->device_cap_flags =
				    IB_DEVICE_CURR_QP_STATE_MOD
				    | IB_DEVICE_RC_RNR_NAK_GEN
				    | IB_DEVICE_SHUTDOWN_PORT
				    | IB_DEVICE_SYS_IMAGE_GUID
				    | IB_DEVICE_RESIZE_MAX_WR
				    | IB_DEVICE_PORT_ACTIVE_EVENT
				    | IB_DEVICE_N_NOTIFY_CQ
				    | IB_DEVICE_MEM_WINDOW
				    | IB_DEVICE_MEM_WINDOW_TYPE_2B
				    | IB_DEVICE_MEM_MGT_EXTENSIONS;
	ib_attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
	ib_attr->max_send_sge = dev_attr->max_qp_sges;
	ib_attr->max_recv_sge = dev_attr->max_qp_sges;
	ib_attr->max_sge_rd = dev_attr->max_qp_sges;
	ib_attr->max_cq = dev_attr->max_cq;
	ib_attr->max_cqe = dev_attr->max_cq_wqes;
	ib_attr->max_mr = dev_attr->max_mr;
	ib_attr->max_pd = dev_attr->max_pd;
	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
	ib_attr->atomic_cap = IB_ATOMIC_NONE;
	ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
	if (dev_attr->is_atomic) {
		ib_attr->atomic_cap = IB_ATOMIC_GLOB;
		ib_attr->masked_atomic_cap = IB_ATOMIC_GLOB;
	}

	ib_attr->max_ee_rd_atom = 0;
	ib_attr->max_res_rd_atom = 0;
	ib_attr->max_ee_init_rd_atom = 0;
	ib_attr->max_ee = 0;
	ib_attr->max_rdd = 0;
	ib_attr->max_mw = dev_attr->max_mw;
	ib_attr->max_raw_ipv6_qp = 0;
	ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
	ib_attr->max_mcast_grp = 0;
	ib_attr->max_mcast_qp_attach = 0;
	ib_attr->max_total_mcast_qp_attach = 0;
	ib_attr->max_ah = dev_attr->max_ah;

	ib_attr->max_srq = dev_attr->max_srq;
	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
	ib_attr->max_srq_sge = dev_attr->max_srq_sges;

	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;

	ib_attr->max_pkeys = 1;
	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
	return 0;
}

int bnxt_re_modify_device(struct ib_device *ibdev,
			  int device_modify_mask,
			  struct ib_device_modify *device_modify)
{
	ibdev_dbg(ibdev, "Modify device with mask 0x%x", device_modify_mask);

	if (device_modify_mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	memcpy(ibdev->node_desc, device_modify->node_desc, IB_DEVICE_NODE_DESC_MAX);
	return 0;
}

/* Port */
int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
		       struct ib_port_attr *port_attr)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
	int rc;

	memset(port_attr, 0, sizeof(*port_attr));

	if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
		port_attr->state = IB_PORT_ACTIVE;
		port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	} else {
		port_attr->state = IB_PORT_DOWN;
		port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	}
	port_attr->max_mtu = IB_MTU_4096;
	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
	port_attr->gid_tbl_len = dev_attr->max_sgid;
	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				    IB_PORT_DEVICE_MGMT_SUP |
				    IB_PORT_VENDOR_CLASS_SUP;
	port_attr->ip_gids = true;

	port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
	port_attr->bad_pkey_cntr = 0;
	port_attr->qkey_viol_cntr = 0;
	port_attr->pkey_tbl_len = dev_attr->max_pkey;
	port_attr->lid = 0;
	port_attr->sm_lid = 0;
	port_attr->lmc = 0;
	port_attr->max_vl_num = 4;
	port_attr->sm_sl = 0;
	port_attr->subnet_timeout = 0;
	port_attr->init_type_reply = 0;
	rc = ib_get_eth_speed(&rdev->ibdev, port_num, &port_attr->active_speed,
			      &port_attr->active_width);

	return rc;
}

int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr port_attr;

	if (bnxt_re_query_port(ibdev, port_num, &port_attr))
		return -EINVAL;

	immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
	immutable->gid_tbl_len = port_attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
	immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}

void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
		 rdev->dev_attr->fw_ver[0], rdev->dev_attr->fw_ver[1],
		 rdev->dev_attr->fw_ver[2], rdev->dev_attr->fw_ver[3]);
}

int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
		       u16 index, u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = IB_DEFAULT_PKEY_FULL;

	return 0;
}

int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
		      int index, union ib_gid *gid)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	int rc;

	/* Ignore port_num */
	memset(gid, 0, sizeof(*gid));
	rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
				 &rdev->qplib_res.sgid_tbl, index,
				 (struct bnxt_qplib_gid *)gid);
	return rc;
}

int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
{
	int rc = 0;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid *gid_to_del;
	u16 vlan_id = 0xFFFF;

	/* Delete the entry from the hardware */
	ctx = *context;
	if (!ctx)
		return -EINVAL;

	if (sgid_tbl && sgid_tbl->active) {
		if (ctx->idx >= sgid_tbl->max)
			return -EINVAL;
		gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
		vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
		/* DEL_GID is called in WQ context (netdevice_event_work_handler)
		 * or via the ib_unregister_device path. In the former case QP1
		 * may not be destroyed yet, in which case just return as FW
		 * needs that entry to be present and will fail its deletion.
		 * We could get invoked again after QP1 is destroyed OR get an
		 * ADD_GID call with a different GID value for the same index
		 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
		 */
		if (ctx->idx == 0 &&
		    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
		    ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
			ibdev_dbg(&rdev->ibdev,
				  "Trying to delete GID0 while QP1 is alive\n");
			return -EFAULT;
		}
		ctx->refcnt--;
		if (!ctx->refcnt) {
			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
						 vlan_id, true);
			if (rc) {
				ibdev_err(&rdev->ibdev,
					  "Failed to remove GID: %#x", rc);
			} else {
				ctx_tbl = sgid_tbl->ctx;
				ctx_tbl[ctx->idx] = NULL;
				kfree(ctx);
			}
		}
	} else {
		return -EINVAL;
	}
	return rc;
}

int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
{
	int rc;
	u32 tbl_idx = 0;
	u16 vlan_id = 0xFFFF;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;

	rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
	if (rc)
		return rc;

	rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
				 rdev->qplib_res.netdev->dev_addr,
				 vlan_id, true, &tbl_idx);
	if (rc == -EALREADY) {
		ctx_tbl = sgid_tbl->ctx;
		ctx_tbl[tbl_idx]->refcnt++;
		*context = ctx_tbl[tbl_idx];
		return 0;
	}

	if (rc < 0) {
		ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc);
		return rc;
	}

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx_tbl = sgid_tbl->ctx;
	ctx->idx = tbl_idx;
	ctx->refcnt = 1;
	ctx_tbl[tbl_idx] = ctx;
	*context = ctx;

	return rc;
}

enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
					    u32 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

#define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)

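/* Pre-build the type-1 memory window bind WQE used for posting the
 * local fence. Gen P5/P7 chips do not need this and return early.
 */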
static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
{
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct ib_mr *ib_mr = &fence->mr->ib_mr;
	struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
	struct bnxt_re_dev *rdev = pd->rdev;

	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
		return;

	memset(wqe, 0, sizeof(*wqe));
	wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
	wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	wqe->bind.zero_based = false;
	wqe->bind.parent_l_key = ib_mr->lkey;
	wqe->bind.va = (u64)(unsigned long)fence->va;
	wqe->bind.length = fence->size;
	wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
	wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;

	/* Save the initial rkey in fence structure for now;
	 * wqe->bind.r_key will be set at (re)bind time.
	 */
	fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
}

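/* Post the pre-built fence bind WQE on the QP's send queue with a fresh
 * rkey and ring the send doorbell.
 */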
static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
{
	struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
					     qplib_qp);
	struct ib_pd *ib_pd = qp->ib_qp.pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
	struct bnxt_qplib_swqe wqe;
	int rc;

	memcpy(&wqe, fence_wqe, sizeof(wqe));
	wqe.bind.r_key = fence->bind_rkey;
	fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);

	ibdev_dbg(&qp->rdev->ibdev,
		  "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
		  wqe.bind.r_key, qp->qplib_qp.id, pd);
	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
	if (rc) {
		ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n");
		return rc;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);

	return rc;
}

static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
{
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct device *dev = &rdev->en_dev->pdev->dev;
	struct bnxt_re_mr *mr = fence->mr;

	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
		return;

	if (fence->mw) {
		bnxt_re_dealloc_mw(fence->mw);
		fence->mw = NULL;
	}
	if (mr) {
		if (mr->ib_mr.rkey)
			bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
					     true);
		if (mr->ib_mr.lkey)
			bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
		kfree(mr);
		fence->mr = NULL;
	}
	if (fence->dma_addr) {
		dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
				 DMA_BIDIRECTIONAL);
		fence->dma_addr = 0;
	}
}

static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
{
	int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct device *dev = &rdev->en_dev->pdev->dev;
	struct bnxt_re_mr *mr = NULL;
	dma_addr_t dma_addr = 0;
	struct ib_mw *mw;
	int rc;

	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
		return 0;

	dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
				  DMA_BIDIRECTIONAL);
	rc = dma_mapping_error(dev, dma_addr);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n");
		rc = -EIO;
		fence->dma_addr = 0;
		goto fail;
	}
	fence->dma_addr = dma_addr;

	/* Allocate a MR */
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rc = -ENOMEM;
		goto fail;
	}
	fence->mr = mr;
	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
	mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
	if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) {
		rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
		if (rc) {
			ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
			goto fail;
		}

		/* Register MR */
		mr->ib_mr.lkey = mr->qplib_mr.lkey;
	} else {
		mr->qplib_mr.flags = CMDQ_REGISTER_MR_FLAGS_ALLOC_MR;
	}
	mr->qplib_mr.va = (u64)(unsigned long)fence->va;
	mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
			       BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
		goto fail;
	}
	mr->ib_mr.rkey = mr->qplib_mr.rkey;

	/* Create a fence MW only for kernel consumers */
	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
	if (IS_ERR(mw)) {
		ibdev_err(&rdev->ibdev,
			  "Failed to create fence-MW for PD: %p\n", pd);
		rc = PTR_ERR(mw);
		goto fail;
	}
	fence->mw = mw;

	bnxt_re_create_fence_wqe(pd);
	return 0;

fail:
	bnxt_re_destroy_fence_mr(pd);
	return rc;
}

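/* Allocate and register an rdma_user_mmap entry for the given mapping
 * type. The shared page uses a fixed offset of 0; all other types get
 * an offset assigned by the core. Returns NULL on failure.
 */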
static struct bnxt_re_user_mmap_entry*
bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
			  enum bnxt_re_mmap_flag mmap_flag, u64 *offset)
{
	struct bnxt_re_user_mmap_entry *entry;
	int ret;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->mem_offset = mem_offset;
	entry->mmap_flag = mmap_flag;
	entry->uctx = uctx;

	switch (mmap_flag) {
	case BNXT_RE_MMAP_SH_PAGE:
		ret = rdma_user_mmap_entry_insert_exact(&uctx->ib_uctx,
							&entry->rdma_entry, PAGE_SIZE, 0);
		break;
	case BNXT_RE_MMAP_UC_DB:
	case BNXT_RE_MMAP_WC_DB:
	case BNXT_RE_MMAP_DBR_BAR:
	case BNXT_RE_MMAP_DBR_PAGE:
	case BNXT_RE_MMAP_TOGGLE_PAGE:
		ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
						  &entry->rdma_entry, PAGE_SIZE);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret) {
		kfree(entry);
		return NULL;
	}
	if (offset)
		*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return entry;
}

/* Protection Domains */
int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;

	if (udata) {
		rdma_user_mmap_entry_remove(pd->pd_db_mmap);
		pd->pd_db_mmap = NULL;
	}

	bnxt_re_destroy_fence_mr(pd);

	if (pd->qplib_pd.id) {
		if (!bnxt_qplib_dealloc_pd(&rdev->qplib_res,
					   &rdev->qplib_res.pd_tbl,
					   &pd->qplib_pd))
			atomic_dec(&rdev->stats.res.pd_count);
	}
	return 0;
}

int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct ib_device *ibdev = ibpd->device;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
		udata, struct bnxt_re_ucontext, ib_uctx);
	struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_user_mmap_entry *entry = NULL;
	u32 active_pds;
	int rc = 0;

	pd->rdev = rdev;
	if (bnxt_qplib_alloc_pd(&rdev->qplib_res, &pd->qplib_pd)) {
		ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
		rc = -ENOMEM;
		goto fail;
	}

	if (udata) {
		struct bnxt_re_pd_resp resp = {};

		if (!ucntx->dpi.dbr) {
			/* Allocate the DPI here in alloc_pd so that
			 * ibv_devinfo and similar applications do not
			 * fail when DPIs are depleted.
			 */
			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res,
						 &ucntx->dpi, ucntx, BNXT_QPLIB_DPI_TYPE_UC)) {
				rc = -ENOMEM;
				goto dbfail;
			}
		}

		resp.pdid = pd->qplib_pd.id;
		/* Still allow mapping this DBR to the new user PD. */
		resp.dpi = ucntx->dpi.dpi;

		entry = bnxt_re_mmap_entry_insert(ucntx, (u64)ucntx->dpi.umdbr,
						  BNXT_RE_MMAP_UC_DB, &resp.dbr);

		if (!entry) {
			rc = -ENOMEM;
			goto dbfail;
		}

		pd->pd_db_mmap = &entry->rdma_entry;

		rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
		if (rc) {
			rdma_user_mmap_entry_remove(pd->pd_db_mmap);
			rc = -EFAULT;
			goto dbfail;
		}
	}

	if (!udata)
		if (bnxt_re_create_fence_mr(pd))
			ibdev_warn(&rdev->ibdev,
				   "Failed to create Fence-MR\n");
	active_pds = atomic_inc_return(&rdev->stats.res.pd_count);
	if (active_pds > rdev->stats.res.pd_watermark)
		rdev->stats.res.pd_watermark = active_pds;

	return 0;
dbfail:
	bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
			      &pd->qplib_pd);
fail:
	return rc;
}

/* Address Handles */
int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
{
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
	struct bnxt_re_dev *rdev = ah->rdev;
	bool block = true;
	int rc;

	block = !(flags & RDMA_DESTROY_AH_SLEEPABLE);
	rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
	if (BNXT_RE_CHECK_RC(rc)) {
		if (rc == -ETIMEDOUT)
			rc = 0;
		else
			goto fail;
	}
	atomic_dec(&rdev->stats.res.ah_count);
fail:
	return rc;
}

static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
{
	u8 nw_type;

	switch (ntype) {
	case RDMA_NETWORK_IPV4:
		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
		break;
	case RDMA_NETWORK_IPV6:
		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
		break;
	default:
		nw_type = CMDQ_CREATE_AH_TYPE_V1;
		break;
	}
	return nw_type;
}

int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
		      struct ib_udata *udata)
{
	struct ib_pd *ib_pd = ib_ah->pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	struct bnxt_re_dev *rdev = pd->rdev;
	const struct ib_gid_attr *sgid_attr;
	struct bnxt_re_gid_ctx *ctx;
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
	u32 active_ahs;
	u8 nw_type;
	int rc;

	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
		ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set");
		return -EINVAL;
	}

	ah->rdev = rdev;
	ah->qplib_ah.pd = &pd->qplib_pd;

	/* Supply the configuration for the HW */
	memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
	       sizeof(union ib_gid));
	sgid_attr = grh->sgid_attr;
	/* Get the HW context of the GID. The reference
	 * of GID table entry is already taken by the caller.
	 */
	ctx = rdma_read_gid_hw_context(sgid_attr);
	ah->qplib_ah.sgid_index = ctx->idx;
	ah->qplib_ah.host_sgid_index = grh->sgid_index;
	ah->qplib_ah.traffic_class = grh->traffic_class;
	ah->qplib_ah.flow_label = grh->flow_label;
	ah->qplib_ah.hop_limit = grh->hop_limit;
	ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);

	/* Get network header type for this GID */
	nw_type = rdma_gid_attr_network_type(sgid_attr);
	ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);

	memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
				  !(init_attr->flags &
				    RDMA_CREATE_AH_SLEEPABLE));
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
		return rc;
	}

	/* Write AVID to shared page. */
	if (udata) {
		struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
			udata, struct bnxt_re_ucontext, ib_uctx);
		unsigned long flag;
		u32 *wrptr;

		spin_lock_irqsave(&uctx->sh_lock, flag);
		wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
		*wrptr = ah->qplib_ah.id;
		wmb();	/* make sure cache is updated. */
		spin_unlock_irqrestore(&uctx->sh_lock, flag);
	}
	active_ahs = atomic_inc_return(&rdev->stats.res.ah_count);
	if (active_ahs > rdev->stats.res.ah_watermark)
		rdev->stats.res.ah_watermark = active_ahs;

	return 0;
}

int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);

	ah_attr->type = ib_ah->type;
	rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
	memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
	rdma_ah_set_grh(ah_attr, NULL, 0,
			ah->qplib_ah.host_sgid_index,
			0, ah->qplib_ah.traffic_class);
	rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
	rdma_ah_set_port_num(ah_attr, 1);
	rdma_ah_set_static_rate(ah_attr, 0);
	return 0;
}

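/* Lock the send and receive CQs of a QP, taking a single lock when both
 * point to the same CQ. Returns the saved IRQ flags for the unlock path.
 */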
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
	__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->scq->cq_lock, flags);
	if (qp->rcq != qp->scq)
		spin_lock(&qp->rcq->cq_lock);
	else
		__acquire(&qp->rcq->cq_lock);

	return flags;
}

void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
			unsigned long flags)
	__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
{
	if (qp->rcq != qp->scq)
		spin_unlock(&qp->rcq->cq_lock);
	else
		__release(&qp->rcq->cq_lock);
	spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
}

static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
{
	struct bnxt_re_qp *gsi_sqp;
	struct bnxt_re_ah *gsi_sah;
	struct bnxt_re_dev *rdev;
	int rc;

	rdev = qp->rdev;
	gsi_sqp = rdev->gsi_ctx.gsi_sqp;
	gsi_sah = rdev->gsi_ctx.gsi_sah;

	ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
	bnxt_qplib_destroy_ah(&rdev->qplib_res,
			      &gsi_sah->qplib_ah,
			      true);
	atomic_dec(&rdev->stats.res.ah_count);
	bnxt_qplib_clean_qp(&qp->qplib_qp);

	ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
		goto fail;
	}
	bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);

	/* remove from active qp list */
	mutex_lock(&rdev->qp_lock);
	list_del(&gsi_sqp->list);
	mutex_unlock(&rdev->qp_lock);
	atomic_dec(&rdev->stats.res.qp_count);

	kfree(rdev->gsi_ctx.sqp_tbl);
	kfree(gsi_sah);
	kfree(gsi_sqp);
	rdev->gsi_ctx.gsi_sqp = NULL;
	rdev->gsi_ctx.gsi_sah = NULL;
	rdev->gsi_ctx.sqp_tbl = NULL;

	return 0;
fail:
	return rc;
}

/* Queue Pairs */
int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
	struct bnxt_re_dev *rdev = qp->rdev;
	struct bnxt_qplib_nq *scq_nq = NULL;
	struct bnxt_qplib_nq *rcq_nq = NULL;
	unsigned int flags;
	int rc;

	bnxt_re_debug_rem_qpinfo(rdev, qp);

	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);

	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");

	if (rdma_is_kernel_res(&qp->ib_qp.res)) {
		flags = bnxt_re_lock_cqs(qp);
		bnxt_qplib_clean_qp(&qp->qplib_qp);
		bnxt_re_unlock_cqs(qp, flags);
	}

	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);

	if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
		bnxt_re_destroy_gsi_sqp(qp);

	mutex_lock(&rdev->qp_lock);
	list_del(&qp->list);
	mutex_unlock(&rdev->qp_lock);
	atomic_dec(&rdev->stats.res.qp_count);
	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RC)
		atomic_dec(&rdev->stats.res.rc_qp_count);
	else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
		atomic_dec(&rdev->stats.res.ud_qp_count);

	ib_umem_release(qp->rumem);
	ib_umem_release(qp->sumem);

	/* Flush all the entries of notification queue associated with
	 * given qp.
	 */
	scq_nq = qplib_qp->scq->nq;
	rcq_nq = qplib_qp->rcq->nq;
	bnxt_re_synchronize_nq(scq_nq);
	if (scq_nq != rcq_nq)
		bnxt_re_synchronize_nq(rcq_nq);

	return 0;
}

static u8 __from_ib_qp_type(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_GSI:
		return CMDQ_CREATE_QP1_TYPE_GSI;
	case IB_QPT_RC:
		return CMDQ_CREATE_QP_TYPE_RC;
	case IB_QPT_UD:
		return CMDQ_CREATE_QP_TYPE_UD;
	default:
		return IB_QPT_MAX;
	}
}

static u16 bnxt_re_setup_rwqe_size(struct bnxt_qplib_qp *qplqp,
				   int rsge, int max)
{
	if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
		rsge = max;
	return bnxt_re_get_rwqe_size(rsge);
}

static u16 bnxt_re_get_wqe_size(int ilsize, int nsge)
{
	u16 wqe_size, calc_ils;

	wqe_size = bnxt_re_get_swqe_size(nsge);
	if (ilsize) {
		calc_ils = sizeof(struct sq_send_hdr) + ilsize;
		wqe_size = max_t(u16, calc_ils, wqe_size);
		wqe_size = ALIGN(wqe_size, sizeof(struct sq_send_hdr));
	}
	return wqe_size;
}

static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
				   struct ib_qp_init_attr *init_attr)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	struct bnxt_qplib_q *sq;
	int align, ilsize;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	sq = &qplqp->sq;
	dev_attr = rdev->dev_attr;

	align = sizeof(struct sq_send_hdr);
	ilsize = ALIGN(init_attr->cap.max_inline_data, align);

	/* For Gen P4 and Gen P5 fixed-WQE compatibility mode the
	 * WQE size is fixed to 128 bytes, i.e. 6 SGEs.
	 */
	if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) {
		sq->wqe_size = bnxt_re_get_swqe_size(BNXT_STATIC_MAX_SGE);
		sq->max_sge = BNXT_STATIC_MAX_SGE;
	} else {
		sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
		if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
			return -EINVAL;
	}

	if (init_attr->cap.max_inline_data) {
		qplqp->max_inline_data = sq->wqe_size -
					 sizeof(struct sq_send_hdr);
		init_attr->cap.max_inline_data = qplqp->max_inline_data;
	}

	return 0;
}

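/* Pin the user-space SQ (and RQ, when no SRQ is used) buffers with
 * ib_umem_get() and size the SQ area to also hold the PSN search
 * entries required for RC QPs.
 */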
static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
				struct bnxt_re_qp *qp, struct bnxt_re_ucontext *cntx,
				struct bnxt_re_qp_req *ureq)
{
	struct bnxt_qplib_qp *qplib_qp;
	int bytes = 0, psn_sz;
	struct ib_umem *umem;
	int psn_nume;

	qplib_qp = &qp->qplib_qp;

	bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
	/* Consider mapping PSN search memory only for RC QPs. */
	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);
		if (cntx && bnxt_re_is_var_size_supported(rdev, cntx)) {
			psn_nume = ureq->sq_slots;
		} else {
			psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
				   qplib_qp->sq.max_wqe : ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
							   sizeof(struct bnxt_qplib_sge));
		}
		if (_is_host_msn_table(rdev->qplib_res.dattr->dev_cap_flags2))
			psn_nume = roundup_pow_of_two(psn_nume);
		bytes += (psn_nume * psn_sz);
	}

	bytes = PAGE_ALIGN(bytes);
	umem = ib_umem_get(&rdev->ibdev, ureq->qpsva, bytes,
			   IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	qp->sumem = umem;
	qplib_qp->sq.sg_info.umem = umem;
	qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
	qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
	qplib_qp->qp_handle = ureq->qp_handle;

	if (!qp->qplib_qp.srq) {
		bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
		bytes = PAGE_ALIGN(bytes);
		umem = ib_umem_get(&rdev->ibdev, ureq->qprva, bytes,
				   IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(umem))
			goto rqfail;
		qp->rumem = umem;
		qplib_qp->rq.sg_info.umem = umem;
		qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
		qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
	}

	qplib_qp->dpi = &cntx->dpi;
	return 0;
rqfail:
	ib_umem_release(qp->sumem);
	qp->sumem = NULL;
	memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));

	return PTR_ERR(umem);
}

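/* Create the AH used by the shadow GSI QP: the DGID is the local SGID at
 * index 0 and the DMAC is the port MAC, i.e. a loopback destination.
 */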
static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
				(struct bnxt_re_pd *pd,
				 struct bnxt_qplib_res *qp1_res,
				 struct bnxt_qplib_qp *qp1_qp)
{
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_ah *ah;
	union ib_gid sgid;
	int rc;

	ah = kzalloc(sizeof(*ah), GFP_KERNEL);
	if (!ah)
		return NULL;

	ah->rdev = rdev;
	ah->qplib_ah.pd = &pd->qplib_pd;

	rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
	if (rc)
		goto fail;

	/* supply the dgid data same as sgid */
	memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
	       sizeof(union ib_gid));
	ah->qplib_ah.sgid_index = 0;

	ah->qplib_ah.traffic_class = 0;
	ah->qplib_ah.flow_label = 0;
	ah->qplib_ah.hop_limit = 1;
	ah->qplib_ah.sl = 0;
	/* Have DMAC same as SMAC */
	ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);

	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate HW AH for Shadow QP");
		goto fail;
	}
	atomic_inc(&rdev->stats.res.ah_count);

	return ah;

fail:
	kfree(ah);
	return NULL;
}

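/* Create the shadow UD QP used to handle QP1 traffic (only on chips that
 * are not Gen P5/P7): its SQ mirrors the QP1 RQ depth and it shares the
 * QP1 completion queues.
 */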
1197 | static struct bnxt_re_qp *bnxt_re_create_shadow_qp |
1198 | (struct bnxt_re_pd *pd, |
1199 | struct bnxt_qplib_res *qp1_res, |
1200 | struct bnxt_qplib_qp *qp1_qp) |
1201 | { |
1202 | struct bnxt_re_dev *rdev = pd->rdev; |
1203 | struct bnxt_re_qp *qp; |
1204 | int rc; |
1205 | |
1206 | qp = kzalloc(sizeof(*qp), GFP_KERNEL); |
1207 | if (!qp) |
1208 | return NULL; |
1209 | |
1210 | qp->rdev = rdev; |
1211 | |
1212 | /* Initialize the shadow QP structure from the QP1 values */ |
1213 | ether_addr_copy(dst: qp->qplib_qp.smac, src: rdev->netdev->dev_addr); |
1214 | |
1215 | qp->qplib_qp.pd = &pd->qplib_pd; |
1216 | qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp); |
1217 | qp->qplib_qp.type = IB_QPT_UD; |
1218 | |
1219 | qp->qplib_qp.max_inline_data = 0; |
1220 | qp->qplib_qp.sig_type = true; |
1221 | |
1222 | /* Shadow QP SQ depth should be same as QP1 RQ depth */ |
1223 | qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(ilsize: 0, nsge: 6); |
1224 | qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe; |
1225 | qp->qplib_qp.sq.max_sw_wqe = qp1_qp->rq.max_wqe; |
1226 | qp->qplib_qp.sq.max_sge = 2; |
1227 | /* Q full delta can be 1 since it is internal QP */ |
1228 | qp->qplib_qp.sq.q_full_delta = 1; |
1229 | qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE; |
1230 | qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT; |
1231 | |
1232 | qp->qplib_qp.scq = qp1_qp->scq; |
1233 | qp->qplib_qp.rcq = qp1_qp->rcq; |
1234 | |
1235 | qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(nsge: 6); |
1236 | qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe; |
1237 | qp->qplib_qp.rq.max_sw_wqe = qp1_qp->rq.max_wqe; |
1238 | qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge; |
1239 | /* Q full delta can be 1 since it is internal QP */ |
1240 | qp->qplib_qp.rq.q_full_delta = 1; |
1241 | qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE; |
1242 | qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT; |
1243 | |
1244 | qp->qplib_qp.mtu = qp1_qp->mtu; |
1245 | |
1246 | qp->qplib_qp.sq_hdr_buf_size = 0; |
1247 | qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6; |
1248 | qp->qplib_qp.dpi = &rdev->dpi_privileged; |
1249 | |
1250 | rc = bnxt_qplib_create_qp(res: qp1_res, qp: &qp->qplib_qp); |
1251 | if (rc) |
1252 | goto fail; |
1253 | |
1254 | spin_lock_init(&qp->sq_lock); |
1255 | INIT_LIST_HEAD(list: &qp->list); |
1256 | mutex_lock(&rdev->qp_lock); |
1257 | list_add_tail(new: &qp->list, head: &rdev->qp_list); |
1258 | atomic_inc(v: &rdev->stats.res.qp_count); |
1259 | mutex_unlock(lock: &rdev->qp_lock); |
1260 | return qp; |
1261 | fail: |
1262 | kfree(objp: qp); |
1263 | return NULL; |
1264 | } |
1265 | |
1266 | static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp, |
1267 | struct ib_qp_init_attr *init_attr, |
1268 | struct bnxt_re_ucontext *uctx) |
1269 | { |
1270 | struct bnxt_qplib_dev_attr *dev_attr; |
1271 | struct bnxt_qplib_qp *qplqp; |
1272 | struct bnxt_re_dev *rdev; |
1273 | struct bnxt_qplib_q *rq; |
1274 | int entries; |
1275 | |
1276 | rdev = qp->rdev; |
1277 | qplqp = &qp->qplib_qp; |
1278 | rq = &qplqp->rq; |
1279 | dev_attr = rdev->dev_attr; |
1280 | |
1281 | if (init_attr->srq) { |
1282 | struct bnxt_re_srq *srq; |
1283 | |
1284 | srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq); |
1285 | qplqp->srq = &srq->qplib_srq; |
1286 | rq->max_wqe = 0; |
1287 | } else { |
1288 | rq->max_sge = init_attr->cap.max_recv_sge; |
1289 | if (rq->max_sge > dev_attr->max_qp_sges) |
1290 | rq->max_sge = dev_attr->max_qp_sges; |
1291 | init_attr->cap.max_recv_sge = rq->max_sge; |
1292 | rq->wqe_size = bnxt_re_setup_rwqe_size(qplqp, rsge: rq->max_sge, |
1293 | max: dev_attr->max_qp_sges); |
1294 | /* Allocate 1 more than what's provided so posting max doesn't |
1295 | * mean empty. |
1296 | */ |
1297 | entries = bnxt_re_init_depth(ent: init_attr->cap.max_recv_wr + 1, uctx); |
1298 | rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1); |
1299 | rq->max_sw_wqe = rq->max_wqe; |
1300 | rq->q_full_delta = 0; |
1301 | rq->sg_info.pgsize = PAGE_SIZE; |
1302 | rq->sg_info.pgshft = PAGE_SHIFT; |
1303 | } |
1304 | |
1305 | return 0; |
1306 | } |
1307 | |
1308 | static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp) |
1309 | { |
1310 | struct bnxt_qplib_dev_attr *dev_attr; |
1311 | struct bnxt_qplib_qp *qplqp; |
1312 | struct bnxt_re_dev *rdev; |
1313 | |
1314 | rdev = qp->rdev; |
1315 | qplqp = &qp->qplib_qp; |
1316 | dev_attr = rdev->dev_attr; |
1317 | |
1318 | if (!bnxt_qplib_is_chip_gen_p5_p7(cctx: rdev->chip_ctx)) { |
1319 | qplqp->rq.max_sge = dev_attr->max_qp_sges; |
1320 | if (qplqp->rq.max_sge > dev_attr->max_qp_sges) |
1321 | qplqp->rq.max_sge = dev_attr->max_qp_sges; |
1322 | qplqp->rq.max_sge = 6; |
1323 | } |
1324 | } |
1325 | |
1326 | static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp, |
1327 | struct ib_qp_init_attr *init_attr, |
1328 | struct bnxt_re_ucontext *uctx, |
1329 | struct bnxt_re_qp_req *ureq) |
1330 | { |
1331 | struct bnxt_qplib_dev_attr *dev_attr; |
1332 | struct bnxt_qplib_qp *qplqp; |
1333 | struct bnxt_re_dev *rdev; |
1334 | struct bnxt_qplib_q *sq; |
1335 | int diff = 0; |
1336 | int entries; |
1337 | int rc; |
1338 | |
1339 | rdev = qp->rdev; |
1340 | qplqp = &qp->qplib_qp; |
1341 | sq = &qplqp->sq; |
1342 | dev_attr = rdev->dev_attr; |
1343 | |
1344 | sq->max_sge = init_attr->cap.max_send_sge; |
1345 | entries = init_attr->cap.max_send_wr; |
1346 | if (uctx && qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) { |
1347 | sq->max_wqe = ureq->sq_slots; |
1348 | sq->max_sw_wqe = ureq->sq_slots; |
1349 | sq->wqe_size = sizeof(struct sq_sge); |
1350 | } else { |
1351 | if (sq->max_sge > dev_attr->max_qp_sges) { |
1352 | sq->max_sge = dev_attr->max_qp_sges; |
1353 | init_attr->cap.max_send_sge = sq->max_sge; |
1354 | } |
1355 | |
1356 | rc = bnxt_re_setup_swqe_size(qp, init_attr); |
1357 | if (rc) |
1358 | return rc; |
1359 | |
1360 | /* Allocate 128 + 1 more than what's provided */ |
1361 | diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ? |
1362 | 0 : BNXT_QPLIB_RESERVED_QP_WRS; |
1363 | entries = bnxt_re_init_depth(ent: entries + diff + 1, uctx); |
1364 | sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1); |
1365 | if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) |
1366 | sq->max_sw_wqe = bnxt_qplib_get_depth(que: sq, wqe_mode: qplqp->wqe_mode, is_sq: true); |
1367 | else |
1368 | sq->max_sw_wqe = sq->max_wqe; |
1369 | |
1370 | } |
1371 | sq->q_full_delta = diff + 1; |
1372 | /* |
1373 | * Reserving one slot for Phantom WQE. Application can |
1374 | * post one extra entry in this case. But allowing this to avoid |
1375 | * unexpected Queue full condition |
1376 | */ |
1377 | qplqp->sq.q_full_delta -= 1; |
1378 | qplqp->sq.sg_info.pgsize = PAGE_SIZE; |
1379 | qplqp->sq.sg_info.pgshft = PAGE_SHIFT; |
1380 | |
1381 | return 0; |
1382 | } |
1383 | |
1384 | static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp, |
1385 | struct ib_qp_init_attr *init_attr, |
1386 | struct bnxt_re_ucontext *uctx) |
1387 | { |
1388 | struct bnxt_qplib_dev_attr *dev_attr; |
1389 | struct bnxt_qplib_qp *qplqp; |
1390 | struct bnxt_re_dev *rdev; |
1391 | int entries; |
1392 | |
1393 | rdev = qp->rdev; |
1394 | qplqp = &qp->qplib_qp; |
1395 | dev_attr = rdev->dev_attr; |
1396 | |
1397 | if (!bnxt_qplib_is_chip_gen_p5_p7(cctx: rdev->chip_ctx)) { |
1398 | entries = bnxt_re_init_depth(ent: init_attr->cap.max_send_wr + 1, uctx); |
1399 | qplqp->sq.max_wqe = min_t(u32, entries, |
1400 | dev_attr->max_qp_wqes + 1); |
1401 | qplqp->sq.q_full_delta = qplqp->sq.max_wqe - |
1402 | init_attr->cap.max_send_wr; |
1403 | qplqp->sq.max_sge++; /* Need one extra sge to put UD header */ |
1404 | if (qplqp->sq.max_sge > dev_attr->max_qp_sges) |
1405 | qplqp->sq.max_sge = dev_attr->max_qp_sges; |
1406 | } |
1407 | } |
1408 | |
1409 | static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev, |
1410 | struct ib_qp_init_attr *init_attr) |
1411 | { |
1412 | struct bnxt_qplib_chip_ctx *chip_ctx; |
1413 | int qptype; |
1414 | |
1415 | chip_ctx = rdev->chip_ctx; |
1416 | |
1417 | qptype = __from_ib_qp_type(type: init_attr->qp_type); |
1418 | if (qptype == IB_QPT_MAX) { |
1419 | ibdev_err(ibdev: &rdev->ibdev, format: "QP type 0x%x not supported", qptype); |
1420 | qptype = -EOPNOTSUPP; |
1421 | goto out; |
1422 | } |
1423 | |
1424 | if (bnxt_qplib_is_chip_gen_p5_p7(cctx: chip_ctx) && |
1425 | init_attr->qp_type == IB_QPT_GSI) |
1426 | qptype = CMDQ_CREATE_QP_TYPE_GSI; |
1427 | out: |
1428 | return qptype; |
1429 | } |
1430 | |
1431 | static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd, |
1432 | struct ib_qp_init_attr *init_attr, |
1433 | struct bnxt_re_ucontext *uctx, |
1434 | struct bnxt_re_qp_req *ureq) |
1435 | { |
1436 | struct bnxt_qplib_dev_attr *dev_attr; |
1437 | struct bnxt_qplib_qp *qplqp; |
1438 | struct bnxt_re_dev *rdev; |
1439 | struct bnxt_re_cq *cq; |
1440 | int rc = 0, qptype; |
1441 | |
1442 | rdev = qp->rdev; |
1443 | qplqp = &qp->qplib_qp; |
1444 | dev_attr = rdev->dev_attr; |
1445 | |
1446 | /* Setup misc params */ |
1447 | ether_addr_copy(dst: qplqp->smac, src: rdev->netdev->dev_addr); |
1448 | qplqp->pd = &pd->qplib_pd; |
1449 | qplqp->qp_handle = (u64)qplqp; |
1450 | qplqp->max_inline_data = init_attr->cap.max_inline_data; |
1451 | qplqp->sig_type = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR; |
1452 | qptype = bnxt_re_init_qp_type(rdev, init_attr); |
1453 | if (qptype < 0) { |
1454 | rc = qptype; |
1455 | goto out; |
1456 | } |
1457 | qplqp->type = (u8)qptype; |
1458 | qplqp->wqe_mode = bnxt_re_is_var_size_supported(rdev, uctx); |
1459 | if (init_attr->qp_type == IB_QPT_RC) { |
1460 | qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom; |
1461 | qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom; |
1462 | } |
1463 | qplqp->mtu = ib_mtu_enum_to_int(mtu: iboe_get_mtu(mtu: rdev->netdev->mtu)); |
1464 | qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */ |
1465 | if (init_attr->create_flags) { |
1466 | ibdev_dbg(&rdev->ibdev, |
1467 | "QP create flags 0x%x not supported", |
1468 | init_attr->create_flags); |
1469 | return -EOPNOTSUPP; |
1470 | } |
1471 | |
1472 | /* Setup CQs */ |
1473 | if (init_attr->send_cq) { |
1474 | cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq); |
1475 | qplqp->scq = &cq->qplib_cq; |
1476 | qp->scq = cq; |
1477 | } |
1478 | |
1479 | if (init_attr->recv_cq) { |
1480 | cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq); |
1481 | qplqp->rcq = &cq->qplib_cq; |
1482 | qp->rcq = cq; |
1483 | } |
1484 | |
1485 | /* Setup RQ/SRQ */ |
1486 | rc = bnxt_re_init_rq_attr(qp, init_attr, uctx); |
1487 | if (rc) |
1488 | goto out; |
1489 | if (init_attr->qp_type == IB_QPT_GSI) |
1490 | bnxt_re_adjust_gsi_rq_attr(qp); |
1491 | |
1492 | /* Setup SQ */ |
1493 | rc = bnxt_re_init_sq_attr(qp, init_attr, uctx, ureq); |
1494 | if (rc) |
1495 | goto out; |
1496 | if (init_attr->qp_type == IB_QPT_GSI) |
1497 | bnxt_re_adjust_gsi_sq_attr(qp, init_attr, uctx); |
1498 | |
1499 | if (uctx) /* This will update DPI and qp_handle */ |
1500 | rc = bnxt_re_init_user_qp(rdev, pd, qp, cntx: uctx, ureq); |
1501 | out: |
1502 | return rc; |
1503 | } |
1504 | |
1505 | static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp, |
1506 | struct bnxt_re_pd *pd) |
1507 | { |
1508 | struct bnxt_re_sqp_entries *sqp_tbl; |
1509 | struct bnxt_re_dev *rdev; |
1510 | struct bnxt_re_qp *sqp; |
1511 | struct bnxt_re_ah *sah; |
1512 | int rc = 0; |
1513 | |
1514 | rdev = qp->rdev; |
1515 | /* Create a shadow QP to handle the QP1 traffic */ |
1516 | sqp_tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*sqp_tbl), |
1517 | GFP_KERNEL); |
1518 | if (!sqp_tbl) |
1519 | return -ENOMEM; |
1520 | rdev->gsi_ctx.sqp_tbl = sqp_tbl; |
1521 | |
1522 | sqp = bnxt_re_create_shadow_qp(pd, qp1_res: &rdev->qplib_res, qp1_qp: &qp->qplib_qp); |
1523 | if (!sqp) { |
1524 | rc = -ENODEV; |
1525 | ibdev_err(ibdev: &rdev->ibdev, format: "Failed to create Shadow QP for QP1"); |
1526 | goto out; |
1527 | } |
1528 | rdev->gsi_ctx.gsi_sqp = sqp; |
1529 | |
1530 | sqp->rcq = qp->rcq; |
1531 | sqp->scq = qp->scq; |
1532 | sah = bnxt_re_create_shadow_qp_ah(pd, qp1_res: &rdev->qplib_res, |
1533 | qp1_qp: &qp->qplib_qp); |
1534 | if (!sah) { |
1535 | bnxt_qplib_destroy_qp(res: &rdev->qplib_res, |
1536 | qp: &sqp->qplib_qp); |
1537 | rc = -ENODEV; |
1538 | ibdev_err(ibdev: &rdev->ibdev, |
1539 | format: "Failed to create AH entry for ShadowQP"); |
1540 | goto out; |
1541 | } |
1542 | rdev->gsi_ctx.gsi_sah = sah; |
1543 | |
1544 | return 0; |
1545 | out: |
1546 | kfree(objp: sqp_tbl); |
1547 | return rc; |
1548 | } |
1549 | |
1550 | static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd, |
1551 | struct ib_qp_init_attr *init_attr) |
1552 | { |
1553 | struct bnxt_re_dev *rdev; |
1554 | struct bnxt_qplib_qp *qplqp; |
1555 | int rc; |
1556 | |
1557 | rdev = qp->rdev; |
1558 | qplqp = &qp->qplib_qp; |
1559 | |
1560 | qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2; |
1561 | qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2; |
1562 | |
1563 | rc = bnxt_qplib_create_qp1(res: &rdev->qplib_res, qp: qplqp); |
1564 | if (rc) { |
1565 | ibdev_err(ibdev: &rdev->ibdev, format: "create HW QP1 failed!"); |
1566 | goto out; |
1567 | } |
1568 | |
1569 | rc = bnxt_re_create_shadow_gsi(qp, pd); |
1570 | out: |
1571 | return rc; |
1572 | } |
1573 | |
1574 | static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev, |
1575 | struct ib_qp_init_attr *init_attr, |
1576 | struct bnxt_qplib_dev_attr *dev_attr) |
1577 | { |
1578 | bool rc = true; |
1579 | |
1580 | if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes || |
1581 | init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes || |
1582 | init_attr->cap.max_send_sge > dev_attr->max_qp_sges || |
1583 | init_attr->cap.max_recv_sge > dev_attr->max_qp_sges || |
1584 | init_attr->cap.max_inline_data > dev_attr->max_inline_data) { |
1585 | ibdev_err(ibdev: &rdev->ibdev, |
1586 | format: "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x", |
1587 | init_attr->cap.max_send_wr, dev_attr->max_qp_wqes, |
1588 | init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes, |
1589 | init_attr->cap.max_send_sge, dev_attr->max_qp_sges, |
1590 | init_attr->cap.max_recv_sge, dev_attr->max_qp_sges, |
1591 | init_attr->cap.max_inline_data, |
1592 | dev_attr->max_inline_data); |
1593 | rc = false; |
1594 | } |
1595 | return rc; |
1596 | } |
1597 | |
1598 | int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr, |
1599 | struct ib_udata *udata) |
1600 | { |
1601 | struct bnxt_qplib_dev_attr *dev_attr; |
1602 | struct bnxt_re_ucontext *uctx; |
1603 | struct bnxt_re_qp_req ureq; |
1604 | struct bnxt_re_dev *rdev; |
1605 | struct bnxt_re_pd *pd; |
1606 | struct bnxt_re_qp *qp; |
1607 | struct ib_pd *ib_pd; |
1608 | u32 active_qps; |
1609 | int rc; |
1610 | |
1611 | ib_pd = ib_qp->pd; |
1612 | pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); |
1613 | rdev = pd->rdev; |
1614 | dev_attr = rdev->dev_attr; |
1615 | qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); |
1616 | |
1617 | uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx); |
1618 | if (udata) |
		if (ib_copy_from_udata(&ureq, udata, min(udata->inlen, sizeof(ureq))))
1620 | return -EFAULT; |
1621 | |
1622 | rc = bnxt_re_test_qp_limits(rdev, init_attr: qp_init_attr, dev_attr); |
1623 | if (!rc) { |
1624 | rc = -EINVAL; |
1625 | goto fail; |
1626 | } |
1627 | |
1628 | qp->rdev = rdev; |
1629 | rc = bnxt_re_init_qp_attr(qp, pd, init_attr: qp_init_attr, uctx, ureq: &ureq); |
1630 | if (rc) |
1631 | goto fail; |
1632 | |
1633 | if (qp_init_attr->qp_type == IB_QPT_GSI && |
1634 | !(bnxt_qplib_is_chip_gen_p5_p7(cctx: rdev->chip_ctx))) { |
1635 | rc = bnxt_re_create_gsi_qp(qp, pd, init_attr: qp_init_attr); |
1636 | if (rc == -ENODEV) |
1637 | goto qp_destroy; |
1638 | if (rc) |
1639 | goto fail; |
1640 | } else { |
1641 | rc = bnxt_qplib_create_qp(res: &rdev->qplib_res, qp: &qp->qplib_qp); |
1642 | if (rc) { |
1643 | ibdev_err(ibdev: &rdev->ibdev, format: "Failed to create HW QP"); |
1644 | goto free_umem; |
1645 | } |
1646 | if (udata) { |
1647 | struct bnxt_re_qp_resp resp; |
1648 | |
1649 | resp.qpid = qp->qplib_qp.id; |
1650 | resp.rsvd = 0; |
1651 | rc = ib_copy_to_udata(udata, src: &resp, len: sizeof(resp)); |
1652 | if (rc) { |
1653 | ibdev_err(ibdev: &rdev->ibdev, format: "Failed to copy QP udata"); |
1654 | goto qp_destroy; |
1655 | } |
1656 | } |
1657 | } |
1658 | |
1659 | qp->ib_qp.qp_num = qp->qplib_qp.id; |
1660 | if (qp_init_attr->qp_type == IB_QPT_GSI) |
1661 | rdev->gsi_ctx.gsi_qp = qp; |
1662 | spin_lock_init(&qp->sq_lock); |
1663 | spin_lock_init(&qp->rq_lock); |
1664 | INIT_LIST_HEAD(list: &qp->list); |
1665 | mutex_lock(&rdev->qp_lock); |
1666 | list_add_tail(new: &qp->list, head: &rdev->qp_list); |
1667 | mutex_unlock(lock: &rdev->qp_lock); |
1668 | active_qps = atomic_inc_return(v: &rdev->stats.res.qp_count); |
1669 | if (active_qps > rdev->stats.res.qp_watermark) |
1670 | rdev->stats.res.qp_watermark = active_qps; |
1671 | if (qp_init_attr->qp_type == IB_QPT_RC) { |
1672 | active_qps = atomic_inc_return(v: &rdev->stats.res.rc_qp_count); |
1673 | if (active_qps > rdev->stats.res.rc_qp_watermark) |
1674 | rdev->stats.res.rc_qp_watermark = active_qps; |
1675 | } else if (qp_init_attr->qp_type == IB_QPT_UD) { |
1676 | active_qps = atomic_inc_return(v: &rdev->stats.res.ud_qp_count); |
1677 | if (active_qps > rdev->stats.res.ud_qp_watermark) |
1678 | rdev->stats.res.ud_qp_watermark = active_qps; |
1679 | } |
1680 | bnxt_re_debug_add_qpinfo(rdev, qp); |
1681 | |
1682 | return 0; |
1683 | qp_destroy: |
1684 | bnxt_qplib_destroy_qp(res: &rdev->qplib_res, qp: &qp->qplib_qp); |
1685 | free_umem: |
1686 | ib_umem_release(umem: qp->rumem); |
1687 | ib_umem_release(umem: qp->sumem); |
1688 | fail: |
1689 | return rc; |
1690 | } |
1691 | |
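/*
 * The helpers below translate between the IB enums and the firmware's
 * CMDQ/CREQ encodings; unrecognised values fall back to the ERR state
 * or a 2048-byte path MTU.
 */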
1692 | static u8 __from_ib_qp_state(enum ib_qp_state state) |
1693 | { |
1694 | switch (state) { |
1695 | case IB_QPS_RESET: |
1696 | return CMDQ_MODIFY_QP_NEW_STATE_RESET; |
1697 | case IB_QPS_INIT: |
1698 | return CMDQ_MODIFY_QP_NEW_STATE_INIT; |
1699 | case IB_QPS_RTR: |
1700 | return CMDQ_MODIFY_QP_NEW_STATE_RTR; |
1701 | case IB_QPS_RTS: |
1702 | return CMDQ_MODIFY_QP_NEW_STATE_RTS; |
1703 | case IB_QPS_SQD: |
1704 | return CMDQ_MODIFY_QP_NEW_STATE_SQD; |
1705 | case IB_QPS_SQE: |
1706 | return CMDQ_MODIFY_QP_NEW_STATE_SQE; |
1707 | case IB_QPS_ERR: |
1708 | default: |
1709 | return CMDQ_MODIFY_QP_NEW_STATE_ERR; |
1710 | } |
1711 | } |
1712 | |
1713 | static enum ib_qp_state __to_ib_qp_state(u8 state) |
1714 | { |
1715 | switch (state) { |
1716 | case CMDQ_MODIFY_QP_NEW_STATE_RESET: |
1717 | return IB_QPS_RESET; |
1718 | case CMDQ_MODIFY_QP_NEW_STATE_INIT: |
1719 | return IB_QPS_INIT; |
1720 | case CMDQ_MODIFY_QP_NEW_STATE_RTR: |
1721 | return IB_QPS_RTR; |
1722 | case CMDQ_MODIFY_QP_NEW_STATE_RTS: |
1723 | return IB_QPS_RTS; |
1724 | case CMDQ_MODIFY_QP_NEW_STATE_SQD: |
1725 | return IB_QPS_SQD; |
1726 | case CMDQ_MODIFY_QP_NEW_STATE_SQE: |
1727 | return IB_QPS_SQE; |
1728 | case CMDQ_MODIFY_QP_NEW_STATE_ERR: |
1729 | default: |
1730 | return IB_QPS_ERR; |
1731 | } |
1732 | } |
1733 | |
1734 | static u32 __from_ib_mtu(enum ib_mtu mtu) |
1735 | { |
1736 | switch (mtu) { |
1737 | case IB_MTU_256: |
1738 | return CMDQ_MODIFY_QP_PATH_MTU_MTU_256; |
1739 | case IB_MTU_512: |
1740 | return CMDQ_MODIFY_QP_PATH_MTU_MTU_512; |
1741 | case IB_MTU_1024: |
1742 | return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024; |
1743 | case IB_MTU_2048: |
1744 | return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048; |
1745 | case IB_MTU_4096: |
1746 | return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096; |
1747 | default: |
1748 | return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048; |
1749 | } |
1750 | } |
1751 | |
1752 | static enum ib_mtu __to_ib_mtu(u32 mtu) |
1753 | { |
1754 | switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) { |
1755 | case CMDQ_MODIFY_QP_PATH_MTU_MTU_256: |
1756 | return IB_MTU_256; |
1757 | case CMDQ_MODIFY_QP_PATH_MTU_MTU_512: |
1758 | return IB_MTU_512; |
1759 | case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024: |
1760 | return IB_MTU_1024; |
1761 | case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048: |
1762 | return IB_MTU_2048; |
1763 | case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096: |
1764 | return IB_MTU_4096; |
1765 | default: |
1766 | return IB_MTU_2048; |
1767 | } |
1768 | } |
1769 | |
1770 | /* Shared Receive Queues */ |
1771 | int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata) |
1772 | { |
1773 | struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, |
1774 | ib_srq); |
1775 | struct bnxt_re_dev *rdev = srq->rdev; |
1776 | struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq; |
1777 | |
1778 | if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) { |
1779 | free_page((unsigned long)srq->uctx_srq_page); |
1780 | hash_del(node: &srq->hash_entry); |
1781 | } |
1782 | bnxt_qplib_destroy_srq(res: &rdev->qplib_res, srq: qplib_srq); |
1783 | ib_umem_release(umem: srq->umem); |
1784 | atomic_dec(v: &rdev->stats.res.srq_count); |
1785 | return 0; |
1786 | } |
1787 | |
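/*
 * For a user SRQ, pin the umem backing the queue (max_wqe * wqe_size
 * bytes, page aligned) and record the ucontext's DPI and the
 * user-supplied srq_handle from the ABI request.
 */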
1788 | static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev, |
1789 | struct bnxt_re_pd *pd, |
1790 | struct bnxt_re_srq *srq, |
1791 | struct ib_udata *udata) |
1792 | { |
1793 | struct bnxt_re_srq_req ureq; |
1794 | struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq; |
1795 | struct ib_umem *umem; |
1796 | int bytes = 0; |
1797 | struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context( |
1798 | udata, struct bnxt_re_ucontext, ib_uctx); |
1799 | |
1800 | if (ib_copy_from_udata(dest: &ureq, udata, len: sizeof(ureq))) |
1801 | return -EFAULT; |
1802 | |
1803 | bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size); |
1804 | bytes = PAGE_ALIGN(bytes); |
1805 | umem = ib_umem_get(device: &rdev->ibdev, addr: ureq.srqva, size: bytes, |
1806 | access: IB_ACCESS_LOCAL_WRITE); |
1807 | if (IS_ERR(ptr: umem)) |
1808 | return PTR_ERR(ptr: umem); |
1809 | |
1810 | srq->umem = umem; |
1811 | qplib_srq->sg_info.umem = umem; |
1812 | qplib_srq->sg_info.pgsize = PAGE_SIZE; |
1813 | qplib_srq->sg_info.pgshft = PAGE_SHIFT; |
1814 | qplib_srq->srq_handle = ureq.srq_handle; |
1815 | qplib_srq->dpi = &cntx->dpi; |
1816 | |
1817 | return 0; |
1818 | } |
1819 | |
1820 | int bnxt_re_create_srq(struct ib_srq *ib_srq, |
1821 | struct ib_srq_init_attr *srq_init_attr, |
1822 | struct ib_udata *udata) |
1823 | { |
1824 | struct bnxt_qplib_dev_attr *dev_attr; |
1825 | struct bnxt_re_ucontext *uctx; |
1826 | struct bnxt_re_dev *rdev; |
1827 | struct bnxt_re_srq *srq; |
1828 | struct bnxt_re_pd *pd; |
1829 | struct ib_pd *ib_pd; |
1830 | u32 active_srqs; |
1831 | int rc, entries; |
1832 | |
1833 | ib_pd = ib_srq->pd; |
1834 | pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); |
1835 | rdev = pd->rdev; |
1836 | dev_attr = rdev->dev_attr; |
1837 | srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq); |
1838 | |
1839 | if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) { |
		ibdev_err(&rdev->ibdev, "Create SRQ failed - max exceeded");
1841 | rc = -EINVAL; |
1842 | goto exit; |
1843 | } |
1844 | |
1845 | if (srq_init_attr->srq_type != IB_SRQT_BASIC) { |
1846 | rc = -EOPNOTSUPP; |
1847 | goto exit; |
1848 | } |
1849 | |
1850 | uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx); |
1851 | srq->rdev = rdev; |
1852 | srq->qplib_srq.pd = &pd->qplib_pd; |
1853 | srq->qplib_srq.dpi = &rdev->dpi_privileged; |
	/* Allocate one entry more than requested so that posting the
	 * maximum number of WRs never makes the queue appear empty.
	 */
1857 | entries = bnxt_re_init_depth(ent: srq_init_attr->attr.max_wr + 1, uctx); |
1858 | if (entries > dev_attr->max_srq_wqes + 1) |
1859 | entries = dev_attr->max_srq_wqes + 1; |
1860 | srq->qplib_srq.max_wqe = entries; |
1861 | |
1862 | srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge; |
	/* SRQ WQE size is fixed at 128 bytes, so size it for the max SGEs */
	srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges);
1865 | srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit; |
1866 | srq->srq_limit = srq_init_attr->attr.srq_limit; |
1867 | srq->qplib_srq.eventq_hw_ring_id = rdev->nqr->nq[0].ring_id; |
1868 | srq->qplib_srq.sg_info.pgsize = PAGE_SIZE; |
1869 | srq->qplib_srq.sg_info.pgshft = PAGE_SHIFT; |
1870 | |
1871 | if (udata) { |
1872 | rc = bnxt_re_init_user_srq(rdev, pd, srq, udata); |
1873 | if (rc) |
1874 | goto fail; |
1875 | } |
1876 | |
1877 | rc = bnxt_qplib_create_srq(res: &rdev->qplib_res, srq: &srq->qplib_srq); |
1878 | if (rc) { |
1879 | ibdev_err(ibdev: &rdev->ibdev, format: "Create HW SRQ failed!"); |
1880 | goto fail; |
1881 | } |
1882 | |
1883 | if (udata) { |
1884 | struct bnxt_re_srq_resp resp = {}; |
1885 | |
1886 | resp.srqid = srq->qplib_srq.id; |
1887 | if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) { |
1888 | hash_add(rdev->srq_hash, &srq->hash_entry, srq->qplib_srq.id); |
1889 | srq->uctx_srq_page = (void *)get_zeroed_page(GFP_KERNEL); |
1890 | if (!srq->uctx_srq_page) { |
1891 | rc = -ENOMEM; |
1892 | goto fail; |
1893 | } |
1894 | resp.comp_mask |= BNXT_RE_SRQ_TOGGLE_PAGE_SUPPORT; |
1895 | } |
1896 | rc = ib_copy_to_udata(udata, src: &resp, len: sizeof(resp)); |
1897 | if (rc) { |
1898 | ibdev_err(ibdev: &rdev->ibdev, format: "SRQ copy to udata failed!"); |
1899 | bnxt_qplib_destroy_srq(res: &rdev->qplib_res, |
1900 | srq: &srq->qplib_srq); |
1901 | goto fail; |
1902 | } |
1903 | } |
1904 | active_srqs = atomic_inc_return(v: &rdev->stats.res.srq_count); |
1905 | if (active_srqs > rdev->stats.res.srq_watermark) |
1906 | rdev->stats.res.srq_watermark = active_srqs; |
1907 | spin_lock_init(&srq->lock); |
1908 | |
1909 | return 0; |
1910 | |
1911 | fail: |
1912 | ib_umem_release(umem: srq->umem); |
1913 | exit: |
1914 | return rc; |
1915 | } |
1916 | |
1917 | int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr, |
1918 | enum ib_srq_attr_mask srq_attr_mask, |
1919 | struct ib_udata *udata) |
1920 | { |
1921 | struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, |
1922 | ib_srq); |
1923 | struct bnxt_re_dev *rdev = srq->rdev; |
1924 | int rc; |
1925 | |
1926 | switch (srq_attr_mask) { |
1927 | case IB_SRQ_MAX_WR: |
1928 | /* SRQ resize is not supported */ |
1929 | return -EINVAL; |
1930 | case IB_SRQ_LIMIT: |
1931 | /* Change the SRQ threshold */ |
1932 | if (srq_attr->srq_limit > srq->qplib_srq.max_wqe) |
1933 | return -EINVAL; |
1934 | |
1935 | srq->qplib_srq.threshold = srq_attr->srq_limit; |
1936 | rc = bnxt_qplib_modify_srq(res: &rdev->qplib_res, srq: &srq->qplib_srq); |
1937 | if (rc) { |
1938 | ibdev_err(ibdev: &rdev->ibdev, format: "Modify HW SRQ failed!"); |
1939 | return rc; |
1940 | } |
1941 | /* On success, update the shadow */ |
1942 | srq->srq_limit = srq_attr->srq_limit; |
		/* No response needs to be built and copied back via udata */
1944 | return 0; |
1945 | default: |
1946 | ibdev_err(ibdev: &rdev->ibdev, |
1947 | format: "Unsupported srq_attr_mask 0x%x", srq_attr_mask); |
1948 | return -EINVAL; |
1949 | } |
1950 | } |
1951 | |
1952 | int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr) |
1953 | { |
1954 | struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, |
1955 | ib_srq); |
1956 | struct bnxt_re_srq tsrq; |
1957 | struct bnxt_re_dev *rdev = srq->rdev; |
1958 | int rc; |
1959 | |
1960 | /* Get live SRQ attr */ |
1961 | tsrq.qplib_srq.id = srq->qplib_srq.id; |
1962 | rc = bnxt_qplib_query_srq(res: &rdev->qplib_res, srq: &tsrq.qplib_srq); |
1963 | if (rc) { |
1964 | ibdev_err(ibdev: &rdev->ibdev, format: "Query HW SRQ failed!"); |
1965 | return rc; |
1966 | } |
1967 | srq_attr->max_wr = srq->qplib_srq.max_wqe; |
1968 | srq_attr->max_sge = srq->qplib_srq.max_sge; |
1969 | srq_attr->srq_limit = tsrq.qplib_srq.threshold; |
1970 | |
1971 | return 0; |
1972 | } |
1973 | |
1974 | int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr, |
1975 | const struct ib_recv_wr **bad_wr) |
1976 | { |
1977 | struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, |
1978 | ib_srq); |
1979 | struct bnxt_qplib_swqe wqe; |
1980 | unsigned long flags; |
1981 | int rc = 0; |
1982 | |
1983 | spin_lock_irqsave(&srq->lock, flags); |
1984 | while (wr) { |
1985 | /* Transcribe each ib_recv_wr to qplib_swqe */ |
1986 | wqe.num_sge = wr->num_sge; |
1987 | bnxt_re_build_sgl(ib_sg_list: wr->sg_list, sg_list: wqe.sg_list, num: wr->num_sge); |
1988 | wqe.wr_id = wr->wr_id; |
1989 | wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV; |
1990 | |
1991 | rc = bnxt_qplib_post_srq_recv(srq: &srq->qplib_srq, wqe: &wqe); |
1992 | if (rc) { |
1993 | *bad_wr = wr; |
1994 | break; |
1995 | } |
1996 | wr = wr->next; |
1997 | } |
1998 | spin_unlock_irqrestore(lock: &srq->lock, flags); |
1999 | |
2000 | return rc; |
2001 | } |
2002 | static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev, |
2003 | struct bnxt_re_qp *qp1_qp, |
2004 | int qp_attr_mask) |
2005 | { |
2006 | struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp; |
2007 | int rc; |
2008 | |
2009 | if (qp_attr_mask & IB_QP_STATE) { |
2010 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE; |
2011 | qp->qplib_qp.state = qp1_qp->qplib_qp.state; |
2012 | } |
2013 | if (qp_attr_mask & IB_QP_PKEY_INDEX) { |
2014 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY; |
2015 | qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index; |
2016 | } |
2017 | |
2018 | if (qp_attr_mask & IB_QP_QKEY) { |
2019 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY; |
		/* Use a fixed, arbitrarily chosen QKEY for the shadow QP */
		qp->qplib_qp.qkey = 0x81818181;
2022 | } |
2023 | if (qp_attr_mask & IB_QP_SQ_PSN) { |
2024 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN; |
2025 | qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn; |
2026 | } |
2027 | |
2028 | rc = bnxt_qplib_modify_qp(res: &rdev->qplib_res, qp: &qp->qplib_qp); |
2029 | if (rc) |
2030 | ibdev_err(ibdev: &rdev->ibdev, format: "Failed to modify Shadow QP for QP1"); |
2031 | return rc; |
2032 | } |
2033 | |
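/*
 * bnxt_re_modify_qp() - translate an ib_qp_attr/mask pair into
 * CMDQ_MODIFY_QP_MODIFY_MASK_* flags on the qplib QP and issue a single
 * firmware modify command.  Kernel QPs moving to the ERR or RESET state
 * are also added to or removed from the CQ flush list, and GSI changes
 * are mirrored onto the shadow QP when one exists.
 */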
2034 | int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, |
2035 | int qp_attr_mask, struct ib_udata *udata) |
2036 | { |
2037 | struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); |
2038 | struct bnxt_re_dev *rdev = qp->rdev; |
2039 | struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr; |
2040 | enum ib_qp_state curr_qp_state, new_qp_state; |
2041 | int rc, entries; |
2042 | unsigned int flags; |
2043 | u8 nw_type; |
2044 | |
2045 | if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS) |
2046 | return -EOPNOTSUPP; |
2047 | |
2048 | qp->qplib_qp.modify_flags = 0; |
2049 | if (qp_attr_mask & IB_QP_STATE) { |
2050 | curr_qp_state = __to_ib_qp_state(state: qp->qplib_qp.cur_qp_state); |
2051 | new_qp_state = qp_attr->qp_state; |
2052 | if (!ib_modify_qp_is_ok(cur_state: curr_qp_state, next_state: new_qp_state, |
2053 | type: ib_qp->qp_type, mask: qp_attr_mask)) { |
2054 | ibdev_err(ibdev: &rdev->ibdev, |
2055 | format: "Invalid attribute mask: %#x specified ", |
2056 | qp_attr_mask); |
2057 | ibdev_err(ibdev: &rdev->ibdev, |
2058 | format: "for qpn: %#x type: %#x", |
2059 | ib_qp->qp_num, ib_qp->qp_type); |
2060 | ibdev_err(ibdev: &rdev->ibdev, |
2061 | format: "curr_qp_state=0x%x, new_qp_state=0x%x\n", |
2062 | curr_qp_state, new_qp_state); |
2063 | return -EINVAL; |
2064 | } |
2065 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE; |
2066 | qp->qplib_qp.state = __from_ib_qp_state(state: qp_attr->qp_state); |
2067 | |
2068 | if (!qp->sumem && |
2069 | qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { |
2070 | ibdev_dbg(&rdev->ibdev, |
2071 | "Move QP = %p to flush list\n", qp); |
2072 | flags = bnxt_re_lock_cqs(qp); |
2073 | bnxt_qplib_add_flush_qp(qp: &qp->qplib_qp); |
2074 | bnxt_re_unlock_cqs(qp, flags); |
2075 | } |
2076 | if (!qp->sumem && |
2077 | qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) { |
2078 | ibdev_dbg(&rdev->ibdev, |
2079 | "Move QP = %p out of flush list\n", qp); |
2080 | flags = bnxt_re_lock_cqs(qp); |
2081 | bnxt_qplib_clean_qp(qp: &qp->qplib_qp); |
2082 | bnxt_re_unlock_cqs(qp, flags); |
2083 | } |
2084 | } |
2085 | if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) { |
2086 | qp->qplib_qp.modify_flags |= |
2087 | CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY; |
2088 | qp->qplib_qp.en_sqd_async_notify = true; |
2089 | } |
2090 | if (qp_attr_mask & IB_QP_ACCESS_FLAGS) { |
2091 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS; |
2092 | qp->qplib_qp.access = |
2093 | __qp_access_flags_from_ib(cctx: qp->qplib_qp.cctx, |
2094 | iflags: qp_attr->qp_access_flags); |
2095 | /* LOCAL_WRITE access must be set to allow RC receive */ |
2096 | qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE; |
2097 | } |
2098 | if (qp_attr_mask & IB_QP_PKEY_INDEX) { |
2099 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY; |
2100 | qp->qplib_qp.pkey_index = qp_attr->pkey_index; |
2101 | } |
2102 | if (qp_attr_mask & IB_QP_QKEY) { |
2103 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY; |
2104 | qp->qplib_qp.qkey = qp_attr->qkey; |
2105 | } |
2106 | if (qp_attr_mask & IB_QP_AV) { |
2107 | const struct ib_global_route *grh = |
2108 | rdma_ah_read_grh(attr: &qp_attr->ah_attr); |
2109 | const struct ib_gid_attr *sgid_attr; |
2110 | struct bnxt_re_gid_ctx *ctx; |
2111 | |
2112 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID | |
2113 | CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL | |
2114 | CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX | |
2115 | CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT | |
2116 | CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS | |
2117 | CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC | |
2118 | CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID; |
2119 | memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw, |
2120 | sizeof(qp->qplib_qp.ah.dgid.data)); |
2121 | qp->qplib_qp.ah.flow_label = grh->flow_label; |
2122 | sgid_attr = grh->sgid_attr; |
		/* Get the HW context of the GID. The reference to the
		 * GID table entry is already held by the caller.
		 */
2126 | ctx = rdma_read_gid_hw_context(attr: sgid_attr); |
2127 | qp->qplib_qp.ah.sgid_index = ctx->idx; |
2128 | qp->qplib_qp.ah.host_sgid_index = grh->sgid_index; |
2129 | qp->qplib_qp.ah.hop_limit = grh->hop_limit; |
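		/* The lower two (ECN) bits are dropped here, so only the
		 * 6-bit DSCP portion of the traffic class is handed to the
		 * firmware (e.g. 0x62 >> 2 == 0x18).
		 */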
2130 | qp->qplib_qp.ah.traffic_class = grh->traffic_class >> 2; |
2131 | qp->qplib_qp.ah.sl = rdma_ah_get_sl(attr: &qp_attr->ah_attr); |
2132 | ether_addr_copy(dst: qp->qplib_qp.ah.dmac, |
2133 | src: qp_attr->ah_attr.roce.dmac); |
2134 | |
2135 | rc = rdma_read_gid_l2_fields(attr: sgid_attr, NULL, |
2136 | smac: &qp->qplib_qp.smac[0]); |
2137 | if (rc) |
2138 | return rc; |
2139 | |
2140 | nw_type = rdma_gid_attr_network_type(attr: sgid_attr); |
2141 | switch (nw_type) { |
2142 | case RDMA_NETWORK_IPV4: |
2143 | qp->qplib_qp.nw_type = |
2144 | CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4; |
2145 | break; |
2146 | case RDMA_NETWORK_IPV6: |
2147 | qp->qplib_qp.nw_type = |
2148 | CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6; |
2149 | break; |
2150 | default: |
2151 | qp->qplib_qp.nw_type = |
2152 | CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1; |
2153 | break; |
2154 | } |
2155 | } |
2156 | |
2157 | if (qp_attr->qp_state == IB_QPS_RTR) { |
2158 | enum ib_mtu qpmtu; |
2159 | |
2160 | qpmtu = iboe_get_mtu(mtu: rdev->netdev->mtu); |
2161 | if (qp_attr_mask & IB_QP_PATH_MTU) { |
2162 | if (ib_mtu_enum_to_int(mtu: qp_attr->path_mtu) > |
2163 | ib_mtu_enum_to_int(mtu: qpmtu)) |
2164 | return -EINVAL; |
2165 | qpmtu = qp_attr->path_mtu; |
2166 | } |
2167 | |
2168 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; |
2169 | qp->qplib_qp.path_mtu = __from_ib_mtu(mtu: qpmtu); |
2170 | qp->qplib_qp.mtu = ib_mtu_enum_to_int(mtu: qpmtu); |
2171 | } |
2172 | |
2173 | if (qp_attr_mask & IB_QP_TIMEOUT) { |
2174 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT; |
2175 | qp->qplib_qp.timeout = qp_attr->timeout; |
2176 | } |
2177 | if (qp_attr_mask & IB_QP_RETRY_CNT) { |
2178 | qp->qplib_qp.modify_flags |= |
2179 | CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT; |
2180 | qp->qplib_qp.retry_cnt = qp_attr->retry_cnt; |
2181 | } |
2182 | if (qp_attr_mask & IB_QP_RNR_RETRY) { |
2183 | qp->qplib_qp.modify_flags |= |
2184 | CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY; |
2185 | qp->qplib_qp.rnr_retry = qp_attr->rnr_retry; |
2186 | } |
2187 | if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) { |
2188 | qp->qplib_qp.modify_flags |= |
2189 | CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER; |
2190 | qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer; |
2191 | } |
2192 | if (qp_attr_mask & IB_QP_RQ_PSN) { |
2193 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN; |
2194 | qp->qplib_qp.rq.psn = qp_attr->rq_psn; |
2195 | } |
2196 | if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { |
2197 | qp->qplib_qp.modify_flags |= |
2198 | CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC; |
2199 | /* Cap the max_rd_atomic to device max */ |
2200 | qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic, |
2201 | dev_attr->max_qp_rd_atom); |
2202 | } |
2203 | if (qp_attr_mask & IB_QP_SQ_PSN) { |
2204 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN; |
2205 | qp->qplib_qp.sq.psn = qp_attr->sq_psn; |
2206 | } |
2207 | if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { |
2208 | if (qp_attr->max_dest_rd_atomic > |
2209 | dev_attr->max_qp_init_rd_atom) { |
			ibdev_err(&rdev->ibdev,
				  "max_dest_rd_atomic requested %d is > dev_max %d",
				  qp_attr->max_dest_rd_atomic,
				  dev_attr->max_qp_init_rd_atom);
2214 | return -EINVAL; |
2215 | } |
2216 | |
2217 | qp->qplib_qp.modify_flags |= |
2218 | CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC; |
2219 | qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic; |
2220 | } |
2221 | if (qp_attr_mask & IB_QP_CAP) { |
2222 | struct bnxt_re_ucontext *uctx = |
2223 | rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx); |
2224 | |
2225 | qp->qplib_qp.modify_flags |= |
2226 | CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE | |
2227 | CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE | |
2228 | CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE | |
2229 | CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE | |
2230 | CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA; |
2231 | if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) || |
2232 | (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) || |
2233 | (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) || |
2234 | (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) || |
2235 | (qp_attr->cap.max_inline_data >= |
2236 | dev_attr->max_inline_data)) { |
			ibdev_err(&rdev->ibdev,
				  "Modify QP failed - max exceeded");
2239 | return -EINVAL; |
2240 | } |
2241 | entries = bnxt_re_init_depth(ent: qp_attr->cap.max_send_wr, uctx); |
2242 | qp->qplib_qp.sq.max_wqe = min_t(u32, entries, |
2243 | dev_attr->max_qp_wqes + 1); |
2244 | qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - |
2245 | qp_attr->cap.max_send_wr; |
		/*
		 * Reserve one slot for the phantom WQE. Some applications
		 * may post one extra entry in this case; allowing for it
		 * avoids an unexpected queue-full condition.
		 */
2251 | qp->qplib_qp.sq.q_full_delta -= 1; |
2252 | qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge; |
2253 | if (qp->qplib_qp.rq.max_wqe) { |
2254 | entries = bnxt_re_init_depth(ent: qp_attr->cap.max_recv_wr, uctx); |
2255 | qp->qplib_qp.rq.max_wqe = |
2256 | min_t(u32, entries, dev_attr->max_qp_wqes + 1); |
2257 | qp->qplib_qp.rq.max_sw_wqe = qp->qplib_qp.rq.max_wqe; |
2258 | qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - |
2259 | qp_attr->cap.max_recv_wr; |
2260 | qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge; |
2261 | } else { |
2262 | /* SRQ was used prior, just ignore the RQ caps */ |
2263 | } |
2264 | } |
2265 | if (qp_attr_mask & IB_QP_DEST_QPN) { |
2266 | qp->qplib_qp.modify_flags |= |
2267 | CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID; |
2268 | qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num; |
2269 | } |
2270 | rc = bnxt_qplib_modify_qp(res: &rdev->qplib_res, qp: &qp->qplib_qp); |
2271 | if (rc) { |
2272 | ibdev_err(ibdev: &rdev->ibdev, format: "Failed to modify HW QP"); |
2273 | return rc; |
2274 | } |
2275 | if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) |
2276 | rc = bnxt_re_modify_shadow_qp(rdev, qp1_qp: qp, qp_attr_mask); |
2277 | return rc; |
2278 | } |
2279 | |
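/*
 * bnxt_re_query_qp() - query the live QP state from the firmware via a
 * scratch bnxt_qplib_qp (only the id and host SGID index are passed in)
 * and translate the result into ib_qp_attr.  Note that the cap fields
 * are reported from the driver's software copy rather than the firmware
 * response.
 */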
2280 | int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, |
2281 | int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) |
2282 | { |
2283 | struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); |
2284 | struct bnxt_re_dev *rdev = qp->rdev; |
2285 | struct bnxt_qplib_qp *qplib_qp; |
2286 | int rc; |
2287 | |
2288 | qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL); |
2289 | if (!qplib_qp) |
2290 | return -ENOMEM; |
2291 | |
2292 | qplib_qp->id = qp->qplib_qp.id; |
2293 | qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index; |
2294 | |
2295 | rc = bnxt_qplib_query_qp(res: &rdev->qplib_res, qp: qplib_qp); |
2296 | if (rc) { |
2297 | ibdev_err(ibdev: &rdev->ibdev, format: "Failed to query HW QP"); |
2298 | goto out; |
2299 | } |
2300 | qp_attr->qp_state = __to_ib_qp_state(state: qplib_qp->state); |
2301 | qp_attr->cur_qp_state = __to_ib_qp_state(state: qplib_qp->cur_qp_state); |
2302 | qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0; |
2303 | qp_attr->qp_access_flags = __qp_access_flags_to_ib(cctx: qp->qplib_qp.cctx, |
2304 | qflags: qplib_qp->access); |
2305 | qp_attr->pkey_index = qplib_qp->pkey_index; |
2306 | qp_attr->qkey = qplib_qp->qkey; |
2307 | qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; |
2308 | rdma_ah_set_grh(attr: &qp_attr->ah_attr, NULL, flow_label: qplib_qp->ah.flow_label, |
2309 | sgid_index: qplib_qp->ah.host_sgid_index, |
2310 | hop_limit: qplib_qp->ah.hop_limit, |
2311 | traffic_class: qplib_qp->ah.traffic_class); |
2312 | rdma_ah_set_dgid_raw(attr: &qp_attr->ah_attr, dgid: qplib_qp->ah.dgid.data); |
2313 | rdma_ah_set_sl(attr: &qp_attr->ah_attr, sl: qplib_qp->ah.sl); |
2314 | ether_addr_copy(dst: qp_attr->ah_attr.roce.dmac, src: qplib_qp->ah.dmac); |
2315 | qp_attr->path_mtu = __to_ib_mtu(mtu: qplib_qp->path_mtu); |
2316 | qp_attr->timeout = qplib_qp->timeout; |
2317 | qp_attr->retry_cnt = qplib_qp->retry_cnt; |
2318 | qp_attr->rnr_retry = qplib_qp->rnr_retry; |
2319 | qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer; |
2320 | qp_attr->port_num = __to_ib_port_num(port_id: qplib_qp->port_id); |
2321 | qp_attr->rq_psn = qplib_qp->rq.psn; |
2322 | qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic; |
2323 | qp_attr->sq_psn = qplib_qp->sq.psn; |
2324 | qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic; |
2325 | qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR : |
2326 | IB_SIGNAL_REQ_WR; |
2327 | qp_attr->dest_qp_num = qplib_qp->dest_qpn; |
2328 | |
2329 | qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe; |
2330 | qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge; |
2331 | qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe; |
2332 | qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge; |
2333 | qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data; |
2334 | qp_init_attr->cap = qp_attr->cap; |
2335 | |
2336 | out: |
2337 | kfree(objp: qplib_qp); |
2338 | return rc; |
2339 | } |
2340 | |
/* Routine for sending QP1 packets for RoCE v1 and v2
 */
2343 | static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp, |
2344 | const struct ib_send_wr *wr, |
2345 | struct bnxt_qplib_swqe *wqe, |
2346 | int payload_size) |
2347 | { |
2348 | struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, |
2349 | ib_ah); |
2350 | struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah; |
2351 | const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr; |
2352 | struct bnxt_qplib_sge sge; |
2353 | u8 nw_type; |
2354 | u16 ether_type; |
2355 | union ib_gid dgid; |
2356 | bool is_eth = false; |
2357 | bool is_vlan = false; |
2358 | bool is_grh = false; |
2359 | bool is_udp = false; |
2360 | u8 ip_version = 0; |
2361 | u16 vlan_id = 0xFFFF; |
2362 | void *buf; |
2363 | int i, rc; |
2364 | |
2365 | memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr)); |
2366 | |
2367 | rc = rdma_read_gid_l2_fields(attr: sgid_attr, vlan_id: &vlan_id, NULL); |
2368 | if (rc) |
2369 | return rc; |
2370 | |
2371 | /* Get network header type for this GID */ |
2372 | nw_type = rdma_gid_attr_network_type(attr: sgid_attr); |
2373 | switch (nw_type) { |
2374 | case RDMA_NETWORK_IPV4: |
2375 | nw_type = BNXT_RE_ROCEV2_IPV4_PACKET; |
2376 | break; |
2377 | case RDMA_NETWORK_IPV6: |
2378 | nw_type = BNXT_RE_ROCEV2_IPV6_PACKET; |
2379 | break; |
2380 | default: |
2381 | nw_type = BNXT_RE_ROCE_V1_PACKET; |
2382 | break; |
2383 | } |
2384 | memcpy(&dgid.raw, &qplib_ah->dgid, 16); |
2385 | is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP; |
2386 | if (is_udp) { |
2387 | if (ipv6_addr_v4mapped(a: (struct in6_addr *)&sgid_attr->gid)) { |
2388 | ip_version = 4; |
2389 | ether_type = ETH_P_IP; |
2390 | } else { |
2391 | ip_version = 6; |
2392 | ether_type = ETH_P_IPV6; |
2393 | } |
2394 | is_grh = false; |
2395 | } else { |
2396 | ether_type = ETH_P_IBOE; |
2397 | is_grh = true; |
2398 | } |
2399 | |
2400 | is_eth = true; |
2401 | is_vlan = vlan_id && (vlan_id < 0x1000); |
2402 | |
2403 | ib_ud_header_init(payload_bytes: payload_size, lrh_present: !is_eth, eth_present: is_eth, vlan_present: is_vlan, grh_present: is_grh, |
2404 | ip_version, udp_present: is_udp, immediate_present: 0, header: &qp->qp1_hdr); |
2405 | |
2406 | /* ETH */ |
2407 | ether_addr_copy(dst: qp->qp1_hdr.eth.dmac_h, src: ah->qplib_ah.dmac); |
2408 | ether_addr_copy(dst: qp->qp1_hdr.eth.smac_h, src: qp->qplib_qp.smac); |
2409 | |
	/* VLAN presence was derived from the SGID's L2 fields above;
	 * pick the Ethertype header accordingly.
	 */
2411 | |
2412 | if (!is_vlan) { |
2413 | qp->qp1_hdr.eth.type = cpu_to_be16(ether_type); |
2414 | } else { |
2415 | qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type); |
2416 | qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id); |
2417 | } |
2418 | |
2419 | if (is_grh || (ip_version == 6)) { |
2420 | memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw, |
2421 | sizeof(sgid_attr->gid)); |
2422 | memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data, |
2423 | sizeof(sgid_attr->gid)); |
2424 | qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit; |
2425 | } |
2426 | |
2427 | if (ip_version == 4) { |
2428 | qp->qp1_hdr.ip4.tos = 0; |
2429 | qp->qp1_hdr.ip4.id = 0; |
2430 | qp->qp1_hdr.ip4.frag_off = htons(IP_DF); |
2431 | qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit; |
2432 | |
2433 | memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4); |
2434 | memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4); |
2435 | qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(header: &qp->qp1_hdr); |
2436 | } |
2437 | |
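	/* RoCE v2 is UDP encapsulated: the destination port is the IANA
	 * assigned RoCE v2 port (ROCE_V2_UDP_DPORT), the source port is a
	 * fixed driver-chosen value, and the UDP checksum is left at zero,
	 * which RoCE v2 permits.
	 */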
2438 | if (is_udp) { |
2439 | qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT); |
2440 | qp->qp1_hdr.udp.sport = htons(0x8CD1); |
2441 | qp->qp1_hdr.udp.csum = 0; |
2442 | } |
2443 | |
2444 | /* BTH */ |
2445 | if (wr->opcode == IB_WR_SEND_WITH_IMM) { |
2446 | qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; |
2447 | qp->qp1_hdr.immediate_present = 1; |
2448 | } else { |
2449 | qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY; |
2450 | } |
2451 | if (wr->send_flags & IB_SEND_SOLICITED) |
2452 | qp->qp1_hdr.bth.solicited_event = 1; |
2453 | /* pad_count */ |
2454 | qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3; |
2455 | |
2456 | /* P_key for QP1 is for all members */ |
2457 | qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF); |
2458 | qp->qp1_hdr.bth.destination_qpn = IB_QP1; |
2459 | qp->qp1_hdr.bth.ack_req = 0; |
2460 | qp->send_psn++; |
2461 | qp->send_psn &= BTH_PSN_MASK; |
2462 | qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn); |
2463 | /* DETH */ |
	/* Use the privileged Q_Key for QP1 */
2465 | qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY); |
2466 | qp->qp1_hdr.deth.source_qpn = IB_QP1; |
2467 | |
2468 | /* Pack the QP1 to the transmit buffer */ |
2469 | buf = bnxt_qplib_get_qp1_sq_buf(qp: &qp->qplib_qp, sge: &sge); |
2470 | if (buf) { |
2471 | ib_ud_header_pack(header: &qp->qp1_hdr, buf); |
2472 | for (i = wqe->num_sge; i; i--) { |
2473 | wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr; |
2474 | wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey; |
2475 | wqe->sg_list[i].size = wqe->sg_list[i - 1].size; |
2476 | } |
2477 | |
		/*
		 * The max header buffer size for IPv6 RoCE v2 is 86 bytes,
		 * which is the same as the QP1 SQ header buffer.
		 * The header buffer size for IPv4 RoCE v2 is only 66 bytes:
		 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
		 * Subtract 20 bytes from the QP1 SQ header buffer size.
		 */
2485 | if (is_udp && ip_version == 4) |
2486 | sge.size -= 20; |
		/*
		 * The max header buffer size for RoCE v1 is 78 bytes:
		 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
		 * Subtract 8 bytes from the QP1 SQ header buffer size.
		 */
2492 | if (!is_udp) |
2493 | sge.size -= 8; |
2494 | |
2495 | /* Subtract 4 bytes for non vlan packets */ |
2496 | if (!is_vlan) |
2497 | sge.size -= 4; |
2498 | |
2499 | wqe->sg_list[0].addr = sge.addr; |
2500 | wqe->sg_list[0].lkey = sge.lkey; |
2501 | wqe->sg_list[0].size = sge.size; |
2502 | wqe->num_sge++; |
2503 | |
2504 | } else { |
2505 | ibdev_err(ibdev: &qp->rdev->ibdev, format: "QP1 buffer is empty!"); |
2506 | rc = -ENOMEM; |
2507 | } |
2508 | return rc; |
2509 | } |
2510 | |
/* The MAD layer only provides a receive SGE large enough for the
 * ib_grh plus the MAD datagram; no Ethernet headers, Ethertype, BTH,
 * DETH, or RoCE iCRC are included. The Cu+ solution must therefore
 * provide a buffer for the entire receive packet (334 bytes with no
 * VLAN) and then copy the GRH and the MAD datagram out to the SGE
 * provided by the ULP.
 */
2517 | static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp, |
2518 | const struct ib_recv_wr *wr, |
2519 | struct bnxt_qplib_swqe *wqe, |
2520 | int payload_size) |
2521 | { |
2522 | struct bnxt_re_sqp_entries *sqp_entry; |
2523 | struct bnxt_qplib_sge ref, sge; |
2524 | struct bnxt_re_dev *rdev; |
2525 | u32 rq_prod_index; |
2526 | |
2527 | rdev = qp->rdev; |
2528 | |
2529 | rq_prod_index = bnxt_qplib_get_rq_prod_index(qp: &qp->qplib_qp); |
2530 | |
2531 | if (!bnxt_qplib_get_qp1_rq_buf(qp: &qp->qplib_qp, sge: &sge)) |
2532 | return -ENOMEM; |
2533 | |
	/* Create one SGE to receive the entire
	 * Ethernet packet
	 */
	/* Save the reference from the ULP */
2538 | ref.addr = wqe->sg_list[0].addr; |
2539 | ref.lkey = wqe->sg_list[0].lkey; |
2540 | ref.size = wqe->sg_list[0].size; |
2541 | |
2542 | sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index]; |
2543 | |
2544 | /* SGE 1 */ |
2545 | wqe->sg_list[0].addr = sge.addr; |
2546 | wqe->sg_list[0].lkey = sge.lkey; |
2547 | wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2; |
2548 | sge.size -= wqe->sg_list[0].size; |
2549 | |
2550 | sqp_entry->sge.addr = ref.addr; |
2551 | sqp_entry->sge.lkey = ref.lkey; |
2552 | sqp_entry->sge.size = ref.size; |
2553 | /* Store the wrid for reporting completion */ |
2554 | sqp_entry->wrid = wqe->wr_id; |
	/* Change wqe->wr_id to the table index */
2556 | wqe->wr_id = rq_prod_index; |
2557 | return 0; |
2558 | } |
2559 | |
2560 | static int is_ud_qp(struct bnxt_re_qp *qp) |
2561 | { |
2562 | return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD || |
2563 | qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI); |
2564 | } |
2565 | |
2566 | static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp, |
2567 | const struct ib_send_wr *wr, |
2568 | struct bnxt_qplib_swqe *wqe) |
2569 | { |
2570 | struct bnxt_re_ah *ah = NULL; |
2571 | |
2572 | if (is_ud_qp(qp)) { |
2573 | ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah); |
2574 | wqe->send.q_key = ud_wr(wr)->remote_qkey; |
2575 | wqe->send.dst_qp = ud_wr(wr)->remote_qpn; |
2576 | wqe->send.avid = ah->qplib_ah.id; |
2577 | } |
2578 | switch (wr->opcode) { |
2579 | case IB_WR_SEND: |
2580 | wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND; |
2581 | break; |
2582 | case IB_WR_SEND_WITH_IMM: |
2583 | wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM; |
2584 | wqe->send.imm_data = be32_to_cpu(wr->ex.imm_data); |
2585 | break; |
2586 | case IB_WR_SEND_WITH_INV: |
2587 | wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV; |
2588 | wqe->send.inv_key = wr->ex.invalidate_rkey; |
2589 | break; |
2590 | default: |
2591 | return -EINVAL; |
2592 | } |
2593 | if (wr->send_flags & IB_SEND_SIGNALED) |
2594 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; |
2595 | if (wr->send_flags & IB_SEND_FENCE) |
2596 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; |
2597 | if (wr->send_flags & IB_SEND_SOLICITED) |
2598 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; |
2599 | if (wr->send_flags & IB_SEND_INLINE) |
2600 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE; |
2601 | |
2602 | return 0; |
2603 | } |
2604 | |
2605 | static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr, |
2606 | struct bnxt_qplib_swqe *wqe) |
2607 | { |
2608 | switch (wr->opcode) { |
2609 | case IB_WR_RDMA_WRITE: |
2610 | wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE; |
2611 | break; |
2612 | case IB_WR_RDMA_WRITE_WITH_IMM: |
2613 | wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM; |
2614 | wqe->rdma.imm_data = be32_to_cpu(wr->ex.imm_data); |
2615 | break; |
2616 | case IB_WR_RDMA_READ: |
2617 | wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ; |
2618 | wqe->rdma.inv_key = wr->ex.invalidate_rkey; |
2619 | break; |
2620 | default: |
2621 | return -EINVAL; |
2622 | } |
2623 | wqe->rdma.remote_va = rdma_wr(wr)->remote_addr; |
2624 | wqe->rdma.r_key = rdma_wr(wr)->rkey; |
2625 | if (wr->send_flags & IB_SEND_SIGNALED) |
2626 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; |
2627 | if (wr->send_flags & IB_SEND_FENCE) |
2628 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; |
2629 | if (wr->send_flags & IB_SEND_SOLICITED) |
2630 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; |
2631 | if (wr->send_flags & IB_SEND_INLINE) |
2632 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE; |
2633 | |
2634 | return 0; |
2635 | } |
2636 | |
2637 | static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr, |
2638 | struct bnxt_qplib_swqe *wqe) |
2639 | { |
2640 | switch (wr->opcode) { |
2641 | case IB_WR_ATOMIC_CMP_AND_SWP: |
2642 | wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP; |
2643 | wqe->atomic.cmp_data = atomic_wr(wr)->compare_add; |
2644 | wqe->atomic.swap_data = atomic_wr(wr)->swap; |
2645 | break; |
2646 | case IB_WR_ATOMIC_FETCH_AND_ADD: |
2647 | wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD; |
2648 | wqe->atomic.cmp_data = atomic_wr(wr)->compare_add; |
2649 | break; |
2650 | default: |
2651 | return -EINVAL; |
2652 | } |
2653 | wqe->atomic.remote_va = atomic_wr(wr)->remote_addr; |
2654 | wqe->atomic.r_key = atomic_wr(wr)->rkey; |
2655 | if (wr->send_flags & IB_SEND_SIGNALED) |
2656 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; |
2657 | if (wr->send_flags & IB_SEND_FENCE) |
2658 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; |
2659 | if (wr->send_flags & IB_SEND_SOLICITED) |
2660 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; |
2661 | return 0; |
2662 | } |
2663 | |
2664 | static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr, |
2665 | struct bnxt_qplib_swqe *wqe) |
2666 | { |
2667 | wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV; |
2668 | wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey; |
2669 | |
2670 | if (wr->send_flags & IB_SEND_SIGNALED) |
2671 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; |
2672 | if (wr->send_flags & IB_SEND_SOLICITED) |
2673 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; |
2674 | |
2675 | return 0; |
2676 | } |
2677 | |
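/*
 * Build a REG_MR WQE from an ib_reg_wr.  The PBL comes from the MR's
 * fast-register page list, the IB access flags are translated to
 * SQ_FR_PMR_ACCESS_CNTL_* bits, and the page-size fields are encoded as
 * log2 values relative to the 4K base (PAGE_SHIFT_4K).
 */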
2678 | static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr, |
2679 | struct bnxt_qplib_swqe *wqe) |
2680 | { |
2681 | struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr); |
2682 | struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl; |
2683 | int access = wr->access; |
2684 | |
2685 | wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0]; |
2686 | wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0]; |
2687 | wqe->frmr.page_list = mr->pages; |
2688 | wqe->frmr.page_list_len = mr->npages; |
2689 | wqe->frmr.levels = qplib_frpl->hwq.level; |
2690 | wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR; |
2691 | |
2692 | if (wr->wr.send_flags & IB_SEND_SIGNALED) |
2693 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; |
2694 | |
2695 | if (access & IB_ACCESS_LOCAL_WRITE) |
2696 | wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE; |
2697 | if (access & IB_ACCESS_REMOTE_READ) |
2698 | wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ; |
2699 | if (access & IB_ACCESS_REMOTE_WRITE) |
2700 | wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE; |
2701 | if (access & IB_ACCESS_REMOTE_ATOMIC) |
2702 | wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC; |
2703 | if (access & IB_ACCESS_MW_BIND) |
2704 | wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND; |
2705 | |
2706 | wqe->frmr.l_key = wr->key; |
2707 | wqe->frmr.length = wr->mr->length; |
2708 | wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K); |
2709 | wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K); |
2710 | wqe->frmr.va = wr->mr->iova; |
2711 | return 0; |
2712 | } |
2713 | |
2714 | static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev, |
2715 | const struct ib_send_wr *wr, |
2716 | struct bnxt_qplib_swqe *wqe) |
2717 | { |
2718 | /* Copy the inline data to the data field */ |
2719 | u8 *in_data; |
2720 | u32 i, sge_len; |
2721 | void *sge_addr; |
2722 | |
2723 | in_data = wqe->inline_data; |
2724 | for (i = 0; i < wr->num_sge; i++) { |
2725 | sge_addr = (void *)(unsigned long) |
2726 | wr->sg_list[i].addr; |
2727 | sge_len = wr->sg_list[i].length; |
2728 | |
2729 | if ((sge_len + wqe->inline_len) > |
2730 | BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) { |
2731 | ibdev_err(ibdev: &rdev->ibdev, |
2732 | format: "Inline data size requested > supported value"); |
2733 | return -EINVAL; |
2734 | } |
2735 | sge_len = wr->sg_list[i].length; |
2736 | |
2737 | memcpy(in_data, sge_addr, sge_len); |
2738 | in_data += wr->sg_list[i].length; |
2739 | wqe->inline_len += wr->sg_list[i].length; |
2740 | } |
2741 | return wqe->inline_len; |
2742 | } |
2743 | |
2744 | static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev, |
2745 | const struct ib_send_wr *wr, |
2746 | struct bnxt_qplib_swqe *wqe) |
2747 | { |
2748 | int payload_sz = 0; |
2749 | |
2750 | if (wr->send_flags & IB_SEND_INLINE) |
2751 | payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe); |
2752 | else |
2753 | payload_sz = bnxt_re_build_sgl(ib_sg_list: wr->sg_list, sg_list: wqe->sg_list, |
2754 | num: wqe->num_sge); |
2755 | |
2756 | return payload_sz; |
2757 | } |
2758 | |
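/*
 * Work around a hardware stall seen on UD/GSI/raw-Ethertype QPs on the
 * older chip generations: once BNXT_RE_UD_QP_HW_STALL WQEs have been
 * posted, a modify-QP to RTS is issued, apparently to nudge the
 * hardware, and the WQE count is reset.
 */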
2759 | static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp) |
2760 | { |
2761 | if ((qp->ib_qp.qp_type == IB_QPT_UD || |
2762 | qp->ib_qp.qp_type == IB_QPT_GSI || |
2763 | qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) && |
2764 | qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) { |
2765 | int qp_attr_mask; |
2766 | struct ib_qp_attr qp_attr; |
2767 | |
2768 | qp_attr_mask = IB_QP_STATE; |
2769 | qp_attr.qp_state = IB_QPS_RTS; |
2770 | bnxt_re_modify_qp(ib_qp: &qp->ib_qp, qp_attr: &qp_attr, qp_attr_mask, NULL); |
2771 | qp->qplib_qp.wqe_cnt = 0; |
2772 | } |
2773 | } |
2774 | |
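/*
 * Post-send variant used for the shadow GSI QP: every WR is forced to a
 * plain SEND (wqe.type is overwritten below) before being handed to the
 * qplib layer, and the doorbell is rung once at the end.
 */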
2775 | static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, |
2776 | struct bnxt_re_qp *qp, |
2777 | const struct ib_send_wr *wr) |
2778 | { |
2779 | int rc = 0, payload_sz = 0; |
2780 | unsigned long flags; |
2781 | |
2782 | spin_lock_irqsave(&qp->sq_lock, flags); |
2783 | while (wr) { |
2784 | struct bnxt_qplib_swqe wqe = {}; |
2785 | |
2786 | /* Common */ |
2787 | wqe.num_sge = wr->num_sge; |
2788 | if (wr->num_sge > qp->qplib_qp.sq.max_sge) { |
2789 | ibdev_err(ibdev: &rdev->ibdev, |
2790 | format: "Limit exceeded for Send SGEs"); |
2791 | rc = -EINVAL; |
2792 | goto bad; |
2793 | } |
2794 | |
2795 | payload_sz = bnxt_re_copy_wr_payload(rdev: qp->rdev, wr, wqe: &wqe); |
2796 | if (payload_sz < 0) { |
2797 | rc = -EINVAL; |
2798 | goto bad; |
2799 | } |
2800 | wqe.wr_id = wr->wr_id; |
2801 | |
2802 | wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND; |
2803 | |
2804 | rc = bnxt_re_build_send_wqe(qp, wr, wqe: &wqe); |
2805 | if (!rc) |
2806 | rc = bnxt_qplib_post_send(qp: &qp->qplib_qp, wqe: &wqe); |
2807 | bad: |
2808 | if (rc) { |
2809 | ibdev_err(ibdev: &rdev->ibdev, |
2810 | format: "Post send failed opcode = %#x rc = %d", |
2811 | wr->opcode, rc); |
2812 | break; |
2813 | } |
2814 | wr = wr->next; |
2815 | } |
2816 | bnxt_qplib_post_send_db(qp: &qp->qplib_qp); |
2817 | if (!bnxt_qplib_is_chip_gen_p5_p7(cctx: qp->rdev->chip_ctx)) |
2818 | bnxt_ud_qp_hw_stall_workaround(qp); |
2819 | spin_unlock_irqrestore(lock: &qp->sq_lock, flags); |
2820 | return rc; |
2821 | } |
2822 | |
2823 | static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe) |
2824 | { |
	/* Non-wire memory opcodes need an unconditional fence to
	 * work as expected.
	 */
2828 | if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV || |
2829 | wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR || |
2830 | wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR || |
2831 | wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW) |
2832 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; |
2833 | } |
2834 | |
2835 | int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr, |
2836 | const struct ib_send_wr **bad_wr) |
2837 | { |
2838 | struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); |
2839 | struct bnxt_qplib_swqe wqe; |
2840 | int rc = 0, payload_sz = 0; |
2841 | unsigned long flags; |
2842 | |
2843 | spin_lock_irqsave(&qp->sq_lock, flags); |
2844 | while (wr) { |
2845 | /* House keeping */ |
2846 | memset(&wqe, 0, sizeof(wqe)); |
2847 | |
2848 | /* Common */ |
2849 | wqe.num_sge = wr->num_sge; |
2850 | if (wr->num_sge > qp->qplib_qp.sq.max_sge) { |
2851 | ibdev_err(ibdev: &qp->rdev->ibdev, |
2852 | format: "Limit exceeded for Send SGEs"); |
2853 | rc = -EINVAL; |
2854 | goto bad; |
2855 | } |
2856 | |
2857 | payload_sz = bnxt_re_copy_wr_payload(rdev: qp->rdev, wr, wqe: &wqe); |
2858 | if (payload_sz < 0) { |
2859 | rc = -EINVAL; |
2860 | goto bad; |
2861 | } |
2862 | wqe.wr_id = wr->wr_id; |
2863 | |
2864 | switch (wr->opcode) { |
2865 | case IB_WR_SEND: |
2866 | case IB_WR_SEND_WITH_IMM: |
2867 | if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) { |
2868 | rc = bnxt_re_build_qp1_send_v2(qp, wr, wqe: &wqe, |
2869 | payload_size: payload_sz); |
2870 | if (rc) |
2871 | goto bad; |
2872 | wqe.rawqp1.lflags |= |
2873 | SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC; |
2874 | } |
2875 | switch (wr->send_flags) { |
2876 | case IB_SEND_IP_CSUM: |
2877 | wqe.rawqp1.lflags |= |
2878 | SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM; |
2879 | break; |
2880 | default: |
2881 | break; |
2882 | } |
2883 | fallthrough; |
2884 | case IB_WR_SEND_WITH_INV: |
2885 | rc = bnxt_re_build_send_wqe(qp, wr, wqe: &wqe); |
2886 | break; |
2887 | case IB_WR_RDMA_WRITE: |
2888 | case IB_WR_RDMA_WRITE_WITH_IMM: |
2889 | case IB_WR_RDMA_READ: |
2890 | rc = bnxt_re_build_rdma_wqe(wr, wqe: &wqe); |
2891 | break; |
2892 | case IB_WR_ATOMIC_CMP_AND_SWP: |
2893 | case IB_WR_ATOMIC_FETCH_AND_ADD: |
2894 | rc = bnxt_re_build_atomic_wqe(wr, wqe: &wqe); |
2895 | break; |
2896 | case IB_WR_RDMA_READ_WITH_INV: |
2897 | ibdev_err(ibdev: &qp->rdev->ibdev, |
2898 | format: "RDMA Read with Invalidate is not supported"); |
2899 | rc = -EINVAL; |
2900 | goto bad; |
2901 | case IB_WR_LOCAL_INV: |
2902 | rc = bnxt_re_build_inv_wqe(wr, wqe: &wqe); |
2903 | break; |
2904 | case IB_WR_REG_MR: |
2905 | rc = bnxt_re_build_reg_wqe(wr: reg_wr(wr), wqe: &wqe); |
2906 | break; |
2907 | default: |
2908 | /* Unsupported WRs */ |
2909 | ibdev_err(ibdev: &qp->rdev->ibdev, |
2910 | format: "WR (%#x) is not supported", wr->opcode); |
2911 | rc = -EINVAL; |
2912 | goto bad; |
2913 | } |
2914 | if (!rc) { |
2915 | if (!bnxt_qplib_is_chip_gen_p5_p7(cctx: qp->rdev->chip_ctx)) |
2916 | bnxt_re_legacy_set_uc_fence(wqe: &wqe); |
2917 | rc = bnxt_qplib_post_send(qp: &qp->qplib_qp, wqe: &wqe); |
2918 | } |
2919 | bad: |
2920 | if (rc) { |
2921 | ibdev_err(ibdev: &qp->rdev->ibdev, |
2922 | format: "post_send failed op:%#x qps = %#x rc = %d\n", |
2923 | wr->opcode, qp->qplib_qp.state, rc); |
2924 | *bad_wr = wr; |
2925 | break; |
2926 | } |
2927 | wr = wr->next; |
2928 | } |
2929 | bnxt_qplib_post_send_db(qp: &qp->qplib_qp); |
2930 | if (!bnxt_qplib_is_chip_gen_p5_p7(cctx: qp->rdev->chip_ctx)) |
2931 | bnxt_ud_qp_hw_stall_workaround(qp); |
2932 | spin_unlock_irqrestore(lock: &qp->sq_lock, flags); |
2933 | |
2934 | return rc; |
2935 | } |
2936 | |
2937 | static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev, |
2938 | struct bnxt_re_qp *qp, |
2939 | const struct ib_recv_wr *wr) |
2940 | { |
2941 | struct bnxt_qplib_swqe wqe; |
2942 | int rc = 0; |
2943 | |
2944 | while (wr) { |
2945 | /* House keeping */ |
2946 | memset(&wqe, 0, sizeof(wqe)); |
2947 | |
2948 | /* Common */ |
2949 | wqe.num_sge = wr->num_sge; |
2950 | if (wr->num_sge > qp->qplib_qp.rq.max_sge) { |
2951 | ibdev_err(ibdev: &rdev->ibdev, |
2952 | format: "Limit exceeded for Receive SGEs"); |
2953 | rc = -EINVAL; |
2954 | break; |
2955 | } |
2956 | bnxt_re_build_sgl(ib_sg_list: wr->sg_list, sg_list: wqe.sg_list, num: wr->num_sge); |
2957 | wqe.wr_id = wr->wr_id; |
2958 | wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV; |
2959 | |
2960 | rc = bnxt_qplib_post_recv(qp: &qp->qplib_qp, wqe: &wqe); |
2961 | if (rc) |
2962 | break; |
2963 | |
2964 | wr = wr->next; |
2965 | } |
2966 | if (!rc) |
2967 | bnxt_qplib_post_recv_db(qp: &qp->qplib_qp); |
2968 | return rc; |
2969 | } |
2970 | |
2971 | int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr, |
2972 | const struct ib_recv_wr **bad_wr) |
2973 | { |
2974 | struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); |
2975 | struct bnxt_qplib_swqe wqe; |
2976 | int rc = 0, payload_sz = 0; |
2977 | unsigned long flags; |
2978 | u32 count = 0; |
2979 | |
2980 | spin_lock_irqsave(&qp->rq_lock, flags); |
2981 | while (wr) { |
2982 | /* House keeping */ |
2983 | memset(&wqe, 0, sizeof(wqe)); |
2984 | |
2985 | /* Common */ |
2986 | wqe.num_sge = wr->num_sge; |
2987 | if (wr->num_sge > qp->qplib_qp.rq.max_sge) { |
2988 | ibdev_err(ibdev: &qp->rdev->ibdev, |
2989 | format: "Limit exceeded for Receive SGEs"); |
2990 | rc = -EINVAL; |
2991 | *bad_wr = wr; |
2992 | break; |
2993 | } |
2994 | |
2995 | payload_sz = bnxt_re_build_sgl(ib_sg_list: wr->sg_list, sg_list: wqe.sg_list, |
2996 | num: wr->num_sge); |
2997 | wqe.wr_id = wr->wr_id; |
2998 | wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV; |
2999 | |
3000 | if (ib_qp->qp_type == IB_QPT_GSI && |
3001 | qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI) |
3002 | rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, wqe: &wqe, |
3003 | payload_size: payload_sz); |
3004 | if (!rc) |
3005 | rc = bnxt_qplib_post_recv(qp: &qp->qplib_qp, wqe: &wqe); |
3006 | if (rc) { |
3007 | *bad_wr = wr; |
3008 | break; |
3009 | } |
3010 | |
		/* Ring the doorbell once the number of posted RQEs reaches the threshold */
3012 | if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) { |
3013 | bnxt_qplib_post_recv_db(qp: &qp->qplib_qp); |
3014 | count = 0; |
3015 | } |
3016 | |
3017 | wr = wr->next; |
3018 | } |
3019 | |
3020 | if (count) |
3021 | bnxt_qplib_post_recv_db(qp: &qp->qplib_qp); |
3022 | |
3023 | spin_unlock_irqrestore(lock: &qp->rq_lock, flags); |
3024 | |
3025 | return rc; |
3026 | } |
3027 | |
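/*
 * Spread completion queues across the available notification queues:
 * bnxt_re_get_nq() picks the least-loaded NQ under nqr->load_lock and
 * bumps its load count; bnxt_re_put_nq() drops the count again when the
 * CQ is destroyed.
 */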
3028 | static struct bnxt_qplib_nq *bnxt_re_get_nq(struct bnxt_re_dev *rdev) |
3029 | { |
3030 | int min, indx; |
3031 | |
3032 | mutex_lock(&rdev->nqr->load_lock); |
3033 | for (indx = 0, min = 0; indx < (rdev->nqr->num_msix - 1); indx++) { |
3034 | if (rdev->nqr->nq[min].load > rdev->nqr->nq[indx].load) |
3035 | min = indx; |
3036 | } |
3037 | rdev->nqr->nq[min].load++; |
3038 | mutex_unlock(lock: &rdev->nqr->load_lock); |
3039 | |
3040 | return &rdev->nqr->nq[min]; |
3041 | } |
3042 | |
3043 | static void bnxt_re_put_nq(struct bnxt_re_dev *rdev, struct bnxt_qplib_nq *nq) |
3044 | { |
3045 | mutex_lock(&rdev->nqr->load_lock); |
3046 | nq->load--; |
3047 | mutex_unlock(lock: &rdev->nqr->load_lock); |
3048 | } |
3049 | |
3050 | /* Completion Queues */ |
3051 | int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) |
3052 | { |
3053 | struct bnxt_qplib_chip_ctx *cctx; |
3054 | struct bnxt_qplib_nq *nq; |
3055 | struct bnxt_re_dev *rdev; |
3056 | struct bnxt_re_cq *cq; |
3057 | |
3058 | cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); |
3059 | rdev = cq->rdev; |
3060 | nq = cq->qplib_cq.nq; |
3061 | cctx = rdev->chip_ctx; |
3062 | |
3063 | if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) { |
3064 | free_page((unsigned long)cq->uctx_cq_page); |
3065 | hash_del(node: &cq->hash_entry); |
3066 | } |
3067 | bnxt_qplib_destroy_cq(res: &rdev->qplib_res, cq: &cq->qplib_cq); |
3068 | |
3069 | bnxt_re_put_nq(rdev, nq); |
3070 | ib_umem_release(umem: cq->umem); |
3071 | |
3072 | atomic_dec(v: &rdev->stats.res.cq_count); |
3073 | kfree(objp: cq->cql); |
3074 | return 0; |
3075 | } |
3076 | |
3077 | int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, |
3078 | struct uverbs_attr_bundle *attrs) |
3079 | { |
3080 | struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq); |
3081 | struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev); |
3082 | struct ib_udata *udata = &attrs->driver_udata; |
3083 | struct bnxt_re_ucontext *uctx = |
3084 | rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx); |
3085 | struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr; |
3086 | struct bnxt_qplib_chip_ctx *cctx; |
3087 | int cqe = attr->cqe; |
3088 | int rc, entries; |
3089 | u32 active_cqs; |
3090 | |
3091 | if (attr->flags) |
3092 | return -EOPNOTSUPP; |
3093 | |
3094 | /* Validate CQ fields */ |
3095 | if (cqe < 1 || cqe > dev_attr->max_cq_wqes) { |
3096 | ibdev_err(ibdev: &rdev->ibdev, format: "Failed to create CQ -max exceeded"); |
3097 | return -EINVAL; |
3098 | } |
3099 | |
3100 | cq->rdev = rdev; |
3101 | cctx = rdev->chip_ctx; |
3102 | cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq); |
3103 | |
3104 | entries = bnxt_re_init_depth(ent: cqe + 1, uctx); |
3105 | if (entries > dev_attr->max_cq_wqes + 1) |
3106 | entries = dev_attr->max_cq_wqes + 1; |
3107 | |
3108 | cq->qplib_cq.sg_info.pgsize = PAGE_SIZE; |
3109 | cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT; |
3110 | if (udata) { |
		struct bnxt_re_cq_req req;

		if (ib_copy_from_udata(&req, udata, sizeof(req))) {
3113 | rc = -EFAULT; |
3114 | goto fail; |
3115 | } |
3116 | |
3117 | cq->umem = ib_umem_get(device: &rdev->ibdev, addr: req.cq_va, |
3118 | size: entries * sizeof(struct cq_base), |
3119 | access: IB_ACCESS_LOCAL_WRITE); |
3120 | if (IS_ERR(ptr: cq->umem)) { |
3121 | rc = PTR_ERR(ptr: cq->umem); |
3122 | goto fail; |
3123 | } |
3124 | cq->qplib_cq.sg_info.umem = cq->umem; |
3125 | cq->qplib_cq.dpi = &uctx->dpi; |
3126 | } else { |
3127 | cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL); |
3128 | cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe), |
3129 | GFP_KERNEL); |
3130 | if (!cq->cql) { |
3131 | rc = -ENOMEM; |
3132 | goto fail; |
3133 | } |
3134 | |
3135 | cq->qplib_cq.dpi = &rdev->dpi_privileged; |
3136 | } |
3137 | cq->qplib_cq.max_wqe = entries; |
3138 | cq->qplib_cq.coalescing = &rdev->cq_coalescing; |
3139 | cq->qplib_cq.nq = bnxt_re_get_nq(rdev); |
3140 | cq->qplib_cq.cnq_hw_ring_id = cq->qplib_cq.nq->ring_id; |
3141 | |
3142 | rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq); |
3143 | if (rc) { |
3144 | ibdev_err(&rdev->ibdev, "Failed to create HW CQ"); |
3145 | goto fail; |
3146 | } |
3147 | |
3148 | cq->ib_cq.cqe = entries; |
3149 | cq->cq_period = cq->qplib_cq.period; |
3150 | |
3151 | active_cqs = atomic_inc_return(&rdev->stats.res.cq_count); |
3152 | if (active_cqs > rdev->stats.res.cq_watermark) |
3153 | rdev->stats.res.cq_watermark = active_cqs; |
3154 | spin_lock_init(&cq->cq_lock); |
3155 | |
3156 | if (udata) { |
3157 | struct bnxt_re_cq_resp resp = {}; |
3158 | |
3159 | if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) { |
3160 | hash_add(rdev->cq_hash, &cq->hash_entry, cq->qplib_cq.id); |
3161 | /* Allocate a page */ |
3162 | cq->uctx_cq_page = (void *)get_zeroed_page(GFP_KERNEL); |
3163 | if (!cq->uctx_cq_page) { |
3164 | rc = -ENOMEM; |
3165 | goto c2fail; |
3166 | } |
3167 | resp.comp_mask |= BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT; |
3168 | } |
3169 | resp.cqid = cq->qplib_cq.id; |
3170 | resp.tail = cq->qplib_cq.hwq.cons; |
3171 | resp.phase = cq->qplib_cq.period; |
3172 | resp.rsvd = 0; |
3173 | rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen)); |
3174 | if (rc) { |
3175 | ibdev_err(&rdev->ibdev, "Failed to copy CQ udata"); |
3176 | bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq); |
3177 | goto free_mem; |
3178 | } |
3179 | } |
3180 | |
3181 | return 0; |
3182 | |
3183 | free_mem: |
3184 | free_page((unsigned long)cq->uctx_cq_page); |
3185 | c2fail: |
3186 | ib_umem_release(cq->umem); |
3187 | fail: |
3188 | kfree(cq->cql); |
3189 | return rc; |
3190 | } |
3191 | |
3192 | static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq) |
3193 | { |
3194 | struct bnxt_re_dev *rdev = cq->rdev; |
3195 | |
3196 | bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq); |
3197 | |
3198 | cq->qplib_cq.max_wqe = cq->resize_cqe; |
3199 | if (cq->resize_umem) { |
3200 | ib_umem_release(cq->umem); |
3201 | cq->umem = cq->resize_umem; |
3202 | cq->resize_umem = NULL; |
3203 | cq->resize_cqe = 0; |
3204 | } |
3205 | } |
3206 | |
3207 | int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) |
3208 | { |
3209 | struct bnxt_qplib_sg_info sg_info = {}; |
3210 | struct bnxt_qplib_dpi *orig_dpi = NULL; |
3211 | struct bnxt_qplib_dev_attr *dev_attr; |
3212 | struct bnxt_re_ucontext *uctx = NULL; |
3213 | struct bnxt_re_resize_cq_req req; |
3214 | struct bnxt_re_dev *rdev; |
3215 | struct bnxt_re_cq *cq; |
3216 | int rc, entries; |
3217 | |
3218 | cq = container_of(ibcq, struct bnxt_re_cq, ib_cq); |
3219 | rdev = cq->rdev; |
3220 | dev_attr = rdev->dev_attr; |
3221 | if (!ibcq->uobject) { |
3222 | ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported"); |
3223 | return -EOPNOTSUPP; |
3224 | } |
3225 | |
3226 | if (cq->resize_umem) { |
3227 | ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - Busy", |
3228 | cq->qplib_cq.id); |
3229 | return -EBUSY; |
3230 | } |
3231 | |
3232 | /* Check that the requested CQ depth is within the supported range */ |
3233 | if (cqe < 1 || cqe > dev_attr->max_cq_wqes) { |
3234 | ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - out of range cqe %d", |
3235 | cq->qplib_cq.id, cqe); |
3236 | return -EINVAL; |
3237 | } |
3238 | |
3239 | uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx); |
3240 | entries = bnxt_re_init_depth(cqe + 1, uctx); |
3241 | if (entries > dev_attr->max_cq_wqes + 1) |
3242 | entries = dev_attr->max_cq_wqes + 1; |
3243 | |
3244 | /* uverbs consumer */ |
3245 | if (ib_copy_from_udata(&req, udata, sizeof(req))) { |
3246 | rc = -EFAULT; |
3247 | goto fail; |
3248 | } |
3249 | |
3250 | cq->resize_umem = ib_umem_get(&rdev->ibdev, req.cq_va, |
3251 | entries * sizeof(struct cq_base), |
3252 | IB_ACCESS_LOCAL_WRITE); |
3253 | if (IS_ERR(cq->resize_umem)) { |
3254 | rc = PTR_ERR(cq->resize_umem); |
3255 | cq->resize_umem = NULL; |
3256 | ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %d\n", |
3257 | __func__, rc); |
3258 | goto fail; |
3259 | } |
3260 | cq->resize_cqe = entries; |
3261 | memcpy(&sg_info, &cq->qplib_cq.sg_info, sizeof(sg_info)); |
3262 | orig_dpi = cq->qplib_cq.dpi; |
3263 | |
3264 | cq->qplib_cq.sg_info.umem = cq->resize_umem; |
3265 | cq->qplib_cq.sg_info.pgsize = PAGE_SIZE; |
3266 | cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT; |
3267 | cq->qplib_cq.dpi = &uctx->dpi; |
3268 | |
3269 | rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries); |
3270 | if (rc) { |
3271 | ibdev_err(&rdev->ibdev, "Resize HW CQ %#x failed!", |
3272 | cq->qplib_cq.id); |
3273 | goto fail; |
3274 | } |
3275 | |
3276 | cq->ib_cq.cqe = cq->resize_cqe; |
3277 | atomic_inc(&rdev->stats.res.resize_count); |
3278 | |
3279 | return 0; |
3280 | |
3281 | fail: |
3282 | if (cq->resize_umem) { |
3283 | ib_umem_release(cq->resize_umem); |
3284 | cq->resize_umem = NULL; |
3285 | cq->resize_cqe = 0; |
3286 | memcpy(&cq->qplib_cq.sg_info, &sg_info, sizeof(sg_info)); |
3287 | cq->qplib_cq.dpi = orig_dpi; |
3288 | } |
3289 | return rc; |
3290 | } |
3291 | |
3292 | static u8 __req_to_ib_wc_status(u8 qstatus) |
3293 | { |
3294 | switch (qstatus) { |
3295 | case CQ_REQ_STATUS_OK: |
3296 | return IB_WC_SUCCESS; |
3297 | case CQ_REQ_STATUS_BAD_RESPONSE_ERR: |
3298 | return IB_WC_BAD_RESP_ERR; |
3299 | case CQ_REQ_STATUS_LOCAL_LENGTH_ERR: |
3300 | return IB_WC_LOC_LEN_ERR; |
3301 | case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR: |
3302 | return IB_WC_LOC_QP_OP_ERR; |
3303 | case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR: |
3304 | return IB_WC_LOC_PROT_ERR; |
3305 | case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR: |
3306 | return IB_WC_GENERAL_ERR; |
3307 | case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR: |
3308 | return IB_WC_REM_INV_REQ_ERR; |
3309 | case CQ_REQ_STATUS_REMOTE_ACCESS_ERR: |
3310 | return IB_WC_REM_ACCESS_ERR; |
3311 | case CQ_REQ_STATUS_REMOTE_OPERATION_ERR: |
3312 | return IB_WC_REM_OP_ERR; |
3313 | case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR: |
3314 | return IB_WC_RNR_RETRY_EXC_ERR; |
3315 | case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR: |
3316 | return IB_WC_RETRY_EXC_ERR; |
3317 | case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR: |
3318 | return IB_WC_WR_FLUSH_ERR; |
3319 | default: |
3320 | return IB_WC_GENERAL_ERR; |
3321 | } |
3322 | return 0; |
3323 | } |
3324 | |
3325 | static u8 __rawqp1_to_ib_wc_status(u8 qstatus) |
3326 | { |
3327 | switch (qstatus) { |
3328 | case CQ_RES_RAWETH_QP1_STATUS_OK: |
3329 | return IB_WC_SUCCESS; |
3330 | case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR: |
3331 | return IB_WC_LOC_ACCESS_ERR; |
3332 | case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR: |
3333 | return IB_WC_LOC_LEN_ERR; |
3334 | case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR: |
3335 | return IB_WC_LOC_PROT_ERR; |
3336 | case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR: |
3337 | return IB_WC_LOC_QP_OP_ERR; |
3338 | case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR: |
3339 | return IB_WC_GENERAL_ERR; |
3340 | case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR: |
3341 | return IB_WC_WR_FLUSH_ERR; |
3342 | case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR: |
3343 | return IB_WC_WR_FLUSH_ERR; |
3344 | default: |
3345 | return IB_WC_GENERAL_ERR; |
3346 | } |
3347 | } |
3348 | |
3349 | static u8 __rc_to_ib_wc_status(u8 qstatus) |
3350 | { |
3351 | switch (qstatus) { |
3352 | case CQ_RES_RC_STATUS_OK: |
3353 | return IB_WC_SUCCESS; |
3354 | case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR: |
3355 | return IB_WC_LOC_ACCESS_ERR; |
3356 | case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR: |
3357 | return IB_WC_LOC_LEN_ERR; |
3358 | case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR: |
3359 | return IB_WC_LOC_PROT_ERR; |
3360 | case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR: |
3361 | return IB_WC_LOC_QP_OP_ERR; |
3362 | case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR: |
3363 | return IB_WC_GENERAL_ERR; |
3364 | case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR: |
3365 | return IB_WC_REM_INV_REQ_ERR; |
3366 | case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR: |
3367 | return IB_WC_WR_FLUSH_ERR; |
3368 | case CQ_RES_RC_STATUS_HW_FLUSH_ERR: |
3369 | return IB_WC_WR_FLUSH_ERR; |
3370 | default: |
3371 | return IB_WC_GENERAL_ERR; |
3372 | } |
3373 | } |
3374 | |
3375 | static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe) |
3376 | { |
3377 | switch (cqe->type) { |
3378 | case BNXT_QPLIB_SWQE_TYPE_SEND: |
3379 | wc->opcode = IB_WC_SEND; |
3380 | break; |
3381 | case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM: |
3382 | wc->opcode = IB_WC_SEND; |
3383 | wc->wc_flags |= IB_WC_WITH_IMM; |
3384 | break; |
3385 | case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV: |
3386 | wc->opcode = IB_WC_SEND; |
3387 | wc->wc_flags |= IB_WC_WITH_INVALIDATE; |
3388 | break; |
3389 | case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE: |
3390 | wc->opcode = IB_WC_RDMA_WRITE; |
3391 | break; |
3392 | case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM: |
3393 | wc->opcode = IB_WC_RDMA_WRITE; |
3394 | wc->wc_flags |= IB_WC_WITH_IMM; |
3395 | break; |
3396 | case BNXT_QPLIB_SWQE_TYPE_RDMA_READ: |
3397 | wc->opcode = IB_WC_RDMA_READ; |
3398 | break; |
3399 | case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP: |
3400 | wc->opcode = IB_WC_COMP_SWAP; |
3401 | break; |
3402 | case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD: |
3403 | wc->opcode = IB_WC_FETCH_ADD; |
3404 | break; |
3405 | case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV: |
3406 | wc->opcode = IB_WC_LOCAL_INV; |
3407 | break; |
3408 | case BNXT_QPLIB_SWQE_TYPE_REG_MR: |
3409 | wc->opcode = IB_WC_REG_MR; |
3410 | break; |
3411 | default: |
3412 | wc->opcode = IB_WC_SEND; |
3413 | break; |
3414 | } |
3415 | |
3416 | wc->status = __req_to_ib_wc_status(cqe->status); |
3417 | } |
3418 | |
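| /* Classify a raw QP1 completion as RoCE v1, RoCEv2/IPv4 or RoCEv2/IPv6 |
|  * using the itype and ip_type bits carried in the RAWETH QP1 flags; |
|  * returns a negative value for anything that is not a RoCE packet. |
|  */ |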
3419 | static int bnxt_re_check_packet_type(u16 raweth_qp1_flags, |
3420 | u16 raweth_qp1_flags2) |
3421 | { |
3422 | bool is_ipv6 = false, is_ipv4 = false; |
3423 | |
3424 | /* raweth_qp1_flags Bit 9-6 indicates itype */ |
3425 | if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE) |
3426 | != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE) |
3427 | return -1; |
3428 | |
3429 | if (raweth_qp1_flags2 & |
3430 | CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC && |
3431 | raweth_qp1_flags2 & |
3432 | CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) { |
3433 | /* raweth_qp1_flags2 Bit 8 indicates ip_type. 0 - v4, 1 - v6 */ |
3434 | (raweth_qp1_flags2 & |
3435 | CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ? |
3436 | (is_ipv6 = true) : (is_ipv4 = true); |
3437 | return ((is_ipv6) ? |
3438 | BNXT_RE_ROCEV2_IPV6_PACKET : |
3439 | BNXT_RE_ROCEV2_IPV4_PACKET); |
3440 | } else { |
3441 | return BNXT_RE_ROCE_V1_PACKET; |
3442 | } |
3443 | } |
3444 | |
3445 | static int bnxt_re_to_ib_nw_type(int nw_type) |
3446 | { |
3447 | u8 nw_hdr_type = 0xFF; |
3448 | |
3449 | switch (nw_type) { |
3450 | case BNXT_RE_ROCE_V1_PACKET: |
3451 | nw_hdr_type = RDMA_NETWORK_ROCE_V1; |
3452 | break; |
3453 | case BNXT_RE_ROCEV2_IPV4_PACKET: |
3454 | nw_hdr_type = RDMA_NETWORK_IPV4; |
3455 | break; |
3456 | case BNXT_RE_ROCEV2_IPV6_PACKET: |
3457 | nw_hdr_type = RDMA_NETWORK_IPV6; |
3458 | break; |
3459 | } |
3460 | return nw_hdr_type; |
3461 | } |
3462 | |
3463 | static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev, |
3464 | void *rq_hdr_buf) |
3465 | { |
3466 | u8 *tmp_buf = NULL; |
3467 | struct ethhdr *eth_hdr; |
3468 | u16 eth_type; |
3469 | bool rc = false; |
3470 | |
3471 | tmp_buf = (u8 *)rq_hdr_buf; |
3472 | /* |
3473 | * If dest mac is not same as I/F mac, this could be a |
3474 | * loopback address or multicast address, check whether |
3475 | * it is a loopback packet |
3476 | */ |
3477 | if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) { |
3478 | tmp_buf += 4; |
3479 | /* Check the ether type */ |
3480 | eth_hdr = (struct ethhdr *)tmp_buf; |
3481 | eth_type = ntohs(eth_hdr->h_proto); |
3482 | switch (eth_type) { |
3483 | case ETH_P_IBOE: |
3484 | rc = true; |
3485 | break; |
3486 | case ETH_P_IP: |
3487 | case ETH_P_IPV6: { |
3488 | u32 len; |
3489 | struct udphdr *udp_hdr; |
3490 | |
3491 | len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) : |
3492 | sizeof(struct ipv6hdr)); |
3493 | tmp_buf += sizeof(struct ethhdr) + len; |
3494 | udp_hdr = (struct udphdr *)tmp_buf; |
3495 | if (ntohs(udp_hdr->dest) == |
3496 | ROCE_V2_UDP_DPORT) |
3497 | rc = true; |
3498 | break; |
3499 | } |
3500 | default: |
3501 | break; |
3502 | } |
3503 | } |
3504 | |
3505 | return rc; |
3506 | } |
3507 | |
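| /* Handle a raw Ethernet completion received on the GSI (QP1) queue: |
|  * stash the original CQE, skip the Ethernet/loopback headers in the |
|  * header buffer and re-post the remaining payload to the shadow QP so |
|  * it is later reported as a regular GSI receive completion. |
|  */ |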
3508 | static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp, |
3509 | struct bnxt_qplib_cqe *cqe) |
3510 | { |
3511 | struct bnxt_re_dev *rdev = gsi_qp->rdev; |
3512 | struct bnxt_re_sqp_entries *sqp_entry = NULL; |
3513 | struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp; |
3514 | dma_addr_t shrq_hdr_buf_map; |
3515 | struct ib_sge s_sge[2] = {}; |
3516 | struct ib_sge r_sge[2] = {}; |
3517 | struct bnxt_re_ah *gsi_sah; |
3518 | struct ib_recv_wr rwr = {}; |
3519 | dma_addr_t rq_hdr_buf_map; |
3520 | struct ib_ud_wr udwr = {}; |
3521 | struct ib_send_wr *swr; |
3522 | u32 skip_bytes = 0; |
3523 | int pkt_type = 0; |
3524 | void *rq_hdr_buf; |
3525 | u32 offset = 0; |
3526 | u32 tbl_idx; |
3527 | int rc; |
3528 | |
3529 | swr = &udwr.wr; |
3530 | tbl_idx = cqe->wr_id; |
3531 | |
3532 | rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf + |
3533 | (tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size); |
3534 | rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp, |
3535 | tbl_idx); |
3536 | |
3537 | /* Shadow QP header buffer */ |
3538 | shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp, |
3539 | tbl_idx); |
3540 | sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx]; |
3541 | |
3542 | /* Store this cqe */ |
3543 | memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe)); |
3544 | sqp_entry->qp1_qp = gsi_qp; |
3545 | |
3546 | /* Find packet type from the cqe */ |
3547 | |
3548 | pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags, |
3549 | cqe->raweth_qp1_flags2); |
3550 | if (pkt_type < 0) { |
3551 | ibdev_err(&rdev->ibdev, "Invalid packet\n"); |
3552 | return -EINVAL; |
3553 | } |
3554 | |
3555 | /* Adjust the offset for the user buffer and post in the rq */ |
3556 | |
3557 | if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET) |
3558 | offset = 20; |
3559 | |
3560 | /* |
3561 | * QP1 loopback packet has 4 bytes of internal header before |
3562 | * ether header. Skip these four bytes. |
3563 | */ |
3564 | if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf)) |
3565 | skip_bytes = 4; |
3566 | |
3567 | /* First send SGE. Skip the ether header */ |
3568 | s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE |
3569 | + skip_bytes; |
3570 | s_sge[0].lkey = 0xFFFFFFFF; |
3571 | s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 : |
3572 | BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6; |
3573 | |
3574 | /* Second Send SGE */ |
3575 | s_sge[1].addr = s_sge[0].addr + s_sge[0].length + |
3576 | BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE; |
3577 | if (pkt_type != BNXT_RE_ROCE_V1_PACKET) |
3578 | s_sge[1].addr += 8; |
3579 | s_sge[1].lkey = 0xFFFFFFFF; |
3580 | s_sge[1].length = 256; |
3581 | |
3582 | /* First recv SGE */ |
3583 | |
3584 | r_sge[0].addr = shrq_hdr_buf_map; |
3585 | r_sge[0].lkey = 0xFFFFFFFF; |
3586 | r_sge[0].length = 40; |
3587 | |
3588 | r_sge[1].addr = sqp_entry->sge.addr + offset; |
3589 | r_sge[1].lkey = sqp_entry->sge.lkey; |
3590 | r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset; |
3591 | |
3592 | /* Create receive work request */ |
3593 | rwr.num_sge = 2; |
3594 | rwr.sg_list = r_sge; |
3595 | rwr.wr_id = tbl_idx; |
3596 | rwr.next = NULL; |
3597 | |
3598 | rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr); |
3599 | if (rc) { |
3600 | ibdev_err(&rdev->ibdev, |
3601 | "Failed to post Rx buffers to shadow QP"); |
3602 | return -ENOMEM; |
3603 | } |
3604 | |
3605 | swr->num_sge = 2; |
3606 | swr->sg_list = s_sge; |
3607 | swr->wr_id = tbl_idx; |
3608 | swr->opcode = IB_WR_SEND; |
3609 | swr->next = NULL; |
3610 | gsi_sah = rdev->gsi_ctx.gsi_sah; |
3611 | udwr.ah = &gsi_sah->ib_ah; |
3612 | udwr.remote_qpn = gsi_sqp->qplib_qp.id; |
3613 | udwr.remote_qkey = gsi_sqp->qplib_qp.qkey; |
3614 | |
3615 | /* post data received in the send queue */ |
3616 | return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr); |
3617 | } |
3618 | |
3619 | static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc, |
3620 | struct bnxt_qplib_cqe *cqe) |
3621 | { |
3622 | wc->opcode = IB_WC_RECV; |
3623 | wc->status = __rawqp1_to_ib_wc_status(cqe->status); |
3624 | wc->wc_flags |= IB_WC_GRH; |
3625 | } |
3626 | |
3627 | static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev, |
3628 | u16 vlan_id) |
3629 | { |
3630 | /* |
3631 | * Check if the vlan is configured in the host. If not configured, it |
3632 | * can be a transparent VLAN. So don't report the vlan id. |
3633 | */ |
3634 | if (!__vlan_find_dev_deep_rcu(rdev->netdev, |
3635 | htons(ETH_P_8021Q), vlan_id)) |
3636 | return false; |
3637 | return true; |
3638 | } |
3639 | |
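| /* Extract the VLAN ID and priority from the RAWETH QP1 CQE metadata; |
|  * returns true only when the metadata carries an 802.1Q tag. |
|  */ |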
3640 | static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe, |
3641 | u16 *vid, u8 *sl) |
3642 | { |
3643 | bool ret = false; |
3644 | u32 metadata; |
3645 | u16 tpid; |
3646 | |
3647 | metadata = orig_cqe->raweth_qp1_metadata; |
3648 | if (orig_cqe->raweth_qp1_flags2 & |
3649 | CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) { |
3650 | tpid = ((metadata & |
3651 | CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >> |
3652 | CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT); |
3653 | if (tpid == ETH_P_8021Q) { |
3654 | *vid = metadata & |
3655 | CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK; |
3656 | *sl = (metadata & |
3657 | CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >> |
3658 | CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT; |
3659 | ret = true; |
3660 | } |
3661 | } |
3662 | |
3663 | return ret; |
3664 | } |
3665 | |
3666 | static void bnxt_re_process_res_rc_wc(struct ib_wc *wc, |
3667 | struct bnxt_qplib_cqe *cqe) |
3668 | { |
3669 | wc->opcode = IB_WC_RECV; |
3670 | wc->status = __rc_to_ib_wc_status(cqe->status); |
3671 | |
3672 | if (cqe->flags & CQ_RES_RC_FLAGS_IMM) |
3673 | wc->wc_flags |= IB_WC_WITH_IMM; |
3674 | if (cqe->flags & CQ_RES_RC_FLAGS_INV) |
3675 | wc->wc_flags |= IB_WC_WITH_INVALIDATE; |
3676 | if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) == |
3677 | (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) |
3678 | wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; |
3679 | } |
3680 | |
3681 | static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp, |
3682 | struct ib_wc *wc, |
3683 | struct bnxt_qplib_cqe *cqe) |
3684 | { |
3685 | struct bnxt_re_dev *rdev = gsi_sqp->rdev; |
3686 | struct bnxt_re_qp *gsi_qp = NULL; |
3687 | struct bnxt_qplib_cqe *orig_cqe = NULL; |
3688 | struct bnxt_re_sqp_entries *sqp_entry = NULL; |
3689 | int nw_type; |
3690 | u32 tbl_idx; |
3691 | u16 vlan_id; |
3692 | u8 sl; |
3693 | |
3694 | tbl_idx = cqe->wr_id; |
3695 | |
3696 | sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx]; |
3697 | gsi_qp = sqp_entry->qp1_qp; |
3698 | orig_cqe = &sqp_entry->cqe; |
3699 | |
3700 | wc->wr_id = sqp_entry->wrid; |
3701 | wc->byte_len = orig_cqe->length; |
3702 | wc->qp = &gsi_qp->ib_qp; |
3703 | |
3704 | wc->ex.imm_data = cpu_to_be32(orig_cqe->immdata); |
3705 | wc->src_qp = orig_cqe->src_qp; |
3706 | memcpy(wc->smac, orig_cqe->smac, ETH_ALEN); |
3707 | if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) { |
3708 | if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) { |
3709 | wc->vlan_id = vlan_id; |
3710 | wc->sl = sl; |
3711 | wc->wc_flags |= IB_WC_WITH_VLAN; |
3712 | } |
3713 | } |
3714 | wc->port_num = 1; |
3715 | wc->vendor_err = orig_cqe->status; |
3716 | |
3717 | wc->opcode = IB_WC_RECV; |
3718 | wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status); |
3719 | wc->wc_flags |= IB_WC_GRH; |
3720 | |
3721 | nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags, |
3722 | orig_cqe->raweth_qp1_flags2); |
3723 | if (nw_type >= 0) { |
3724 | wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type); |
3725 | wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE; |
3726 | } |
3727 | } |
3728 | |
3729 | static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp, |
3730 | struct ib_wc *wc, |
3731 | struct bnxt_qplib_cqe *cqe) |
3732 | { |
3733 | struct bnxt_re_dev *rdev; |
3734 | u16 vlan_id = 0; |
3735 | u8 nw_type; |
3736 | |
3737 | rdev = qp->rdev; |
3738 | wc->opcode = IB_WC_RECV; |
3739 | wc->status = __rc_to_ib_wc_status(cqe->status); |
3740 | |
3741 | if (cqe->flags & CQ_RES_UD_FLAGS_IMM) |
3742 | wc->wc_flags |= IB_WC_WITH_IMM; |
3743 | /* report only on GSI QP for Thor */ |
3744 | if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) { |
3745 | wc->wc_flags |= IB_WC_GRH; |
3746 | memcpy(wc->smac, cqe->smac, ETH_ALEN); |
3747 | wc->wc_flags |= IB_WC_WITH_SMAC; |
3748 | if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) { |
3749 | vlan_id = (cqe->cfa_meta & 0xFFF); |
3750 | } |
3751 | /* Mark only if vlan_id is non-zero */ |
3752 | if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) { |
3753 | wc->vlan_id = vlan_id; |
3754 | wc->wc_flags |= IB_WC_WITH_VLAN; |
3755 | } |
3756 | nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >> |
3757 | CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT; |
3758 | wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type); |
3759 | wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE; |
3760 | } |
3761 | |
3762 | } |
3763 | |
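| /* Post a phantom WQE: bind the fence memory window on the SQ under |
|  * sq_lock so the hardware generates an extra completion. Called from |
|  * the poll path when the qplib layer flags send_phantom on the SQ. |
|  */ |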
3764 | static int send_phantom_wqe(struct bnxt_re_qp *qp) |
3765 | { |
3766 | struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp; |
3767 | unsigned long flags; |
3768 | int rc; |
3769 | |
3770 | spin_lock_irqsave(&qp->sq_lock, flags); |
3771 | |
3772 | rc = bnxt_re_bind_fence_mw(lib_qp); |
3773 | if (!rc) { |
3774 | lib_qp->sq.phantom_wqe_cnt++; |
3775 | ibdev_dbg(&qp->rdev->ibdev, |
3776 | "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n", |
3777 | lib_qp->id, lib_qp->sq.hwq.prod, |
3778 | HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq), |
3779 | lib_qp->sq.phantom_wqe_cnt); |
3780 | } |
3781 | |
3782 | spin_unlock_irqrestore(&qp->sq_lock, flags); |
3783 | return rc; |
3784 | } |
3785 | |
3786 | int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) |
3787 | { |
3788 | struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); |
3789 | struct bnxt_re_qp *qp, *sh_qp; |
3790 | struct bnxt_qplib_cqe *cqe; |
3791 | int i, ncqe, budget; |
3792 | struct bnxt_qplib_q *sq; |
3793 | struct bnxt_qplib_qp *lib_qp; |
3794 | u32 tbl_idx; |
3795 | struct bnxt_re_sqp_entries *sqp_entry = NULL; |
3796 | unsigned long flags; |
3797 | |
3798 | /* User CQ; the only processing we do is to |
3799 | * complete any pending CQ resize operation. |
3800 | */ |
3801 | if (cq->umem) { |
3802 | if (cq->resize_umem) |
3803 | bnxt_re_resize_cq_complete(cq); |
3804 | return 0; |
3805 | } |
3806 | |
3807 | spin_lock_irqsave(&cq->cq_lock, flags); |
3808 | budget = min_t(u32, num_entries, cq->max_cql); |
3809 | num_entries = budget; |
3810 | if (!cq->cql) { |
3811 | ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use"); |
3812 | goto exit; |
3813 | } |
3814 | cqe = &cq->cql[0]; |
3815 | while (budget) { |
3816 | lib_qp = NULL; |
3817 | ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp); |
3818 | if (lib_qp) { |
3819 | sq = &lib_qp->sq; |
3820 | if (sq->send_phantom) { |
3821 | qp = container_of(lib_qp, |
3822 | struct bnxt_re_qp, qplib_qp); |
3823 | if (send_phantom_wqe(qp) == -ENOMEM) |
3824 | ibdev_err(&cq->rdev->ibdev, |
3825 | "Phantom failed! Scheduled to send again\n"); |
3826 | else |
3827 | sq->send_phantom = false; |
3828 | } |
3829 | } |
3830 | if (ncqe < budget) |
3831 | ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq, |
3832 | cqe + ncqe, |
3833 | budget - ncqe); |
3834 | |
3835 | if (!ncqe) |
3836 | break; |
3837 | |
3838 | for (i = 0; i < ncqe; i++, cqe++) { |
3839 | /* Transcribe each qplib_wqe back to ib_wc */ |
3840 | memset(wc, 0, sizeof(*wc)); |
3841 | |
3842 | wc->wr_id = cqe->wr_id; |
3843 | wc->byte_len = cqe->length; |
3844 | qp = container_of |
3845 | ((struct bnxt_qplib_qp *) |
3846 | (unsigned long)(cqe->qp_handle), |
3847 | struct bnxt_re_qp, qplib_qp); |
3848 | wc->qp = &qp->ib_qp; |
3849 | if (cqe->flags & CQ_RES_RC_FLAGS_IMM) |
3850 | wc->ex.imm_data = cpu_to_be32(cqe->immdata); |
3851 | else |
3852 | wc->ex.invalidate_rkey = cqe->invrkey; |
3853 | wc->src_qp = cqe->src_qp; |
3854 | memcpy(wc->smac, cqe->smac, ETH_ALEN); |
3855 | wc->port_num = 1; |
3856 | wc->vendor_err = cqe->status; |
3857 | |
3858 | switch (cqe->opcode) { |
3859 | case CQ_BASE_CQE_TYPE_REQ: |
3860 | sh_qp = qp->rdev->gsi_ctx.gsi_sqp; |
3861 | if (sh_qp && |
3862 | qp->qplib_qp.id == sh_qp->qplib_qp.id) { |
3863 | /* Handle this completion with |
3864 | * the stored completion |
3865 | */ |
3866 | memset(wc, 0, sizeof(*wc)); |
3867 | continue; |
3868 | } |
3869 | bnxt_re_process_req_wc(wc, cqe); |
3870 | break; |
3871 | case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1: |
3872 | if (!cqe->status) { |
3873 | int rc = 0; |
3874 | |
3875 | rc = bnxt_re_process_raw_qp_pkt_rx |
3876 | (qp, cqe); |
3877 | if (!rc) { |
3878 | memset(wc, 0, sizeof(*wc)); |
3879 | continue; |
3880 | } |
3881 | cqe->status = -1; |
3882 | } |
3883 | /* Errors need not be looped back. |
3884 | * But change the wr_id to the one |
3885 | * stored in the table |
3886 | */ |
3887 | tbl_idx = cqe->wr_id; |
3888 | sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx]; |
3889 | wc->wr_id = sqp_entry->wrid; |
3890 | bnxt_re_process_res_rawqp1_wc(wc, cqe); |
3891 | break; |
3892 | case CQ_BASE_CQE_TYPE_RES_RC: |
3893 | bnxt_re_process_res_rc_wc(wc, cqe); |
3894 | break; |
3895 | case CQ_BASE_CQE_TYPE_RES_UD: |
3896 | sh_qp = qp->rdev->gsi_ctx.gsi_sqp; |
3897 | if (sh_qp && |
3898 | qp->qplib_qp.id == sh_qp->qplib_qp.id) { |
3899 | /* Handle this completion with |
3900 | * the stored completion |
3901 | */ |
3902 | if (cqe->status) { |
3903 | continue; |
3904 | } else { |
3905 | bnxt_re_process_res_shadow_qp_wc |
3906 | (qp, wc, cqe); |
3907 | break; |
3908 | } |
3909 | } |
3910 | bnxt_re_process_res_ud_wc(qp, wc, cqe); |
3911 | break; |
3912 | default: |
3913 | ibdev_err(&cq->rdev->ibdev, |
3914 | "POLL CQ : type 0x%x not handled", |
3915 | cqe->opcode); |
3916 | continue; |
3917 | } |
3918 | wc++; |
3919 | budget--; |
3920 | } |
3921 | } |
3922 | exit: |
3923 | spin_unlock_irqrestore(&cq->cq_lock, flags); |
3924 | return num_entries - budget; |
3925 | } |
3926 | |
3927 | int bnxt_re_req_notify_cq(struct ib_cq *ib_cq, |
3928 | enum ib_cq_notify_flags ib_cqn_flags) |
3929 | { |
3930 | struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); |
3931 | int type = 0, rc = 0; |
3932 | unsigned long flags; |
3933 | |
3934 | spin_lock_irqsave(&cq->cq_lock, flags); |
3935 | /* Trigger on the very next completion */ |
3936 | if (ib_cqn_flags & IB_CQ_NEXT_COMP) |
3937 | type = DBC_DBC_TYPE_CQ_ARMALL; |
3938 | /* Trigger on the next solicited completion */ |
3939 | else if (ib_cqn_flags & IB_CQ_SOLICITED) |
3940 | type = DBC_DBC_TYPE_CQ_ARMSE; |
3941 | |
3942 | /* Poll to see if there are missed events */ |
3943 | if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) && |
3944 | !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) { |
3945 | rc = 1; |
3946 | goto exit; |
3947 | } |
3948 | bnxt_qplib_req_notify_cq(&cq->qplib_cq, type); |
3949 | |
3950 | exit: |
3951 | spin_unlock_irqrestore(&cq->cq_lock, flags); |
3952 | return rc; |
3953 | } |
3954 | |
3955 | /* Memory Regions */ |
3956 | struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags) |
3957 | { |
3958 | struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); |
3959 | struct bnxt_re_dev *rdev = pd->rdev; |
3960 | struct bnxt_re_mr *mr; |
3961 | u32 active_mrs; |
3962 | int rc; |
3963 | |
3964 | mr = kzalloc(sizeof(*mr), GFP_KERNEL); |
3965 | if (!mr) |
3966 | return ERR_PTR(-ENOMEM); |
3967 | |
3968 | mr->rdev = rdev; |
3969 | mr->qplib_mr.pd = &pd->qplib_pd; |
3970 | mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags); |
3971 | mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; |
3972 | |
3973 | if (mr_access_flags & IB_ACCESS_RELAXED_ORDERING) |
3974 | bnxt_re_check_and_set_relaxed_ordering(rdev, &mr->qplib_mr); |
3975 | |
3976 | /* Allocate and register 0 as the address */ |
3977 | rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); |
3978 | if (rc) |
3979 | goto fail; |
3980 | |
3981 | mr->qplib_mr.hwq.level = PBL_LVL_MAX; |
3982 | mr->qplib_mr.total_size = -1; /* Infinite length */ |
3983 | rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, 0, |
3984 | PAGE_SIZE); |
3985 | if (rc) |
3986 | goto fail_mr; |
3987 | |
3988 | mr->ib_mr.lkey = mr->qplib_mr.lkey; |
3989 | if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ | |
3990 | IB_ACCESS_REMOTE_ATOMIC)) |
3991 | mr->ib_mr.rkey = mr->ib_mr.lkey; |
3992 | active_mrs = atomic_inc_return(&rdev->stats.res.mr_count); |
3993 | if (active_mrs > rdev->stats.res.mr_watermark) |
3994 | rdev->stats.res.mr_watermark = active_mrs; |
3995 | |
3996 | return &mr->ib_mr; |
3997 | |
3998 | fail_mr: |
3999 | bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); |
4000 | fail: |
4001 | kfree(mr); |
4002 | return ERR_PTR(rc); |
4003 | } |
4004 | |
4005 | int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) |
4006 | { |
4007 | struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr); |
4008 | struct bnxt_re_dev *rdev = mr->rdev; |
4009 | int rc; |
4010 | |
4011 | rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); |
4012 | if (rc) { |
4013 | ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc); |
4014 | return rc; |
4015 | } |
4016 | |
4017 | if (mr->pages) { |
4018 | rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, |
4019 | &mr->qplib_frpl); |
4020 | kfree(mr->pages); |
4021 | mr->npages = 0; |
4022 | mr->pages = NULL; |
4023 | } |
4024 | ib_umem_release(mr->ib_umem); |
4025 | |
4026 | kfree(mr); |
4027 | atomic_dec(&rdev->stats.res.mr_count); |
4028 | return rc; |
4029 | } |
4030 | |
4031 | static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr) |
4032 | { |
4033 | struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr); |
4034 | |
4035 | if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs)) |
4036 | return -ENOMEM; |
4037 | |
4038 | mr->pages[mr->npages++] = addr; |
4039 | return 0; |
4040 | } |
4041 | |
4042 | int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents, |
4043 | unsigned int *sg_offset) |
4044 | { |
4045 | struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr); |
4046 | |
4047 | mr->npages = 0; |
4048 | return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page); |
4049 | } |
4050 | |
4051 | struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type, |
4052 | u32 max_num_sg) |
4053 | { |
4054 | struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); |
4055 | struct bnxt_re_dev *rdev = pd->rdev; |
4056 | struct bnxt_re_mr *mr = NULL; |
4057 | u32 active_mrs; |
4058 | int rc; |
4059 | |
4060 | if (type != IB_MR_TYPE_MEM_REG) { |
4061 | ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type); |
4062 | return ERR_PTR(-EINVAL); |
4063 | } |
4064 | if (max_num_sg > MAX_PBL_LVL_1_PGS) |
4065 | return ERR_PTR(-EINVAL); |
4066 | |
4067 | mr = kzalloc(sizeof(*mr), GFP_KERNEL); |
4068 | if (!mr) |
4069 | return ERR_PTR(-ENOMEM); |
4070 | |
4071 | mr->rdev = rdev; |
4072 | mr->qplib_mr.pd = &pd->qplib_pd; |
4073 | mr->qplib_mr.access_flags = BNXT_QPLIB_FR_PMR; |
4074 | mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; |
4075 | |
4076 | rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); |
4077 | if (rc) |
4078 | goto bail; |
4079 | |
4080 | mr->ib_mr.lkey = mr->qplib_mr.lkey; |
4081 | mr->ib_mr.rkey = mr->ib_mr.lkey; |
4082 | |
4083 | mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL); |
4084 | if (!mr->pages) { |
4085 | rc = -ENOMEM; |
4086 | goto fail; |
4087 | } |
4088 | rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res, |
4089 | &mr->qplib_frpl, max_num_sg); |
4090 | if (rc) { |
4091 | ibdev_err(&rdev->ibdev, |
4092 | "Failed to allocate HW FR page list"); |
4093 | goto fail_mr; |
4094 | } |
4095 | |
4096 | active_mrs = atomic_inc_return(&rdev->stats.res.mr_count); |
4097 | if (active_mrs > rdev->stats.res.mr_watermark) |
4098 | rdev->stats.res.mr_watermark = active_mrs; |
4099 | return &mr->ib_mr; |
4100 | |
4101 | fail_mr: |
4102 | kfree(mr->pages); |
4103 | fail: |
4104 | bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); |
4105 | bail: |
4106 | kfree(mr); |
4107 | return ERR_PTR(rc); |
4108 | } |
4109 | |
4110 | struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, |
4111 | struct ib_udata *udata) |
4112 | { |
4113 | struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); |
4114 | struct bnxt_re_dev *rdev = pd->rdev; |
4115 | struct bnxt_re_mw *mw; |
4116 | u32 active_mws; |
4117 | int rc; |
4118 | |
4119 | mw = kzalloc(sizeof(*mw), GFP_KERNEL); |
4120 | if (!mw) |
4121 | return ERR_PTR(-ENOMEM); |
4122 | mw->rdev = rdev; |
4123 | mw->qplib_mw.pd = &pd->qplib_pd; |
4124 | |
4125 | mw->qplib_mw.type = (type == IB_MW_TYPE_1 ? |
4126 | CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 : |
4127 | CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B); |
4128 | rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw); |
4129 | if (rc) { |
4130 | ibdev_err(&rdev->ibdev, "Allocate MW failed!"); |
4131 | goto fail; |
4132 | } |
4133 | mw->ib_mw.rkey = mw->qplib_mw.rkey; |
4134 | |
4135 | active_mws = atomic_inc_return(&rdev->stats.res.mw_count); |
4136 | if (active_mws > rdev->stats.res.mw_watermark) |
4137 | rdev->stats.res.mw_watermark = active_mws; |
4138 | return &mw->ib_mw; |
4139 | |
4140 | fail: |
4141 | kfree(mw); |
4142 | return ERR_PTR(rc); |
4143 | } |
4144 | |
4145 | int bnxt_re_dealloc_mw(struct ib_mw *ib_mw) |
4146 | { |
4147 | struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw); |
4148 | struct bnxt_re_dev *rdev = mw->rdev; |
4149 | int rc; |
4150 | |
4151 | rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw); |
4152 | if (rc) { |
4153 | ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc); |
4154 | return rc; |
4155 | } |
4156 | |
4157 | kfree(mw); |
4158 | atomic_dec(&rdev->stats.res.mw_count); |
4159 | return rc; |
4160 | } |
4161 | |
4162 | static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64 virt_addr, |
4163 | int mr_access_flags, struct ib_umem *umem) |
4164 | { |
4165 | struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); |
4166 | struct bnxt_re_dev *rdev = pd->rdev; |
4167 | unsigned long page_size; |
4168 | struct bnxt_re_mr *mr; |
4169 | int umem_pgs, rc; |
4170 | u32 active_mrs; |
4171 | |
4172 | if (length > BNXT_RE_MAX_MR_SIZE) { |
4173 | ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported:%lld\n", |
4174 | length, BNXT_RE_MAX_MR_SIZE); |
4175 | return ERR_PTR(-ENOMEM); |
4176 | } |
4177 | |
4178 | page_size = ib_umem_find_best_pgsz(umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr); |
4179 | if (!page_size) { |
4180 | ibdev_err(&rdev->ibdev, "umem page size unsupported!"); |
4181 | return ERR_PTR(-EINVAL); |
4182 | } |
4183 | |
4184 | mr = kzalloc(sizeof(*mr), GFP_KERNEL); |
4185 | if (!mr) |
4186 | return ERR_PTR(-ENOMEM); |
4187 | |
4188 | mr->rdev = rdev; |
4189 | mr->qplib_mr.pd = &pd->qplib_pd; |
4190 | mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags); |
4191 | mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR; |
4192 | |
4193 | if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) { |
4194 | rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); |
4195 | if (rc) { |
4196 | ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc); |
4197 | rc = -EIO; |
4198 | goto free_mr; |
4199 | } |
4200 | /* The fixed portion of the rkey is the same as the lkey */ |
4201 | mr->ib_mr.rkey = mr->qplib_mr.rkey; |
4202 | } else { |
4203 | mr->qplib_mr.flags = CMDQ_REGISTER_MR_FLAGS_ALLOC_MR; |
4204 | } |
4205 | mr->ib_umem = umem; |
4206 | mr->qplib_mr.va = virt_addr; |
4207 | mr->qplib_mr.total_size = length; |
4208 | |
4209 | if (mr_access_flags & IB_ACCESS_RELAXED_ORDERING) |
4210 | bnxt_re_check_and_set_relaxed_ordering(rdev, &mr->qplib_mr); |
4211 | |
4212 | umem_pgs = ib_umem_num_dma_blocks(umem, page_size); |
4213 | rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem, |
4214 | umem_pgs, page_size); |
4215 | if (rc) { |
4216 | ibdev_err(&rdev->ibdev, "Failed to register user MR - rc = %d\n", rc); |
4217 | rc = -EIO; |
4218 | goto free_mrw; |
4219 | } |
4220 | |
4221 | mr->ib_mr.lkey = mr->qplib_mr.lkey; |
4222 | mr->ib_mr.rkey = mr->qplib_mr.lkey; |
4223 | active_mrs = atomic_inc_return(&rdev->stats.res.mr_count); |
4224 | if (active_mrs > rdev->stats.res.mr_watermark) |
4225 | rdev->stats.res.mr_watermark = active_mrs; |
4226 | |
4227 | return &mr->ib_mr; |
4228 | |
4229 | free_mrw: |
4230 | bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); |
4231 | free_mr: |
4232 | kfree(mr); |
4233 | return ERR_PTR(rc); |
4234 | } |
4235 | |
4236 | struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length, |
4237 | u64 virt_addr, int mr_access_flags, |
4238 | struct ib_udata *udata) |
4239 | { |
4240 | struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); |
4241 | struct bnxt_re_dev *rdev = pd->rdev; |
4242 | struct ib_umem *umem; |
4243 | struct ib_mr *ib_mr; |
4244 | |
4245 | umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags); |
4246 | if (IS_ERR(umem)) |
4247 | return ERR_CAST(umem); |
4248 | |
4249 | ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem); |
4250 | if (IS_ERR(ib_mr)) |
4251 | ib_umem_release(umem); |
4252 | return ib_mr; |
4253 | } |
4254 | |
4255 | struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start, |
4256 | u64 length, u64 virt_addr, int fd, |
4257 | int mr_access_flags, |
4258 | struct uverbs_attr_bundle *attrs) |
4259 | { |
4260 | struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); |
4261 | struct bnxt_re_dev *rdev = pd->rdev; |
4262 | struct ib_umem_dmabuf *umem_dmabuf; |
4263 | struct ib_umem *umem; |
4264 | struct ib_mr *ib_mr; |
4265 | |
4266 | umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length, |
4267 | fd, mr_access_flags); |
4268 | if (IS_ERR(umem_dmabuf)) |
4269 | return ERR_CAST(umem_dmabuf); |
4270 | |
4271 | umem = &umem_dmabuf->umem; |
4272 | |
4273 | ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem); |
4274 | if (IS_ERR(ib_mr)) |
4275 | ib_umem_release(umem); |
4276 | return ib_mr; |
4277 | } |
4278 | |
4279 | int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata) |
4280 | { |
4281 | struct ib_device *ibdev = ctx->device; |
4282 | struct bnxt_re_ucontext *uctx = |
4283 | container_of(ctx, struct bnxt_re_ucontext, ib_uctx); |
4284 | struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); |
4285 | struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr; |
4286 | struct bnxt_re_user_mmap_entry *entry; |
4287 | struct bnxt_re_uctx_resp resp = {}; |
4288 | struct bnxt_re_uctx_req ureq = {}; |
4289 | u32 chip_met_rev_num = 0; |
4290 | int rc; |
4291 | |
4292 | ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver); |
4293 | |
4294 | if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) { |
4295 | ibdev_dbg(ibdev, " is different from the device %d ", |
4296 | BNXT_RE_ABI_VERSION); |
4297 | return -EPERM; |
4298 | } |
4299 | |
4300 | uctx->rdev = rdev; |
4301 | |
4302 | uctx->shpg = (void *)__get_free_page(GFP_KERNEL); |
4303 | if (!uctx->shpg) { |
4304 | rc = -ENOMEM; |
4305 | goto fail; |
4306 | } |
4307 | spin_lock_init(&uctx->sh_lock); |
4308 | |
4309 | resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX; |
4310 | chip_met_rev_num = rdev->chip_ctx->chip_num; |
4311 | chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) << |
4312 | BNXT_RE_CHIP_ID0_CHIP_REV_SFT; |
4313 | chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) << |
4314 | BNXT_RE_CHIP_ID0_CHIP_MET_SFT; |
4315 | resp.chip_id0 = chip_met_rev_num; |
4316 | /* Temp, use xa_alloc instead */ |
4317 | resp.dev_id = rdev->en_dev->pdev->devfn; |
4318 | resp.max_qp = rdev->qplib_ctx.qpc_count; |
4319 | resp.pg_size = PAGE_SIZE; |
4320 | resp.cqe_sz = sizeof(struct cq_base); |
4321 | resp.max_cqd = dev_attr->max_cq_wqes; |
4322 | |
4323 | if (rdev->chip_ctx->modes.db_push) |
4324 | resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED; |
4325 | |
4326 | entry = bnxt_re_mmap_entry_insert(uctx, 0, BNXT_RE_MMAP_SH_PAGE, NULL); |
4327 | if (!entry) { |
4328 | rc = -ENOMEM; |
4329 | goto cfail; |
4330 | } |
4331 | uctx->shpage_mmap = &entry->rdma_entry; |
4332 | if (rdev->pacing.dbr_pacing) |
4333 | resp.comp_mask |= BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED; |
4334 | |
4335 | if (_is_host_msn_table(rdev->qplib_res.dattr->dev_cap_flags2)) |
4336 | resp.comp_mask |= BNXT_RE_UCNTX_CMASK_MSN_TABLE_ENABLED; |
4337 | |
4338 | if (udata->inlen >= sizeof(ureq)) { |
4339 | rc = ib_copy_from_udata(&ureq, udata, min(udata->inlen, sizeof(ureq))); |
4340 | if (rc) |
4341 | goto cfail; |
4342 | if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT) { |
4343 | resp.comp_mask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED; |
4344 | uctx->cmask |= BNXT_RE_UCNTX_CAP_POW2_DISABLED; |
4345 | } |
4346 | if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_VAR_WQE_SUPPORT) { |
4347 | resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE; |
4348 | resp.mode = rdev->chip_ctx->modes.wqe_mode; |
4349 | if (resp.mode == BNXT_QPLIB_WQE_MODE_VARIABLE) |
4350 | uctx->cmask |= BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED; |
4351 | } |
4352 | } |
4353 | |
4354 | rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp))); |
4355 | if (rc) { |
4356 | ibdev_err(ibdev, "Failed to copy user context"); |
4357 | rc = -EFAULT; |
4358 | goto cfail; |
4359 | } |
4360 | |
4361 | return 0; |
4362 | cfail: |
4363 | free_page((unsigned long)uctx->shpg); |
4364 | uctx->shpg = NULL; |
4365 | fail: |
4366 | return rc; |
4367 | } |
4368 | |
4369 | void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx) |
4370 | { |
4371 | struct bnxt_re_ucontext *uctx = container_of(ib_uctx, |
4372 | struct bnxt_re_ucontext, |
4373 | ib_uctx); |
4374 | |
4375 | struct bnxt_re_dev *rdev = uctx->rdev; |
4376 | |
4377 | rdma_user_mmap_entry_remove(uctx->shpage_mmap); |
4378 | uctx->shpage_mmap = NULL; |
4379 | if (uctx->shpg) |
4380 | free_page((unsigned long)uctx->shpg); |
4381 | |
4382 | if (uctx->dpi.dbr) { |
4383 | /* Free DPI only if this is the first PD allocated by the |
4384 | * application and mark the context dpi as NULL |
4385 | */ |
4386 | bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->dpi); |
4387 | uctx->dpi.dbr = NULL; |
4388 | } |
4389 | } |
4390 | |
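| /* Hash-table lookups used by the GET_TOGGLE_MEM uverbs method to map a |
|  * CQ or SRQ id back to the object that owns the toggle page. |
|  */ |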
4391 | static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq_id) |
4392 | { |
4393 | struct bnxt_re_cq *cq = NULL, *tmp_cq; |
4394 | |
4395 | hash_for_each_possible(rdev->cq_hash, tmp_cq, hash_entry, cq_id) { |
4396 | if (tmp_cq->qplib_cq.id == cq_id) { |
4397 | cq = tmp_cq; |
4398 | break; |
4399 | } |
4400 | } |
4401 | return cq; |
4402 | } |
4403 | |
4404 | static struct bnxt_re_srq *bnxt_re_search_for_srq(struct bnxt_re_dev *rdev, u32 srq_id) |
4405 | { |
4406 | struct bnxt_re_srq *srq = NULL, *tmp_srq; |
4407 | |
4408 | hash_for_each_possible(rdev->srq_hash, tmp_srq, hash_entry, srq_id) { |
4409 | if (tmp_srq->qplib_srq.id == srq_id) { |
4410 | srq = tmp_srq; |
4411 | break; |
4412 | } |
4413 | } |
4414 | return srq; |
4415 | } |
4416 | |
4417 | /* Helper function to mmap the virtual memory from user app */ |
4418 | int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma) |
4419 | { |
4420 | struct bnxt_re_ucontext *uctx = container_of(ib_uctx, |
4421 | struct bnxt_re_ucontext, |
4422 | ib_uctx); |
4423 | struct bnxt_re_user_mmap_entry *bnxt_entry; |
4424 | struct rdma_user_mmap_entry *rdma_entry; |
4425 | int ret = 0; |
4426 | u64 pfn; |
4427 | |
4428 | rdma_entry = rdma_user_mmap_entry_get(&uctx->ib_uctx, vma); |
4429 | if (!rdma_entry) |
4430 | return -EINVAL; |
4431 | |
4432 | bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry, |
4433 | rdma_entry); |
4434 | |
4435 | switch (bnxt_entry->mmap_flag) { |
4436 | case BNXT_RE_MMAP_WC_DB: |
4437 | pfn = bnxt_entry->mem_offset >> PAGE_SHIFT; |
4438 | ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE, |
4439 | pgprot_writecombine(vma->vm_page_prot), |
4440 | rdma_entry); |
4441 | break; |
4442 | case BNXT_RE_MMAP_UC_DB: |
4443 | pfn = bnxt_entry->mem_offset >> PAGE_SHIFT; |
4444 | ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE, |
4445 | pgprot_noncached(vma->vm_page_prot), |
4446 | rdma_entry); |
4447 | break; |
4448 | case BNXT_RE_MMAP_SH_PAGE: |
4449 | ret = vm_insert_page(vma, vma->vm_start, virt_to_page(uctx->shpg)); |
4450 | break; |
4451 | case BNXT_RE_MMAP_DBR_BAR: |
4452 | pfn = bnxt_entry->mem_offset >> PAGE_SHIFT; |
4453 | ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE, |
4454 | pgprot_noncached(vma->vm_page_prot), |
4455 | rdma_entry); |
4456 | break; |
4457 | case BNXT_RE_MMAP_DBR_PAGE: |
4458 | case BNXT_RE_MMAP_TOGGLE_PAGE: |
4459 | /* Driver doesn't expect write access for user space */ |
4460 | if (vma->vm_flags & VM_WRITE) |
4461 | ret = -EFAULT; |
4462 | else |
4463 | ret = vm_insert_page(vma, vma->vm_start, |
4464 | virt_to_page((void *)bnxt_entry->mem_offset)); |
4465 | break; |
4466 | default: |
4467 | ret = -EINVAL; |
4468 | break; |
4469 | } |
4470 | |
4471 | rdma_user_mmap_entry_put(rdma_entry); |
4472 | return ret; |
4473 | } |
4474 | |
4475 | void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry) |
4476 | { |
4477 | struct bnxt_re_user_mmap_entry *bnxt_entry; |
4478 | |
4479 | bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry, |
4480 | rdma_entry); |
4481 | |
4482 | kfree(bnxt_entry); |
4483 | } |
4484 | |
4485 | int bnxt_re_process_mad(struct ib_device *ibdev, int mad_flags, |
4486 | u32 port_num, const struct ib_wc *in_wc, |
4487 | const struct ib_grh *in_grh, |
4488 | const struct ib_mad *in_mad, struct ib_mad *out_mad, |
4489 | size_t *out_mad_size, u16 *out_mad_pkey_index) |
4490 | { |
4491 | struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); |
4492 | struct ib_class_port_info cpi = {}; |
4493 | int ret = IB_MAD_RESULT_SUCCESS; |
4494 | int rc = 0; |
4495 | |
4496 | if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT) |
4497 | return ret; |
4498 | |
4499 | switch (in_mad->mad_hdr.attr_id) { |
4500 | case IB_PMA_CLASS_PORT_INFO: |
4501 | cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH; |
4502 | memcpy((out_mad->data + 40), &cpi, sizeof(cpi)); |
4503 | break; |
4504 | case IB_PMA_PORT_COUNTERS_EXT: |
4505 | rc = bnxt_re_assign_pma_port_ext_counters(rdev, out_mad); |
4506 | break; |
4507 | case IB_PMA_PORT_COUNTERS: |
4508 | rc = bnxt_re_assign_pma_port_counters(rdev, out_mad); |
4509 | break; |
4510 | default: |
4511 | rc = -EINVAL; |
4512 | break; |
4513 | } |
4514 | if (rc) |
4515 | return IB_MAD_RESULT_FAILURE; |
4516 | ret |= IB_MAD_RESULT_REPLY; |
4517 | return ret; |
4518 | } |
4519 | |
4520 | static int UVERBS_HANDLER(BNXT_RE_METHOD_NOTIFY_DRV)(struct uverbs_attr_bundle *attrs) |
4521 | { |
4522 | struct bnxt_re_ucontext *uctx; |
4523 | |
4524 | uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx); |
4525 | bnxt_re_pacing_alert(uctx->rdev); |
4526 | return 0; |
4527 | } |
4528 | |
4529 | static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *attrs) |
4530 | { |
4531 | struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_ALLOC_PAGE_HANDLE); |
4532 | enum bnxt_re_alloc_page_type alloc_type; |
4533 | struct bnxt_re_user_mmap_entry *entry; |
4534 | enum bnxt_re_mmap_flag mmap_flag; |
4535 | struct bnxt_qplib_chip_ctx *cctx; |
4536 | struct bnxt_re_ucontext *uctx; |
4537 | struct bnxt_re_dev *rdev; |
4538 | u64 mmap_offset; |
4539 | u32 length; |
4540 | u32 dpi; |
4541 | u64 addr; |
4542 | int err; |
4543 | |
4544 | uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx); |
4545 | if (IS_ERR(uctx)) |
4546 | return PTR_ERR(uctx); |
4547 | |
4548 | err = uverbs_get_const(&alloc_type, attrs, BNXT_RE_ALLOC_PAGE_TYPE); |
4549 | if (err) |
4550 | return err; |
4551 | |
4552 | rdev = uctx->rdev; |
4553 | cctx = rdev->chip_ctx; |
4554 | |
4555 | switch (alloc_type) { |
4556 | case BNXT_RE_ALLOC_WC_PAGE: |
4557 | if (cctx->modes.db_push) { |
4558 | if (bnxt_qplib_alloc_dpi(&rdev->qplib_res, &uctx->wcdpi, |
4559 | uctx, BNXT_QPLIB_DPI_TYPE_WC)) |
4560 | return -ENOMEM; |
4561 | length = PAGE_SIZE; |
4562 | dpi = uctx->wcdpi.dpi; |
4563 | addr = (u64)uctx->wcdpi.umdbr; |
4564 | mmap_flag = BNXT_RE_MMAP_WC_DB; |
4565 | } else { |
4566 | return -EINVAL; |
4567 | } |
4568 | |
4569 | break; |
4570 | case BNXT_RE_ALLOC_DBR_BAR_PAGE: |
4571 | length = PAGE_SIZE; |
4572 | addr = (u64)rdev->pacing.dbr_bar_addr; |
4573 | mmap_flag = BNXT_RE_MMAP_DBR_BAR; |
4574 | break; |
4575 | |
4576 | case BNXT_RE_ALLOC_DBR_PAGE: |
4577 | length = PAGE_SIZE; |
4578 | addr = (u64)rdev->pacing.dbr_page; |
4579 | mmap_flag = BNXT_RE_MMAP_DBR_PAGE; |
4580 | break; |
4581 | |
4582 | default: |
4583 | return -EOPNOTSUPP; |
4584 | } |
4585 | |
4586 | entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mmap_offset); |
4587 | if (!entry) |
4588 | return -ENOMEM; |
4589 | |
4590 | uobj->object = entry; |
4591 | uverbs_finalize_uobj_create(attrs, BNXT_RE_ALLOC_PAGE_HANDLE); |
4592 | err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_OFFSET, |
4593 | &mmap_offset, sizeof(mmap_offset)); |
4594 | if (err) |
4595 | return err; |
4596 | |
4597 | err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_LENGTH, |
4598 | &length, sizeof(length)); |
4599 | if (err) |
4600 | return err; |
4601 | |
4602 | err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI, |
4603 | &dpi, sizeof(dpi)); |
4604 | if (err) |
4605 | return err; |
4606 | |
4607 | return 0; |
4608 | } |
4609 | |
4610 | static int alloc_page_obj_cleanup(struct ib_uobject *uobject, |
4611 | enum rdma_remove_reason why, |
4612 | struct uverbs_attr_bundle *attrs) |
4613 | { |
4614 | struct bnxt_re_user_mmap_entry *entry = uobject->object; |
4615 | struct bnxt_re_ucontext *uctx = entry->uctx; |
4616 | |
4617 | switch (entry->mmap_flag) { |
4618 | case BNXT_RE_MMAP_WC_DB: |
4619 | if (uctx && uctx->wcdpi.dbr) { |
4620 | struct bnxt_re_dev *rdev = uctx->rdev; |
4621 | |
4622 | bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->wcdpi); |
4623 | uctx->wcdpi.dbr = NULL; |
4624 | } |
4625 | break; |
4626 | case BNXT_RE_MMAP_DBR_BAR: |
4627 | case BNXT_RE_MMAP_DBR_PAGE: |
4628 | break; |
4629 | default: |
4630 | goto exit; |
4631 | } |
4632 | rdma_user_mmap_entry_remove(&entry->rdma_entry); |
4633 | exit: |
4634 | return 0; |
4635 | } |
4636 | |
4637 | DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_ALLOC_PAGE, |
4638 | UVERBS_ATTR_IDR(BNXT_RE_ALLOC_PAGE_HANDLE, |
4639 | BNXT_RE_OBJECT_ALLOC_PAGE, |
4640 | UVERBS_ACCESS_NEW, |
4641 | UA_MANDATORY), |
4642 | UVERBS_ATTR_CONST_IN(BNXT_RE_ALLOC_PAGE_TYPE, |
4643 | enum bnxt_re_alloc_page_type, |
4644 | UA_MANDATORY), |
4645 | UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_OFFSET, |
4646 | UVERBS_ATTR_TYPE(u64), |
4647 | UA_MANDATORY), |
4648 | UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_LENGTH, |
4649 | UVERBS_ATTR_TYPE(u32), |
4650 | UA_MANDATORY), |
4651 | UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_DPI, |
4652 | UVERBS_ATTR_TYPE(u32), |
4653 | UA_MANDATORY)); |
4654 | |
4655 | DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_DESTROY_PAGE, |
4656 | UVERBS_ATTR_IDR(BNXT_RE_DESTROY_PAGE_HANDLE, |
4657 | BNXT_RE_OBJECT_ALLOC_PAGE, |
4658 | UVERBS_ACCESS_DESTROY, |
4659 | UA_MANDATORY)); |
4660 | |
4661 | DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_ALLOC_PAGE, |
4662 | UVERBS_TYPE_ALLOC_IDR(alloc_page_obj_cleanup), |
4663 | &UVERBS_METHOD(BNXT_RE_METHOD_ALLOC_PAGE), |
4664 | &UVERBS_METHOD(BNXT_RE_METHOD_DESTROY_PAGE)); |
4665 | |
4666 | DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_NOTIFY_DRV); |
4667 | |
4668 | DECLARE_UVERBS_GLOBAL_METHODS(BNXT_RE_OBJECT_NOTIFY_DRV, |
4669 | &UVERBS_METHOD(BNXT_RE_METHOD_NOTIFY_DRV)); |
4670 | |
4671 | /* Toggle MEM */ |
4672 | static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bundle *attrs) |
4673 | { |
4674 | struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_TOGGLE_MEM_HANDLE); |
4675 | enum bnxt_re_mmap_flag mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE; |
4676 | enum bnxt_re_get_toggle_mem_type res_type; |
4677 | struct bnxt_re_user_mmap_entry *entry; |
4678 | struct bnxt_re_ucontext *uctx; |
4679 | struct ib_ucontext *ib_uctx; |
4680 | struct bnxt_re_dev *rdev; |
4681 | struct bnxt_re_srq *srq; |
4682 | u32 length = PAGE_SIZE; |
4683 | struct bnxt_re_cq *cq; |
4684 | u64 mem_offset; |
4685 | u32 offset = 0; |
4686 | u64 addr = 0; |
4687 | u32 res_id; |
4688 | int err; |
4689 | |
4690 | ib_uctx = ib_uverbs_get_ucontext(attrs); |
4691 | if (IS_ERR(ib_uctx)) |
4692 | return PTR_ERR(ib_uctx); |
4693 | |
4694 | err = uverbs_get_const(&res_type, attrs, BNXT_RE_TOGGLE_MEM_TYPE); |
4695 | if (err) |
4696 | return err; |
4697 | |
4698 | uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx); |
4699 | rdev = uctx->rdev; |
4700 | err = uverbs_copy_from(&res_id, attrs, BNXT_RE_TOGGLE_MEM_RES_ID); |
4701 | if (err) |
4702 | return err; |
4703 | |
4704 | switch (res_type) { |
4705 | case BNXT_RE_CQ_TOGGLE_MEM: |
4706 | cq = bnxt_re_search_for_cq(rdev, res_id); |
4707 | if (!cq) |
4708 | return -EINVAL; |
4709 | |
4710 | addr = (u64)cq->uctx_cq_page; |
4711 | break; |
4712 | case BNXT_RE_SRQ_TOGGLE_MEM: |
4713 | srq = bnxt_re_search_for_srq(rdev, res_id); |
4714 | if (!srq) |
4715 | return -EINVAL; |
4716 | |
4717 | addr = (u64)srq->uctx_srq_page; |
4718 | break; |
4719 | |
4720 | default: |
4721 | return -EOPNOTSUPP; |
4722 | } |
4723 | |
4724 | entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mem_offset); |
4725 | if (!entry) |
4726 | return -ENOMEM; |
4727 | |
4728 | uobj->object = entry; |
4729 | uverbs_finalize_uobj_create(attrs, BNXT_RE_TOGGLE_MEM_HANDLE); |
4730 | err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_PAGE, |
4731 | &mem_offset, sizeof(mem_offset)); |
4732 | if (err) |
4733 | return err; |
4734 | |
4735 | err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_LENGTH, |
4736 | &length, sizeof(length)); |
4737 | if (err) |
4738 | return err; |
4739 | |
4740 | err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_OFFSET, |
4741 | &offset, sizeof(offset)); |
4742 | if (err) |
4743 | return err; |
4744 | |
4745 | return 0; |
4746 | } |
4747 | |
static int get_toggle_mem_obj_cleanup(struct ib_uobject *uobject,
				      enum rdma_remove_reason why,
				      struct uverbs_attr_bundle *attrs)
{
	struct bnxt_re_user_mmap_entry *entry = uobject->object;

	rdma_user_mmap_entry_remove(&entry->rdma_entry);
	return 0;
}

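/*
 * GET_TOGGLE_MEM attribute contract (see the handler above): the caller
 * supplies a new IDR handle, the toggle memory type (CQ or SRQ) and the
 * resource id; the method returns the key to pass to mmap() on the
 * uverbs fd, the mapping length (one page) and the offset of the toggle
 * data within that mapping.
 */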
DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_GET_TOGGLE_MEM,
			    UVERBS_ATTR_IDR(BNXT_RE_TOGGLE_MEM_HANDLE,
					    BNXT_RE_OBJECT_GET_TOGGLE_MEM,
					    UVERBS_ACCESS_NEW,
					    UA_MANDATORY),
			    UVERBS_ATTR_CONST_IN(BNXT_RE_TOGGLE_MEM_TYPE,
						 enum bnxt_re_get_toggle_mem_type,
						 UA_MANDATORY),
			    UVERBS_ATTR_PTR_IN(BNXT_RE_TOGGLE_MEM_RES_ID,
					       UVERBS_ATTR_TYPE(u32),
					       UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
						UVERBS_ATTR_TYPE(u64),
						UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
						UVERBS_ATTR_TYPE(u32),
						UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
						UVERBS_ATTR_TYPE(u32),
						UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_RELEASE_TOGGLE_MEM,
				    UVERBS_ATTR_IDR(BNXT_RE_RELEASE_TOGGLE_MEM_HANDLE,
						    BNXT_RE_OBJECT_GET_TOGGLE_MEM,
						    UVERBS_ACCESS_DESTROY,
						    UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_GET_TOGGLE_MEM,
			    UVERBS_TYPE_ALLOC_IDR(get_toggle_mem_obj_cleanup),
			    &UVERBS_METHOD(BNXT_RE_METHOD_GET_TOGGLE_MEM),
			    &UVERBS_METHOD(BNXT_RE_METHOD_RELEASE_TOGGLE_MEM));

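/*
 * Driver uapi definition table: chains the ALLOC_PAGE, NOTIFY_DRV and
 * GET_TOGGLE_MEM object trees into the device's uverbs ioctl interface
 * when the device is registered. The empty entry terminates the table.
 */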
const struct uapi_definition bnxt_re_uapi_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_ALLOC_PAGE),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_NOTIFY_DRV),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_GET_TOGGLE_MEM),
	{}
};