// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include "qed_cxt.h"
#include "qed_hw.h"
#include "qed_ll2.h"
#include "qed_rdma.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_ooo.h"

#define QED_IWARP_ORD_DEFAULT		32
#define QED_IWARP_IRD_DEFAULT		32
#define QED_IWARP_MAX_FW_MSS		4120

#define QED_EP_SIG			0xecabcdef

struct mpa_v2_hdr {
	__be16 ird;
	__be16 ord;
};

#define MPA_V2_PEER2PEER_MODEL	0x8000
#define MPA_V2_SEND_RTR		0x4000	/* on ird */
#define MPA_V2_READ_RTR		0x4000	/* on ord */
#define MPA_V2_WRITE_RTR	0x8000
#define MPA_V2_IRD_ORD_MASK	0x3FFF

#define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED)

#define QED_IWARP_INVALID_TCP_CID	0xffffffff

#define QED_IWARP_RCV_WND_SIZE_DEF_BB_2P (200 * 1024)
#define QED_IWARP_RCV_WND_SIZE_DEF_BB_4P (100 * 1024)
#define QED_IWARP_RCV_WND_SIZE_DEF_AH_2P (150 * 1024)
#define QED_IWARP_RCV_WND_SIZE_DEF_AH_4P (90 * 1024)

#define QED_IWARP_RCV_WND_SIZE_MIN	(0xffff)
#define TIMESTAMP_HEADER_SIZE		(12)
#define QED_IWARP_MAX_FIN_RT_DEFAULT	(2)

#define QED_IWARP_TS_EN			BIT(0)
#define QED_IWARP_DA_EN			BIT(1)
#define QED_IWARP_PARAM_CRC_NEEDED	(1)
#define QED_IWARP_PARAM_P2P		(1)

#define QED_IWARP_DEF_MAX_RT_TIME	(0)
#define QED_IWARP_DEF_CWND_FACTOR	(4)
#define QED_IWARP_DEF_KA_MAX_PROBE_CNT	(5)
#define QED_IWARP_DEF_KA_TIMEOUT	(1200000)	/* 20 min */
#define QED_IWARP_DEF_KA_INTERVAL	(1000)		/* 1 sec */

static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
				 __le16 echo, union event_ring_data *data,
				 u8 fw_return_code);

/* Override devinfo with iWARP specific values */
void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
	dev->max_qp = min_t(u32,
			    IWARP_MAX_QPS,
			    p_hwfn->p_rdma_info->num_qps) -
		      QED_IWARP_PREALLOC_CNT;

	dev->max_cq = dev->max_qp;

	dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT;
	dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT;
}

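/* Enable TCP searching in the parser so iWARP traffic, which runs on
 * top of offloaded TCP, is steered to the RDMA engine.
 */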
void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP;
	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
	p_hwfn->b_rdma_enabled_in_prs = true;
}

/* We have two cid maps: the tcp map, which should be used only from
 * passive syn processing when replacing a pre-allocated ep in the list,
 * and a second map for active tcp connections and for QPs.
 */
static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
{
	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);

	if (cid < QED_IWARP_PREALLOC_CNT)
		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
				    cid);
	else
		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

void
qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
			 struct iwarp_init_func_ramrod_data *p_ramrod)
{
	p_ramrod->iwarp.ll2_ooo_q_index =
	    RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
	    p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;

	p_ramrod->tcp.tx_sws_timer = cpu_to_le16(QED_TX_SWS_TIMER_DFLT);
	p_ramrod->tcp.two_msl_timer = cpu_to_le32(QED_TWO_MSL_TIMER_DFLT);
	p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
}

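/* Allocate a cid from the main cid map and dynamically allocate the ILT
 * memory backing it; on ILT failure the cid is released back to the map.
 */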
static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate iwarp cid\n");
		return rc;
	}
	*cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid);
	if (rc)
		qed_iwarp_cid_cleaned(p_hwfn, *cid);

	return rc;
}

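/* Mark a pre-allocated tcp cid as in use in the tcp cid map. */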
static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

/* This function allocates a cid for passive tcp (called from syn receive).
 * The reason it's separate from the regular cid allocation is that these
 * cids are guaranteed to already have an ilt allocated: they are
 * preallocated to ensure we won't need to allocate memory during syn
 * processing.
 */
static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);

	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->tcp_cid_map, cid);

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "can't allocate iwarp tcp cid max-count=%d\n",
			   p_hwfn->p_rdma_info->tcp_cid_map.max_count);

		*cid = QED_IWARP_INVALID_TCP_CID;
		return rc;
	}

	*cid += qed_cxt_get_proto_cid_start(p_hwfn,
					    p_hwfn->p_rdma_info->proto);
	return 0;
}

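/* Allocate the QP's shared queue page and a cid, then post a CREATE_QP
 * ramrod describing the SQ/RQ PBLs, the CQs and the physical queues.
 */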
int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
			struct qed_rdma_qp *qp,
			struct qed_rdma_create_qp_out_params *out_params)
{
	struct iwarp_create_qp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u16 physical_queue;
	u32 cid;
	int rc;

	qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					      IWARP_SHARED_QUEUE_PAGE_SIZE,
					      &qp->shared_queue_phys_addr,
					      GFP_KERNEL);
	if (!qp->shared_queue)
		return -ENOMEM;

	out_params->sq_pbl_virt = (u8 *)qp->shared_queue +
	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
	out_params->sq_pbl_phys = qp->shared_queue_phys_addr +
	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
	out_params->rq_pbl_virt = (u8 *)qp->shared_queue +
	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
	out_params->rq_pbl_phys = qp->shared_queue_phys_addr +
	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;

	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
	if (rc)
		goto err1;

	qp->icid = (u16)cid;

	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.cid = qp->icid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_CREATE_QP,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		goto err2;

	p_ramrod = &p_ent->ramrod.iwarp_create_qp;

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

	p_ramrod->pd = cpu_to_le16(qp->pd);
	p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
	p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);

	p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
	p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
	p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
	p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;

	p_ramrod->cq_cid_for_sq =
	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
	p_ramrod->cq_cid_for_rq =
	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id);

	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_ramrod->physical_q0 = cpu_to_le16(physical_queue);
	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
	p_ramrod->physical_q1 = cpu_to_le16(physical_queue);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err2;

	return rc;

err2:
	qed_iwarp_cid_cleaned(p_hwfn, cid);
err1:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  IWARP_SHARED_QUEUE_PAGE_SIZE,
			  qp->shared_queue, qp->shared_queue_phys_addr);

	return rc;
}

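/* Post a MODIFY_QP ramrod moving the FW QP to CLOSING or ERROR,
 * depending on the current driver iwarp_state.
 */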
static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	struct iwarp_modify_qp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u16 flags, trans_to_state;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_MODIFY_QP,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.iwarp_modify_qp;

	flags = le16_to_cpu(p_ramrod->flags);
	SET_FIELD(flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN, 0x1);
	p_ramrod->flags = cpu_to_le16(flags);

	if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING)
		trans_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
	else
		trans_to_state = IWARP_MODIFY_QP_STATE_ERROR;

	p_ramrod->transition_to_state = cpu_to_le16(trans_to_state);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);

	return rc;
}

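/* The QP state representation is shared with RoCE; translate between
 * the two state enums in both directions.
 */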
enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state)
{
	switch (state) {
	case QED_ROCE_QP_STATE_RESET:
	case QED_ROCE_QP_STATE_INIT:
	case QED_ROCE_QP_STATE_RTR:
		return QED_IWARP_QP_STATE_IDLE;
	case QED_ROCE_QP_STATE_RTS:
		return QED_IWARP_QP_STATE_RTS;
	case QED_ROCE_QP_STATE_SQD:
		return QED_IWARP_QP_STATE_CLOSING;
	case QED_ROCE_QP_STATE_ERR:
		return QED_IWARP_QP_STATE_ERROR;
	case QED_ROCE_QP_STATE_SQE:
		return QED_IWARP_QP_STATE_TERMINATE;
	default:
		return QED_IWARP_QP_STATE_ERROR;
	}
}

static enum qed_roce_qp_state
qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
{
	switch (state) {
	case QED_IWARP_QP_STATE_IDLE:
		return QED_ROCE_QP_STATE_INIT;
	case QED_IWARP_QP_STATE_RTS:
		return QED_ROCE_QP_STATE_RTS;
	case QED_IWARP_QP_STATE_TERMINATE:
		return QED_ROCE_QP_STATE_SQE;
	case QED_IWARP_QP_STATE_CLOSING:
		return QED_ROCE_QP_STATE_SQD;
	case QED_IWARP_QP_STATE_ERROR:
		return QED_ROCE_QP_STATE_ERR;
	default:
		return QED_ROCE_QP_STATE_ERR;
	}
}

static const char * const iwarp_state_names[] = {
	"IDLE",
	"RTS",
	"TERMINATE",
	"CLOSING",
	"ERROR",
};

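/* Drive the SW QP state machine. Only transitions that the FW must be
 * told about (and that were not triggered by the FW itself, i.e.
 * !internal) result in a MODIFY_QP ramrod.
 */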
int
qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn,
		    struct qed_rdma_qp *qp,
		    enum qed_iwarp_qp_state new_state, bool internal)
{
	enum qed_iwarp_qp_state prev_iw_state;
	bool modify_fw = false;
	int rc = 0;

	/* Modify QP can be called from the upper layer or as a result of an
	 * async RST/FIN, therefore the state must be protected by a lock.
	 */
	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
	prev_iw_state = qp->iwarp_state;

	if (prev_iw_state == new_state) {
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
		return 0;
	}

	switch (prev_iw_state) {
	case QED_IWARP_QP_STATE_IDLE:
		switch (new_state) {
		case QED_IWARP_QP_STATE_RTS:
			qp->iwarp_state = QED_IWARP_QP_STATE_RTS;
			break;
		case QED_IWARP_QP_STATE_ERROR:
			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
			if (!internal)
				modify_fw = true;
			break;
		default:
			break;
		}
		break;
	case QED_IWARP_QP_STATE_RTS:
		switch (new_state) {
		case QED_IWARP_QP_STATE_CLOSING:
			if (!internal)
				modify_fw = true;

			qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING;
			break;
		case QED_IWARP_QP_STATE_ERROR:
			if (!internal)
				modify_fw = true;
			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
			break;
		default:
			break;
		}
		break;
	case QED_IWARP_QP_STATE_ERROR:
		switch (new_state) {
		case QED_IWARP_QP_STATE_IDLE:
			qp->iwarp_state = new_state;
			break;
		case QED_IWARP_QP_STATE_CLOSING:
			/* Could happen due to a race; do nothing. */
			break;
		default:
			rc = -EINVAL;
		}
		break;
	case QED_IWARP_QP_STATE_TERMINATE:
	case QED_IWARP_QP_STATE_CLOSING:
		qp->iwarp_state = new_state;
		break;
	default:
		break;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n",
		   qp->icid,
		   iwarp_state_names[prev_iw_state],
		   iwarp_state_names[qp->iwarp_state],
		   internal ? "internal" : "");

	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);

	if (modify_fw)
		rc = qed_iwarp_modify_fw(p_hwfn, qp);

	return rc;
}

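/* Post a DESTROY_QP ramrod and block until the FW completes it. */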
int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_DESTROY_QP,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);

	return rc;
}

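/* Free the ep's DMA buffer and the ep object itself, optionally
 * unlinking it from the active ep list first.
 */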
static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn,
				 struct qed_iwarp_ep *ep,
				 bool remove_from_active_list)
{
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(*ep->ep_buffer_virt),
			  ep->ep_buffer_virt, ep->ep_buffer_phys);

	if (remove_from_active_list) {
		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		list_del(&ep->list_entry);
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	}

	if (ep->qp)
		ep->qp->ep = NULL;

	kfree(ep);
}

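/* Move the QP to ERROR if needed, wait for its ep (if any) to close,
 * then destroy the FW QP and free the shared queue page.
 */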
int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	struct qed_iwarp_ep *ep = qp->ep;
	int wait_count = 0;
	int rc = 0;

	if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) {
		rc = qed_iwarp_modify_qp(p_hwfn, qp,
					 QED_IWARP_QP_STATE_ERROR, false);
		if (rc)
			return rc;
	}

	/* Make sure ep is closed before returning and freeing memory. */
	if (ep) {
		while (READ_ONCE(ep->state) != QED_IWARP_EP_CLOSED &&
		       wait_count++ < 200)
			msleep(100);

		if (ep->state != QED_IWARP_EP_CLOSED)
			DP_NOTICE(p_hwfn, "ep state close timeout state=%x\n",
				  ep->state);

		qed_iwarp_destroy_ep(p_hwfn, ep, false);
	}

	rc = qed_iwarp_fw_destroy(p_hwfn, qp);

	if (qp->shared_queue)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  IWARP_SHARED_QUEUE_PAGE_SIZE,
				  qp->shared_queue, qp->shared_queue_phys_addr);

	return rc;
}

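/* Allocate an ep object together with the DMA-coherent buffer used to
 * exchange private data and async output with the FW.
 */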
static int
qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out)
{
	struct qed_iwarp_ep *ep;
	int rc;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->state = QED_IWARP_EP_INIT;

	ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						sizeof(*ep->ep_buffer_virt),
						&ep->ep_buffer_phys,
						GFP_KERNEL);
	if (!ep->ep_buffer_virt) {
		rc = -ENOMEM;
		goto err;
	}

	ep->sig = QED_EP_SIG;

	*ep_out = ep;

	return 0;

err:
	kfree(ep);
	return rc;
}

static void
qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn,
			   struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "local_mac=%x %x %x, remote_mac=%x %x %x\n",
		   p_tcp_ramrod->tcp.local_mac_addr_lo,
		   p_tcp_ramrod->tcp.local_mac_addr_mid,
		   p_tcp_ramrod->tcp.local_mac_addr_hi,
		   p_tcp_ramrod->tcp.remote_mac_addr_lo,
		   p_tcp_ramrod->tcp.remote_mac_addr_mid,
		   p_tcp_ramrod->tcp.remote_mac_addr_hi);

	if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n",
			   p_tcp_ramrod->tcp.local_ip,
			   p_tcp_ramrod->tcp.local_port,
			   p_tcp_ramrod->tcp.remote_ip,
			   p_tcp_ramrod->tcp.remote_port,
			   p_tcp_ramrod->tcp.vlan_id);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "local_ip=%pI6:%x, remote_ip=%pI6:%x, vlan=%x\n",
			   p_tcp_ramrod->tcp.local_ip,
			   p_tcp_ramrod->tcp.local_port,
			   p_tcp_ramrod->tcp.remote_ip,
			   p_tcp_ramrod->tcp.remote_port,
			   p_tcp_ramrod->tcp.vlan_id);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n",
		   p_tcp_ramrod->tcp.flow_label,
		   p_tcp_ramrod->tcp.ttl,
		   p_tcp_ramrod->tcp.tos_or_tc,
		   p_tcp_ramrod->tcp.mss,
		   p_tcp_ramrod->tcp.rcv_wnd_scale,
		   p_tcp_ramrod->tcp.connect_mode,
		   p_tcp_ramrod->tcp.flags);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "syn_ip_payload_length=%x, lo=%x, hi=%x\n",
		   p_tcp_ramrod->tcp.syn_ip_payload_length,
		   p_tcp_ramrod->tcp.syn_phy_addr_lo,
		   p_tcp_ramrod->tcp.syn_phy_addr_hi);
}

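/* Post a TCP_OFFLOAD ramrod handing the connection's TCP state (MACs,
 * IPs, ports, MSS, keep-alive and window parameters) to the FW.
 */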
static int
qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod;
	struct tcp_offload_params_opt2 *tcp;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t async_output_phys;
	dma_addr_t in_pdata_phys;
	u16 physical_q;
	u16 flags = 0;
	u8 tcp_flags;
	int rc;
	int i;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = ep->tcp_cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	if (ep->connect_mode == TCP_CONNECT_PASSIVE)
		init_data.comp_mode = QED_SPQ_MODE_CB;
	else
		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_TCP_OFFLOAD,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		return rc;

	p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload;

	in_pdata_phys = ep->ep_buffer_phys +
	    offsetof(struct qed_iwarp_ep_memory, in_pdata);
	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr,
		       in_pdata_phys);

	p_tcp_ramrod->iwarp.incoming_ulp_buffer.len =
	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));

	async_output_phys = ep->ep_buffer_phys +
	    offsetof(struct qed_iwarp_ep_memory, async_output);
	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf,
		       async_output_phys);

	p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
	p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep));

	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q);
	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
	p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q);
	p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev;

	tcp = &p_tcp_ramrod->tcp;
	qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi,
			    &tcp->remote_mac_addr_mid,
			    &tcp->remote_mac_addr_lo, ep->remote_mac_addr);
	qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid,
			    &tcp->local_mac_addr_lo, ep->local_mac_addr);

	tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan);

	tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;

	SET_FIELD(flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
		  !!(tcp_flags & QED_IWARP_TS_EN));

	SET_FIELD(flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
		  !!(tcp_flags & QED_IWARP_DA_EN));

	tcp->flags = cpu_to_le16(flags);
	tcp->ip_version = ep->cm_info.ip_version;

	for (i = 0; i < 4; i++) {
		tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]);
		tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]);
	}

	tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port);
	tcp->local_port = cpu_to_le16(ep->cm_info.local_port);
	tcp->mss = cpu_to_le16(ep->mss);
	tcp->flow_label = 0;
	tcp->ttl = 0x40;
	tcp->tos_or_tc = 0;

	tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
	tcp->cwnd = cpu_to_le32(QED_IWARP_DEF_CWND_FACTOR * ep->mss);
	tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
	tcp->ka_timeout = cpu_to_le32(QED_IWARP_DEF_KA_TIMEOUT);
	tcp->ka_interval = cpu_to_le32(QED_IWARP_DEF_KA_INTERVAL);

	tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
	tcp->connect_mode = ep->connect_mode;

	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
		tcp->syn_ip_payload_length =
		    cpu_to_le16(ep->syn_ip_payload_length);
		tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr);
		tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr);
	}

	qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc);

	return rc;
}

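/* Handle an incoming MPA request on the passive side: extract the
 * enhanced-mode ord/ird and RTR capabilities from the private data and
 * notify the upper layer with a QED_IWARP_EVENT_MPA_REQUEST event.
 */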
static void
qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	struct qed_iwarp_cm_event_params params;
	struct mpa_v2_hdr *mpa_v2;
	union async_output *async_data;
	u16 mpa_ord, mpa_ird;
	u8 mpa_hdr_size = 0;
	u16 ulp_data_len;
	u8 mpa_rev;

	async_data = &ep->ep_buffer_virt->async_output;

	mpa_rev = async_data->mpa_request.mpa_handshake_mode;
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "private_data_len=%x handshake_mode=%x private_data=(%x)\n",
		   async_data->mpa_request.ulp_data_len,
		   mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata)));

	if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
		/* Read ord/ird values from private data buffer */
		mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata;
		mpa_hdr_size = sizeof(*mpa_v2);

		mpa_ord = ntohs(mpa_v2->ord);
		mpa_ird = ntohs(mpa_v2->ird);

		/* Temporarily store the requested incoming ord/ird in
		 * cm_info; they are replaced with the negotiated values
		 * during accept.
		 */
		ep->cm_info.ord = (u8)min_t(u16,
					    (mpa_ord & MPA_V2_IRD_ORD_MASK),
					    QED_IWARP_ORD_DEFAULT);

		ep->cm_info.ird = (u8)min_t(u16,
					    (mpa_ird & MPA_V2_IRD_ORD_MASK),
					    QED_IWARP_IRD_DEFAULT);

		/* Peer2Peer negotiation */
		ep->rtr_type = MPA_RTR_TYPE_NONE;
		if (mpa_ird & MPA_V2_PEER2PEER_MODEL) {
			if (mpa_ord & MPA_V2_WRITE_RTR)
				ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE;

			if (mpa_ord & MPA_V2_READ_RTR)
				ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ;

			if (mpa_ird & MPA_V2_SEND_RTR)
				ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND;

			ep->rtr_type &= iwarp_info->rtr_type;

			/* if we're left with no match send our capabilities */
			if (ep->rtr_type == MPA_RTR_TYPE_NONE)
				ep->rtr_type = iwarp_info->rtr_type;
		}

		ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
	} else {
		ep->cm_info.ord = QED_IWARP_ORD_DEFAULT;
		ep->cm_info.ird = QED_IWARP_IRD_DEFAULT;
		ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n",
		   mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type,
		   async_data->mpa_request.ulp_data_len, mpa_hdr_size);

	/* Strip mpa v2 hdr from private data before sending to upper layer */
	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size;

	ulp_data_len = le16_to_cpu(async_data->mpa_request.ulp_data_len);
	ep->cm_info.private_data_len = ulp_data_len - mpa_hdr_size;

	params.event = QED_IWARP_EVENT_MPA_REQUEST;
	params.cm_info = &ep->cm_info;
	params.ep_context = ep;
	params.status = 0;

	ep->state = QED_IWARP_EP_MPA_REQ_RCVD;
	ep->event_cb(ep->cb_context, &params);
}

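/* Post an MPA_OFFLOAD ramrod that starts the MPA negotiation in FW.
 * A NULL qp means the connection is being rejected.
 */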
static int
qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
	struct mpa_outgoing_params *common;
	struct qed_iwarp_info *iwarp_info;
	struct qed_sp_init_data init_data;
	dma_addr_t async_output_phys;
	struct qed_spq_entry *p_ent;
	dma_addr_t out_pdata_phys;
	dma_addr_t in_pdata_phys;
	struct qed_rdma_qp *qp;
	bool reject;
	u32 val;
	int rc;

	if (!ep)
		return -EINVAL;

	qp = ep->qp;
	reject = !qp;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = reject ? ep->tcp_cid : qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;

	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
		init_data.comp_mode = QED_SPQ_MODE_CB;
	else
		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		return rc;

	p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload;
	common = &p_mpa_ramrod->common;

	out_pdata_phys = ep->ep_buffer_phys +
	    offsetof(struct qed_iwarp_ep_memory, out_pdata);
	DMA_REGPAIR_LE(common->outgoing_ulp_buffer.addr, out_pdata_phys);

	val = ep->cm_info.private_data_len;
	common->outgoing_ulp_buffer.len = cpu_to_le16(val);
	common->crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;

	common->out_rq.ord = cpu_to_le32(ep->cm_info.ord);
	common->out_rq.ird = cpu_to_le32(ep->cm_info.ird);

	val = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid;
	p_mpa_ramrod->tcp_cid = cpu_to_le32(val);

	in_pdata_phys = ep->ep_buffer_phys +
	    offsetof(struct qed_iwarp_ep_memory, in_pdata);
	p_mpa_ramrod->tcp_connect_side = ep->connect_mode;
	DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr,
		       in_pdata_phys);
	p_mpa_ramrod->incoming_ulp_buffer.len =
	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
	async_output_phys = ep->ep_buffer_phys +
	    offsetof(struct qed_iwarp_ep_memory, async_output);
	DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf,
		       async_output_phys);
	p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
	p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep));

	if (!reject) {
		DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr,
			       qp->shared_queue_phys_addr);
		p_mpa_ramrod->stats_counter_id =
		    RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue;
	} else {
		common->reject = 1;
	}

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	p_mpa_ramrod->rcv_wnd = cpu_to_le16(iwarp_info->rcv_wnd_size);
	p_mpa_ramrod->mode = ep->mpa_rev;
	SET_FIELD(p_mpa_ramrod->rtr_pref,
		  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);

	ep->state = QED_IWARP_EP_MPA_OFFLOADED;
	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (!reject)
		ep->cid = qp->icid;	/* Now they're migrated. */

	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n",
		   reject ? 0xffff : qp->icid,
		   ep->tcp_cid,
		   rc,
		   ep->cm_info.ird,
		   ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject);
	return rc;
}

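/* Recycle an ep back to the free list after a rejected or failed
 * connection, re-allocating its tcp cid if it was consumed.
 */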
static void
qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	ep->state = QED_IWARP_EP_INIT;
	if (ep->qp)
		ep->qp->ep = NULL;
	ep->qp = NULL;
	memset(&ep->cm_info, 0, sizeof(ep->cm_info));

	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
		/* We don't care about the return code, it's ok if tcp_cid
		 * remains invalid...in this case we'll defer allocation
		 */
		qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
	}
	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	list_move_tail(&ep->list_entry,
		       &p_hwfn->p_rdma_info->iwarp.ep_free_list);

	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
}

static void
qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct mpa_v2_hdr *mpa_v2_params;
	union async_output *async_data;
	u16 mpa_ird, mpa_ord;
	u8 mpa_data_size = 0;
	u16 ulp_data_len;

	if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) {
		mpa_v2_params =
		    (struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata);
		mpa_data_size = sizeof(*mpa_v2_params);
		mpa_ird = ntohs(mpa_v2_params->ird);
		mpa_ord = ntohs(mpa_v2_params->ord);

		ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK);
		ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK);
	}

	async_data = &ep->ep_buffer_virt->async_output;
	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size;

	ulp_data_len = le16_to_cpu(async_data->mpa_response.ulp_data_len);
	ep->cm_info.private_data_len = ulp_data_len - mpa_data_size;
}

static void
qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct qed_iwarp_cm_event_params params;

	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
		DP_NOTICE(p_hwfn,
			  "MPA reply event not expected on passive side!\n");
		return;
	}

	params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY;

	qed_iwarp_parse_private_data(p_hwfn, ep);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);

	params.cm_info = &ep->cm_info;
	params.ep_context = ep;
	params.status = 0;

	ep->mpa_reply_processed = true;

	ep->event_cb(ep->cb_context, &params);
}

#define QED_IWARP_CONNECT_MODE_STRING(ep) \
	((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active"

/* Called as a result of the event:
 * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE
 */
static void
qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
		       struct qed_iwarp_ep *ep, u8 fw_return_code)
{
	struct qed_iwarp_cm_event_params params;

	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
		params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
	else
		params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE;

	if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed)
		qed_iwarp_parse_private_data(p_hwfn, ep);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);

	params.cm_info = &ep->cm_info;

	params.ep_context = ep;

	switch (fw_return_code) {
	case RDMA_RETURN_OK:
		ep->qp->max_rd_atomic_req = ep->cm_info.ord;
		ep->qp->max_rd_atomic_resp = ep->cm_info.ird;
		qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS, 1);
		ep->state = QED_IWARP_EP_ESTABLISHED;
		params.status = 0;
		break;
	case IWARP_CONN_ERROR_MPA_TIMEOUT:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA timeout\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -EBUSY;
		break;
	case IWARP_CONN_ERROR_MPA_ERROR_REJECT:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Reject\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_RST:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid,
			  ep->tcp_cid);
		params.status = -ECONNRESET;
		break;
	case IWARP_CONN_ERROR_MPA_FIN:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA received FIN\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_INSUF_IRD:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA insufficient ird\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_RTR_MISMATCH:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA RTR MISMATCH\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_LOCAL_ERROR:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Local Error\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_TERMINATE:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA TERMINATE\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	default:
		params.status = -ECONNRESET;
		break;
	}

	if (fw_return_code != RDMA_RETURN_OK)
		/* paired with READ_ONCE in destroy_qp */
		smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);

	ep->event_cb(ep->cb_context, &params);

	/* On the passive side, if there is no associated QP (REJECT) we
	 * need to return the ep to the pool; in the regular case a
	 * replacement element is added to the pool in accept instead.
	 * In both cases the ep must be removed from the ep_list.
	 */
	if (fw_return_code != RDMA_RETURN_OK) {
		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
		if ((ep->connect_mode == TCP_CONNECT_PASSIVE) &&
		    (!ep->qp)) {	/* Rejected */
			qed_iwarp_return_ep(p_hwfn, ep);
		} else {
			spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
			list_del(&ep->list_entry);
			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		}
	}
}

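/* For enhanced (rev2) MPA, build the mpa_v2_hdr carrying our ird/ord
 * and RTR capabilities at the start of the outgoing private data.
 */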
static void
qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn,
			     struct qed_iwarp_ep *ep, u8 *mpa_data_size)
{
	struct mpa_v2_hdr *mpa_v2_params;
	u16 mpa_ird, mpa_ord;

	*mpa_data_size = 0;
	if (MPA_REV2(ep->mpa_rev)) {
		mpa_v2_params =
		    (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata;
		*mpa_data_size = sizeof(*mpa_v2_params);

		mpa_ird = (u16)ep->cm_info.ird;
		mpa_ord = (u16)ep->cm_info.ord;

		if (ep->rtr_type != MPA_RTR_TYPE_NONE) {
			mpa_ird |= MPA_V2_PEER2PEER_MODEL;

			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND)
				mpa_ird |= MPA_V2_SEND_RTR;

			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE)
				mpa_ord |= MPA_V2_WRITE_RTR;

			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ)
				mpa_ord |= MPA_V2_READ_RTR;
		}

		mpa_v2_params->ird = htons(mpa_ird);
		mpa_v2_params->ord = htons(mpa_ord);

		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n",
			   mpa_v2_params->ird,
			   mpa_v2_params->ord,
			   *((u32 *)mpa_v2_params),
			   mpa_ord & MPA_V2_IRD_ORD_MASK,
			   mpa_ird & MPA_V2_IRD_ORD_MASK,
			   !!(mpa_ird & MPA_V2_PEER2PEER_MODEL),
			   !!(mpa_ird & MPA_V2_SEND_RTR),
			   !!(mpa_ord & MPA_V2_WRITE_RTR),
			   !!(mpa_ord & MPA_V2_READ_RTR));
	}
}

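/* Active-side connection establishment: allocate a cid and an ep, build
 * the outgoing private data and kick off the TCP offload.
 */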
int qed_iwarp_connect(void *rdma_cxt,
		      struct qed_iwarp_connect_in *iparams,
		      struct qed_iwarp_connect_out *oparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_iwarp_info *iwarp_info;
	struct qed_iwarp_ep *ep;
	u8 mpa_data_size = 0;
	u32 cid;
	int rc;

	if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) ||
	    (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) {
		DP_NOTICE(p_hwfn,
			  "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
			  iparams->qp->icid, iparams->cm_info.ord,
			  iparams->cm_info.ird);

		return -EINVAL;
	}

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;

	/* Allocate ep object */
	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
	if (rc)
		return rc;

	rc = qed_iwarp_create_ep(p_hwfn, &ep);
	if (rc)
		goto err;

	ep->tcp_cid = cid;

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	ep->qp = iparams->qp;
	ep->qp->ep = ep;
	ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr);
	ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr);
	memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info));

	ep->cm_info.ord = iparams->cm_info.ord;
	ep->cm_info.ird = iparams->cm_info.ird;

	ep->rtr_type = iwarp_info->rtr_type;
	if (!iwarp_info->peer2peer)
		ep->rtr_type = MPA_RTR_TYPE_NONE;

	if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0))
		ep->cm_info.ord = 1;

	ep->mpa_rev = iwarp_info->mpa_rev;

	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);

	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
	ep->cm_info.private_data_len = iparams->cm_info.private_data_len +
				       mpa_data_size;

	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
	       iparams->cm_info.private_data,
	       iparams->cm_info.private_data_len);

	ep->mss = iparams->mss;
	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);

	ep->event_cb = iparams->event_cb;
	ep->cb_context = iparams->cb_context;
	ep->connect_mode = TCP_CONNECT_ACTIVE;

	oparams->ep_context = ep;

	rc = qed_iwarp_tcp_offload(p_hwfn, ep);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n",
		   iparams->qp->icid, ep->tcp_cid, rc);

	if (rc) {
		qed_iwarp_destroy_ep(p_hwfn, ep, true);
		goto err;
	}

	return rc;
err:
	qed_iwarp_cid_cleaned(p_hwfn, cid);

	return rc;
}

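/* Take an ep from the pre-allocated free list for a new passive
 * connection, allocating a tcp cid for it if a previous attempt failed.
 */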
static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn)
{
	struct qed_iwarp_ep *ep = NULL;
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
		DP_ERR(p_hwfn, "Ep list is empty\n");
		goto out;
	}

	ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
			      struct qed_iwarp_ep, list_entry);

	/* In some cases we could have failed to allocate a tcp cid when the
	 * ep was added from accept / failure; retry now. This is not the
	 * common case.
	 */
	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
		rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);

		/* If we fail we could look for another entry with a valid
		 * tcp_cid, but since we don't expect to reach this anyway
		 * it's not worth the handling.
		 */
		if (rc) {
			ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
			ep = NULL;
			goto out;
		}
	}

	list_del(&ep->list_entry);

out:
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	return ep;
}

#define QED_IWARP_MAX_CID_CLEAN_TIME  100
#define QED_IWARP_MAX_NO_PROGRESS_CNT 5

/* This function waits for all the bits of a bmap to be cleared; as long as
 * there is progress (i.e. the number of bits left to be cleared decreases),
 * the function keeps waiting.
 */
static int
qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
{
	int prev_weight = 0;
	int wait_count = 0;
	int weight = 0;

	weight = bitmap_weight(bmap->bitmap, bmap->max_count);
	prev_weight = weight;

	while (weight) {
		/* If the HW device is during recovery, all resources are
		 * immediately reset without receiving a per-cid indication
		 * from HW. In this case we don't expect the cid_map to be
		 * cleared.
		 */
		if (p_hwfn->cdev->recov_in_prog)
			return 0;

		msleep(QED_IWARP_MAX_CID_CLEAN_TIME);

		weight = bitmap_weight(bmap->bitmap, bmap->max_count);

		if (prev_weight == weight) {
			wait_count++;
		} else {
			prev_weight = weight;
			wait_count = 0;
		}

		if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) {
			DP_NOTICE(p_hwfn,
				  "%s bitmap wait timed out (%d cids pending)\n",
				  bmap->name, weight);
			return -EBUSY;
		}
	}
	return 0;
}

static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn)
{
	int rc;
	int i;

	rc = qed_iwarp_wait_cid_map_cleared(p_hwfn,
					    &p_hwfn->p_rdma_info->tcp_cid_map);
	if (rc)
		return rc;

	/* Now free the tcp cids from the main cid map */
	for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++)
		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i);

	/* Now wait for all cids to be completed */
	return qed_iwarp_wait_cid_map_cleared(p_hwfn,
					      &p_hwfn->p_rdma_info->cid_map);
}

static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn)
{
	struct qed_iwarp_ep *ep;

	while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

		ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
				      struct qed_iwarp_ep, list_entry);

		if (!ep) {
			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
			break;
		}
		list_del(&ep->list_entry);

		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

		if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID)
			qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid);

		qed_iwarp_destroy_ep(p_hwfn, ep, false);
	}
}

static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init)
{
	struct qed_iwarp_ep *ep;
	int rc = 0;
	int count;
	u32 cid;
	int i;

	count = init ? QED_IWARP_PREALLOC_CNT : 1;
	for (i = 0; i < count; i++) {
		rc = qed_iwarp_create_ep(p_hwfn, &ep);
		if (rc)
			return rc;

		/* During initialization we allocate from the main pool,
		 * afterwards we allocate only from the tcp_cid.
		 */
		if (init) {
			rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
			if (rc)
				goto err;
			qed_iwarp_set_tcp_cid(p_hwfn, cid);
		} else {
			/* We don't care about the return code, it's ok if
			 * tcp_cid remains invalid...in this case we'll
			 * defer allocation
			 */
			qed_iwarp_alloc_tcp_cid(p_hwfn, &cid);
		}

		ep->tcp_cid = cid;

		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		list_add_tail(&ep->list_entry,
			      &p_hwfn->p_rdma_info->iwarp.ep_free_list);
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	}

	return rc;

err:
	qed_iwarp_destroy_ep(p_hwfn, ep, false);

	return rc;
}

int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
{
	int rc;

	/* Allocate the bitmap for tcp cids. These are used by the passive
	 * side to ensure it can allocate, during dpc, a tcp cid that was
	 * pre-acquired and doesn't require dynamic allocation of ilt.
	 */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
				 QED_IWARP_PREALLOC_CNT, "TCP_CID");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate tcp cid, rc = %d\n", rc);
		return rc;
	}

	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list);
	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	rc = qed_iwarp_prealloc_ep(p_hwfn, true);
	if (rc)
		return rc;

	return qed_ooo_alloc(p_hwfn);
}

void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;

	qed_ooo_free(p_hwfn);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
	kfree(iwarp_info->mpa_bufs);
	kfree(iwarp_info->partial_fpdus);
	kfree(iwarp_info->mpa_intermediate_buf);
}

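/* Accept an incoming MPA request: negotiate ord/ird against the values
 * advertised by the peer and post the MPA offload ramrod.
 */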
int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_iwarp_ep *ep;
	u8 mpa_data_size = 0;
	int rc;

	ep = iparams->ep_context;
	if (!ep) {
		DP_ERR(p_hwfn, "Ep Context received in accept is NULL\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
		   iparams->qp->icid, ep->tcp_cid);

	if ((iparams->ord > QED_IWARP_ORD_DEFAULT) ||
	    (iparams->ird > QED_IWARP_IRD_DEFAULT)) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
			   iparams->qp->icid,
			   ep->tcp_cid, iparams->ord, iparams->ird);
		return -EINVAL;
	}

	qed_iwarp_prealloc_ep(p_hwfn, false);

	ep->cb_context = iparams->cb_context;
	ep->qp = iparams->qp;
	ep->qp->ep = ep;

	if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
		/* Negotiate ord/ird: if the upper layer requested an ord
		 * larger than the ird advertised by the remote, we must
		 * decrease our ord.
		 */
		if (iparams->ord > ep->cm_info.ird)
			iparams->ord = ep->cm_info.ird;

		if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) &&
		    (iparams->ird == 0))
			iparams->ird = 1;
	}

	/* Update cm_info ord/ird to be negotiated values */
	ep->cm_info.ord = iparams->ord;
	ep->cm_info.ird = iparams->ird;

	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);

	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
	ep->cm_info.private_data_len = iparams->private_data_len +
				       mpa_data_size;

	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
	       iparams->private_data, iparams->private_data_len);

	rc = qed_iwarp_mpa_offload(p_hwfn, ep);
	if (rc)
		qed_iwarp_modify_qp(p_hwfn,
				    iparams->qp, QED_IWARP_QP_STATE_ERROR, 1);

	return rc;
}

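/* Reject an incoming MPA request: the MPA offload ramrod is posted
 * without a QP attached, which marks the connection as rejected.
 */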
1523 | int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams) |
1524 | { |
1525 | struct qed_hwfn *p_hwfn = rdma_cxt; |
1526 | struct qed_iwarp_ep *ep; |
1527 | u8 mpa_data_size = 0; |
1528 | |
1529 | ep = iparams->ep_context; |
1530 | if (!ep) { |
1531 | DP_ERR(p_hwfn, "Ep Context receive in reject is NULL\n" ); |
1532 | return -EINVAL; |
1533 | } |
1534 | |
1535 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n" , ep->tcp_cid); |
1536 | |
1537 | ep->cb_context = iparams->cb_context; |
1538 | ep->qp = NULL; |
1539 | |
1540 | qed_iwarp_mpa_v2_set_private(p_hwfn, ep, mpa_data_size: &mpa_data_size); |
1541 | |
1542 | ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata; |
1543 | ep->cm_info.private_data_len = iparams->private_data_len + |
1544 | mpa_data_size; |
1545 | |
1546 | memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size, |
1547 | iparams->private_data, iparams->private_data_len); |
1548 | |
1549 | return qed_iwarp_mpa_offload(p_hwfn, ep); |
1550 | } |
1551 | |
1552 | static void |
1553 | qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn, |
1554 | struct qed_iwarp_cm_info *cm_info) |
1555 | { |
1556 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n" , |
1557 | cm_info->ip_version); |
1558 | |
1559 | if (cm_info->ip_version == QED_TCP_IPV4) |
1560 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, |
1561 | "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n" , |
1562 | cm_info->remote_ip, cm_info->remote_port, |
1563 | cm_info->local_ip, cm_info->local_port, |
1564 | cm_info->vlan); |
1565 | else |
1566 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, |
1567 | "remote_ip %pI6:%x, local_ip %pI6:%x vlan=%x\n" , |
1568 | cm_info->remote_ip, cm_info->remote_port, |
1569 | cm_info->local_ip, cm_info->local_port, |
1570 | cm_info->vlan); |
1571 | |
1572 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, |
1573 | "private_data_len = %x ord = %d, ird = %d\n" , |
1574 | cm_info->private_data_len, cm_info->ord, cm_info->ird); |
1575 | } |
1576 | |
1577 | static int |
1578 | qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn, |
1579 | struct qed_iwarp_ll2_buff *buf, u8 handle) |
1580 | { |
1581 | int rc; |
1582 | |
1583 | rc = qed_ll2_post_rx_buffer(cxt: p_hwfn, connection_handle: handle, addr: buf->data_phys_addr, |
1584 | buf_len: (u16)buf->buff_size, cookie: buf, notify_fw: 1); |
1585 | if (rc) { |
1586 | DP_NOTICE(p_hwfn, |
1587 | "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n" , |
1588 | rc, handle); |
1589 | dma_free_coherent(dev: &p_hwfn->cdev->pdev->dev, size: buf->buff_size, |
1590 | cpu_addr: buf->data, dma_handle: buf->data_phys_addr); |
1591 | kfree(objp: buf); |
1592 | } |
1593 | |
1594 | return rc; |
1595 | } |
1596 | |
1597 | static bool |
1598 | qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info) |
1599 | { |
1600 | struct qed_iwarp_ep *ep = NULL; |
1601 | bool found = false; |
1602 | |
1603 | list_for_each_entry(ep, |
1604 | &p_hwfn->p_rdma_info->iwarp.ep_list, |
1605 | list_entry) { |
1606 | if ((ep->cm_info.local_port == cm_info->local_port) && |
1607 | (ep->cm_info.remote_port == cm_info->remote_port) && |
1608 | (ep->cm_info.vlan == cm_info->vlan) && |
1609 | !memcmp(p: &ep->cm_info.local_ip, q: cm_info->local_ip, |
1610 | size: sizeof(cm_info->local_ip)) && |
1611 | !memcmp(p: &ep->cm_info.remote_ip, q: cm_info->remote_ip, |
1612 | size: sizeof(cm_info->remote_ip))) { |
1613 | found = true; |
1614 | break; |
1615 | } |
1616 | } |
1617 | |
1618 | if (found) { |
1619 | DP_NOTICE(p_hwfn, |
1620 | "SYN received on active connection - dropping\n" ); |
1621 | qed_iwarp_print_cm_info(p_hwfn, cm_info); |
1622 | |
1623 | return true; |
1624 | } |
1625 | |
1626 | return false; |
1627 | } |
1628 | |
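/* Find a listener matching an incoming SYN: a listener bound to the
 * wildcard (all-zero) address matches any local IP on its port; otherwise
 * both the local IP and the vlan must match exactly.
 */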
1629 | static struct qed_iwarp_listener * |
1630 | qed_iwarp_get_listener(struct qed_hwfn *p_hwfn, |
1631 | struct qed_iwarp_cm_info *cm_info) |
1632 | { |
1633 | struct qed_iwarp_listener *listener = NULL; |
1634 | static const u32 ip_zero[4] = { 0, 0, 0, 0 }; |
1635 | bool found = false; |
1636 | |
1637 | list_for_each_entry(listener, |
1638 | &p_hwfn->p_rdma_info->iwarp.listen_list, |
1639 | list_entry) { |
1640 | if (listener->port == cm_info->local_port) { |
			if (!memcmp(listener->ip_addr,
				    ip_zero, sizeof(ip_zero))) {
1643 | found = true; |
1644 | break; |
1645 | } |
1646 | |
			if (!memcmp(listener->ip_addr,
				    cm_info->local_ip,
				    sizeof(cm_info->local_ip)) &&
1650 | (listener->vlan == cm_info->vlan)) { |
1651 | found = true; |
1652 | break; |
1653 | } |
1654 | } |
1655 | } |
1656 | |
1657 | if (found) { |
1658 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener found = %p\n" , |
1659 | listener); |
1660 | return listener; |
1661 | } |
1662 | |
1663 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n" ); |
1664 | return NULL; |
1665 | } |
1666 | |
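/* Parse a SYN packet received on the ll2 connection: Ethernet header (with
 * an optional VLAN tag), then IPv4 or IPv6, then TCP. On success the
 * connection 4-tuple is filled into cm_info, and the IP payload length and
 * the offset of the TCP header within the buffer are returned.
 */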
1667 | static int |
1668 | qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, |
1669 | struct qed_iwarp_cm_info *cm_info, |
1670 | void *buf, |
1671 | u8 *remote_mac_addr, |
1672 | u8 *local_mac_addr, |
1673 | int *payload_len, int *tcp_start_offset) |
1674 | { |
1675 | struct vlan_ethhdr *vethh; |
1676 | bool vlan_valid = false; |
1677 | struct ipv6hdr *ip6h; |
1678 | struct ethhdr *ethh; |
1679 | struct tcphdr *tcph; |
1680 | struct iphdr *iph; |
1681 | int eth_hlen; |
1682 | int ip_hlen; |
1683 | int eth_type; |
1684 | int i; |
1685 | |
1686 | ethh = buf; |
1687 | eth_type = ntohs(ethh->h_proto); |
1688 | if (eth_type == ETH_P_8021Q) { |
1689 | vlan_valid = true; |
1690 | vethh = (struct vlan_ethhdr *)ethh; |
1691 | cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK; |
1692 | eth_type = ntohs(vethh->h_vlan_encapsulated_proto); |
1693 | } |
1694 | |
1695 | eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0); |
1696 | |
	if (!ether_addr_equal(ethh->h_dest,
			      p_hwfn->p_rdma_info->iwarp.mac_addr)) {
1699 | DP_VERBOSE(p_hwfn, |
1700 | QED_MSG_RDMA, |
1701 | "Got unexpected mac %pM instead of %pM\n" , |
1702 | ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr); |
1703 | return -EINVAL; |
1704 | } |
1705 | |
	ether_addr_copy(remote_mac_addr, ethh->h_source);
	ether_addr_copy(local_mac_addr, ethh->h_dest);
1708 | |
1709 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n" , |
1710 | eth_type, ethh->h_source); |
1711 | |
1712 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen=%d destination mac: %pM\n" , |
1713 | eth_hlen, ethh->h_dest); |
1714 | |
1715 | iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen); |
1716 | |
1717 | if (eth_type == ETH_P_IP) { |
1718 | if (iph->protocol != IPPROTO_TCP) { |
1719 | DP_NOTICE(p_hwfn, |
1720 | "Unexpected ip protocol on ll2 %x\n" , |
1721 | iph->protocol); |
1722 | return -EINVAL; |
1723 | } |
1724 | |
1725 | cm_info->local_ip[0] = ntohl(iph->daddr); |
1726 | cm_info->remote_ip[0] = ntohl(iph->saddr); |
1727 | cm_info->ip_version = QED_TCP_IPV4; |
1728 | |
1729 | ip_hlen = (iph->ihl) * sizeof(u32); |
1730 | *payload_len = ntohs(iph->tot_len) - ip_hlen; |
1731 | } else if (eth_type == ETH_P_IPV6) { |
1732 | ip6h = (struct ipv6hdr *)iph; |
1733 | |
1734 | if (ip6h->nexthdr != IPPROTO_TCP) { |
1735 | DP_NOTICE(p_hwfn, |
1736 | "Unexpected ip protocol on ll2 %x\n" , |
1737 | iph->protocol); |
1738 | return -EINVAL; |
1739 | } |
1740 | |
1741 | for (i = 0; i < 4; i++) { |
1742 | cm_info->local_ip[i] = |
1743 | ntohl(ip6h->daddr.in6_u.u6_addr32[i]); |
1744 | cm_info->remote_ip[i] = |
1745 | ntohl(ip6h->saddr.in6_u.u6_addr32[i]); |
1746 | } |
1747 | cm_info->ip_version = QED_TCP_IPV6; |
1748 | |
1749 | ip_hlen = sizeof(*ip6h); |
1750 | *payload_len = ntohs(ip6h->payload_len); |
1751 | } else { |
1752 | DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n" , eth_type); |
1753 | return -EINVAL; |
1754 | } |
1755 | |
1756 | tcph = (struct tcphdr *)((u8 *)iph + ip_hlen); |
1757 | |
1758 | if (!tcph->syn) { |
1759 | DP_NOTICE(p_hwfn, |
1760 | "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n" , |
1761 | iph->ihl, tcph->source, tcph->dest); |
1762 | return -EINVAL; |
1763 | } |
1764 | |
1765 | cm_info->local_port = ntohs(tcph->dest); |
1766 | cm_info->remote_port = ntohs(tcph->source); |
1767 | |
1768 | qed_iwarp_print_cm_info(p_hwfn, cm_info); |
1769 | |
1770 | *tcp_start_offset = eth_hlen + ip_hlen; |
1771 | |
1772 | return 0; |
1773 | } |
1774 | |
1775 | static struct qed_iwarp_fpdu *qed_iwarp_get_curr_fpdu(struct qed_hwfn *p_hwfn, |
1776 | u16 cid) |
1777 | { |
1778 | struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; |
1779 | struct qed_iwarp_fpdu *partial_fpdu; |
1780 | u32 idx; |
1781 | |
	idx = cid - qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_IWARP);
1783 | if (idx >= iwarp_info->max_num_partial_fpdus) { |
1784 | DP_ERR(p_hwfn, "Invalid cid %x max_num_partial_fpdus=%x\n" , cid, |
1785 | iwarp_info->max_num_partial_fpdus); |
1786 | return NULL; |
1787 | } |
1788 | |
1789 | partial_fpdu = &iwarp_info->partial_fpdus[idx]; |
1790 | |
1791 | return partial_fpdu; |
1792 | } |
1793 | |
1794 | enum qed_iwarp_mpa_pkt_type { |
1795 | QED_IWARP_MPA_PKT_PACKED, |
1796 | QED_IWARP_MPA_PKT_PARTIAL, |
1797 | QED_IWARP_MPA_PKT_UNALIGNED |
1798 | }; |
1799 | |
1800 | #define QED_IWARP_INVALID_FPDU_LENGTH 0xffff |
1801 | #define QED_IWARP_MPA_FPDU_LENGTH_SIZE (2) |
1802 | #define QED_IWARP_MPA_CRC32_DIGEST_SIZE (4) |
1803 | |
1804 | /* Pad to multiple of 4 */ |
1805 | #define QED_IWARP_PDU_DATA_LEN_WITH_PAD(data_len) ALIGN(data_len, 4) |
1806 | #define QED_IWARP_FPDU_LEN_WITH_PAD(_mpa_len) \ |
1807 | (QED_IWARP_PDU_DATA_LEN_WITH_PAD((_mpa_len) + \ |
1808 | QED_IWARP_MPA_FPDU_LENGTH_SIZE) + \ |
1809 | QED_IWARP_MPA_CRC32_DIGEST_SIZE) |
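/* Example: an MPA payload of mpa_len = 21 bytes is framed as 2 length bytes
 * + 21 bytes of ULPDU, padded to ALIGN(23, 4) = 24 bytes, plus the 4-byte
 * CRC32 digest, i.e. a 28-byte FPDU on the wire.
 */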
1810 | |
/* fpdu can be fragmented over a maximum of 3 bds: header, partial mpa, unaligned */
1812 | #define QED_IWARP_MAX_BDS_PER_FPDU 3 |
1813 | |
1814 | static const char * const pkt_type_str[] = { |
1815 | "QED_IWARP_MPA_PKT_PACKED" , |
1816 | "QED_IWARP_MPA_PKT_PARTIAL" , |
1817 | "QED_IWARP_MPA_PKT_UNALIGNED" |
1818 | }; |
1819 | |
1820 | static int |
1821 | qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn, |
1822 | struct qed_iwarp_fpdu *fpdu, |
1823 | struct qed_iwarp_ll2_buff *buf); |
1824 | |
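/* Classify the MPA data at the head of a tcp segment: UNALIGNED means the
 * previous FPDU is still missing bytes, PACKED means a complete FPDU fits
 * within this segment, and PARTIAL means the FPDU continues into the next
 * segment.
 */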
1825 | static enum qed_iwarp_mpa_pkt_type |
1826 | qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn, |
1827 | struct qed_iwarp_fpdu *fpdu, |
1828 | u16 tcp_payload_len, u8 *mpa_data) |
1829 | { |
1830 | enum qed_iwarp_mpa_pkt_type pkt_type; |
1831 | u16 mpa_len; |
1832 | |
1833 | if (fpdu->incomplete_bytes) { |
1834 | pkt_type = QED_IWARP_MPA_PKT_UNALIGNED; |
1835 | goto out; |
1836 | } |
1837 | |
	/* special case of one byte remaining...
	 * lower byte will be read in the next packet
	 */
1841 | if (tcp_payload_len == 1) { |
1842 | fpdu->fpdu_length = *mpa_data << BITS_PER_BYTE; |
1843 | pkt_type = QED_IWARP_MPA_PKT_PARTIAL; |
1844 | goto out; |
1845 | } |
1846 | |
1847 | mpa_len = ntohs(*(__force __be16 *)mpa_data); |
1848 | fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len); |
1849 | |
1850 | if (fpdu->fpdu_length <= tcp_payload_len) |
1851 | pkt_type = QED_IWARP_MPA_PKT_PACKED; |
1852 | else |
1853 | pkt_type = QED_IWARP_MPA_PKT_PARTIAL; |
1854 | |
1855 | out: |
1856 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, |
1857 | "MPA_ALIGN: %s: fpdu_length=0x%x tcp_payload_len:0x%x\n" , |
1858 | pkt_type_str[pkt_type], fpdu->fpdu_length, tcp_payload_len); |
1859 | |
1860 | return pkt_type; |
1861 | } |
1862 | |
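/* Record the start of a new FPDU: mpa_frag points at the bytes already
 * received in this buffer, incomplete_bytes counts what is still expected
 * from later segments, and QED_IWARP_INVALID_FPDU_LENGTH marks the special
 * case where only the upper length byte has arrived so far.
 */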
1863 | static void |
1864 | qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf, |
1865 | struct qed_iwarp_fpdu *fpdu, |
1866 | struct unaligned_opaque_data *pkt_data, |
1867 | u16 tcp_payload_size, u8 placement_offset) |
1868 | { |
1869 | u16 first_mpa_offset = le16_to_cpu(pkt_data->first_mpa_offset); |
1870 | |
1871 | fpdu->mpa_buf = buf; |
1872 | fpdu->pkt_hdr = buf->data_phys_addr + placement_offset; |
1873 | fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset; |
1874 | fpdu->mpa_frag = buf->data_phys_addr + first_mpa_offset; |
1875 | fpdu->mpa_frag_virt = (u8 *)(buf->data) + first_mpa_offset; |
1876 | |
1877 | if (tcp_payload_size == 1) |
1878 | fpdu->incomplete_bytes = QED_IWARP_INVALID_FPDU_LENGTH; |
1879 | else if (tcp_payload_size < fpdu->fpdu_length) |
1880 | fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size; |
1881 | else |
1882 | fpdu->incomplete_bytes = 0; /* complete fpdu */ |
1883 | |
1884 | fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes; |
1885 | } |
1886 | |
1887 | static int |
1888 | qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn, |
1889 | struct qed_iwarp_fpdu *fpdu, |
1890 | struct unaligned_opaque_data *pkt_data, |
1891 | struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size) |
1892 | { |
1893 | u16 first_mpa_offset = le16_to_cpu(pkt_data->first_mpa_offset); |
1894 | u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf; |
1895 | int rc; |
1896 | |
	/* Need to copy the data from the partial packet stored in fpdu
	 * to the new buf; for this we also need to move the data currently
	 * placed in the buf. The assumption is that the buffer is big enough
	 * since fpdu_length <= mss. We use an intermediate buffer since
	 * we may need to copy the new data to an overlapping location.
	 */
1903 | if ((fpdu->mpa_frag_len + tcp_payload_size) > (u16)buf->buff_size) { |
1904 | DP_ERR(p_hwfn, |
1905 | "MPA ALIGN: Unexpected: buffer is not large enough for split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n" , |
1906 | buf->buff_size, fpdu->mpa_frag_len, |
1907 | tcp_payload_size, fpdu->incomplete_bytes); |
1908 | return -EINVAL; |
1909 | } |
1910 | |
1911 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, |
1912 | "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n" , |
1913 | fpdu->mpa_frag_virt, fpdu->mpa_frag_len, |
1914 | (u8 *)(buf->data) + first_mpa_offset, tcp_payload_size); |
1915 | |
1916 | memcpy(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len); |
1917 | memcpy(tmp_buf + fpdu->mpa_frag_len, |
1918 | (u8 *)(buf->data) + first_mpa_offset, tcp_payload_size); |
1919 | |
	rc = qed_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf);
1921 | if (rc) |
1922 | return rc; |
1923 | |
	/* If we managed to post the buffer, copy the data to the new buffer;
	 * o/w this will occur in the next round...
	 */
1927 | memcpy((u8 *)(buf->data), tmp_buf, |
1928 | fpdu->mpa_frag_len + tcp_payload_size); |
1929 | |
1930 | fpdu->mpa_buf = buf; |
1931 | /* fpdu->pkt_hdr remains as is */ |
1932 | /* fpdu->mpa_frag is overridden with new buf */ |
1933 | fpdu->mpa_frag = buf->data_phys_addr; |
1934 | fpdu->mpa_frag_virt = buf->data; |
1935 | fpdu->mpa_frag_len += tcp_payload_size; |
1936 | |
1937 | fpdu->incomplete_bytes -= tcp_payload_size; |
1938 | |
1939 | DP_VERBOSE(p_hwfn, |
1940 | QED_MSG_RDMA, |
1941 | "MPA ALIGN: split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n" , |
1942 | buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size, |
1943 | fpdu->incomplete_bytes); |
1944 | |
1945 | return 0; |
1946 | } |
1947 | |
1948 | static void |
1949 | qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn, |
1950 | struct qed_iwarp_fpdu *fpdu, u8 *mpa_data) |
1951 | { |
1952 | u16 mpa_len; |
1953 | |
1954 | /* Update incomplete packets if needed */ |
1955 | if (fpdu->incomplete_bytes == QED_IWARP_INVALID_FPDU_LENGTH) { |
1956 | /* Missing lower byte is now available */ |
1957 | mpa_len = fpdu->fpdu_length | *mpa_data; |
1958 | fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len); |
1959 | /* one byte of hdr */ |
1960 | fpdu->mpa_frag_len = 1; |
1961 | fpdu->incomplete_bytes = fpdu->fpdu_length - 1; |
1962 | DP_VERBOSE(p_hwfn, |
1963 | QED_MSG_RDMA, |
1964 | "MPA_ALIGN: Partial header mpa_len=%x fpdu_length=%x incomplete_bytes=%x\n" , |
1965 | mpa_len, fpdu->fpdu_length, fpdu->incomplete_bytes); |
1966 | } |
1967 | } |
1968 | |
1969 | #define QED_IWARP_IS_RIGHT_EDGE(_curr_pkt) \ |
1970 | (GET_FIELD((_curr_pkt)->flags, \ |
1971 | UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE)) |
1972 | |
/* This function is used to recycle a buffer using the ll2 drop option. It
 * relies on tx completions arriving in order to ensure that all buffers
 * posted to tx before this one were completed. The buffer sent here will be
 * sent as a cookie in the tx completion function and can then be reposted to
 * the rx chain when done. The flow that requires this is the flow where a
 * FPDU splits over more than 3 tcp segments. In this case the driver needs
 * to re-post a rx buffer instead of the one received, but the driver can't
 * simply repost a buffer it copied from, as there is a case where the buffer
 * was originally a packed FPDU and is partially posted to FW. The driver
 * needs to ensure FW is done with it.
 */
1983 | static int |
1984 | qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn, |
1985 | struct qed_iwarp_fpdu *fpdu, |
1986 | struct qed_iwarp_ll2_buff *buf) |
1987 | { |
1988 | struct qed_ll2_tx_pkt_info tx_pkt; |
1989 | u8 ll2_handle; |
1990 | int rc; |
1991 | |
1992 | memset(&tx_pkt, 0, sizeof(tx_pkt)); |
1993 | tx_pkt.num_of_bds = 1; |
1994 | tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP; |
1995 | tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; |
1996 | tx_pkt.first_frag = fpdu->pkt_hdr; |
1997 | tx_pkt.first_frag_len = fpdu->pkt_hdr_size; |
1998 | buf->piggy_buf = NULL; |
1999 | tx_pkt.cookie = buf; |
2000 | |
2001 | ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle; |
2002 | |
	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
	if (rc)
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Can't drop packet rc=%d\n", rc);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "MPA_ALIGN: send drop tx packet [%lx, 0x%x], buf=%p, rc=%d\n",
		   (unsigned long)tx_pkt.first_frag,
		   tx_pkt.first_frag_len, buf, rc);
2013 | |
2014 | return rc; |
2015 | } |
2016 | |
2017 | static int |
2018 | qed_iwarp_win_right_edge(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu) |
2019 | { |
2020 | struct qed_ll2_tx_pkt_info tx_pkt; |
2021 | u8 ll2_handle; |
2022 | int rc; |
2023 | |
2024 | memset(&tx_pkt, 0, sizeof(tx_pkt)); |
2025 | tx_pkt.num_of_bds = 1; |
2026 | tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; |
2027 | tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; |
2028 | |
2029 | tx_pkt.first_frag = fpdu->pkt_hdr; |
2030 | tx_pkt.first_frag_len = fpdu->pkt_hdr_size; |
2031 | tx_pkt.enable_ip_cksum = true; |
2032 | tx_pkt.enable_l4_cksum = true; |
2033 | tx_pkt.calc_ip_len = true; |
2034 | /* vlan overload with enum iwarp_ll2_tx_queues */ |
2035 | tx_pkt.vlan = IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE; |
2036 | |
2037 | ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle; |
2038 | |
	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
	if (rc)
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Can't send right edge rc=%d\n", rc);
	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "MPA_ALIGN: Sent right edge FPDU num_bds=%d [%lx, 0x%x], rc=%d\n",
		   tx_pkt.num_of_bds,
		   (unsigned long)tx_pkt.first_frag,
		   tx_pkt.first_frag_len, rc);
2049 | |
2050 | return rc; |
2051 | } |
2052 | |
2053 | static int |
2054 | qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn, |
2055 | struct qed_iwarp_fpdu *fpdu, |
2056 | struct unaligned_opaque_data *curr_pkt, |
2057 | struct qed_iwarp_ll2_buff *buf, |
2058 | u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type) |
2059 | { |
2060 | struct qed_ll2_tx_pkt_info tx_pkt; |
2061 | u16 first_mpa_offset; |
2062 | u8 ll2_handle; |
2063 | int rc; |
2064 | |
2065 | memset(&tx_pkt, 0, sizeof(tx_pkt)); |
2066 | |
	/* An unaligned packet means it's split over two tcp segments. So the
	 * complete packet requires 3 bds, one for the header, one for the
	 * part of the fpdu of the first tcp segment, and the last fragment
	 * will point to the remainder of the fpdu. A packed pdu requires
	 * only two bds, one for the header and one for the data.
	 */
2073 | tx_pkt.num_of_bds = (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED) ? 3 : 2; |
2074 | tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; |
2075 | tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; /* offset in words */ |
2076 | |
2077 | /* Send the mpa_buf only with the last fpdu (in case of packed) */ |
2078 | if (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED || |
2079 | tcp_payload_size <= fpdu->fpdu_length) |
2080 | tx_pkt.cookie = fpdu->mpa_buf; |
2081 | |
2082 | tx_pkt.first_frag = fpdu->pkt_hdr; |
2083 | tx_pkt.first_frag_len = fpdu->pkt_hdr_size; |
2084 | tx_pkt.enable_ip_cksum = true; |
2085 | tx_pkt.enable_l4_cksum = true; |
2086 | tx_pkt.calc_ip_len = true; |
2087 | /* vlan overload with enum iwarp_ll2_tx_queues */ |
2088 | tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE; |
2089 | |
	/* Special case of an unaligned packet that is not packed: need to
	 * send both buffers as a cookie to release.
	 */
2093 | if (tcp_payload_size == fpdu->incomplete_bytes) |
2094 | fpdu->mpa_buf->piggy_buf = buf; |
2095 | |
2096 | ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle; |
2097 | |
2098 | /* Set first fragment to header */ |
	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2100 | if (rc) |
2101 | goto out; |
2102 | |
2103 | /* Set second fragment to first part of packet */ |
	rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle,
					       fpdu->mpa_frag,
					       fpdu->mpa_frag_len);
2107 | if (rc) |
2108 | goto out; |
2109 | |
2110 | if (!fpdu->incomplete_bytes) |
2111 | goto out; |
2112 | |
2113 | first_mpa_offset = le16_to_cpu(curr_pkt->first_mpa_offset); |
2114 | |
2115 | /* Set third fragment to second part of the packet */ |
	rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
					       ll2_handle,
					       buf->data_phys_addr +
					       first_mpa_offset,
					       fpdu->incomplete_bytes);
2121 | out: |
2122 | DP_VERBOSE(p_hwfn, |
2123 | QED_MSG_RDMA, |
2124 | "MPA_ALIGN: Sent FPDU num_bds=%d first_frag_len=%x, mpa_frag_len=0x%x, incomplete_bytes:0x%x rc=%d\n" , |
2125 | tx_pkt.num_of_bds, |
2126 | tx_pkt.first_frag_len, |
2127 | fpdu->mpa_frag_len, |
2128 | fpdu->incomplete_bytes, rc); |
2129 | |
2130 | return rc; |
2131 | } |
2132 | |
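/* Unpack the unaligned_opaque_data that FW passes in the two 32-bit opaque
 * words of the completion, and advance first_mpa_offset past the TCP payload
 * offset so that it points at the MPA data itself.
 */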
2133 | static void |
2134 | qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn, |
2135 | struct unaligned_opaque_data *curr_pkt, |
2136 | u32 opaque_data0, u32 opaque_data1) |
2137 | { |
2138 | u64 opaque_data; |
2139 | |
2140 | opaque_data = HILO_64(cpu_to_le32(opaque_data1), |
2141 | cpu_to_le32(opaque_data0)); |
2142 | *curr_pkt = *((struct unaligned_opaque_data *)&opaque_data); |
2143 | |
	le16_add_cpu(&curr_pkt->first_mpa_offset,
		     curr_pkt->tcp_payload_offset);
2146 | } |
2147 | |
/* This function is called when an unaligned or incomplete MPA packet arrives;
 * the driver needs to align the packet, perhaps using previous data, and
 * send it down to FW once it is aligned.
 */
2152 | static int |
2153 | qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn, |
2154 | struct qed_iwarp_ll2_mpa_buf *mpa_buf) |
2155 | { |
2156 | struct unaligned_opaque_data *curr_pkt = &mpa_buf->data; |
2157 | struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf; |
2158 | enum qed_iwarp_mpa_pkt_type pkt_type; |
2159 | struct qed_iwarp_fpdu *fpdu; |
2160 | u16 cid, first_mpa_offset; |
2161 | int rc = -EINVAL; |
2162 | u8 *mpa_data; |
2163 | |
2164 | cid = le32_to_cpu(curr_pkt->cid); |
2165 | |
	fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)cid);
2167 | if (!fpdu) { /* something corrupt with cid, post rx back */ |
2168 | DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n" , |
2169 | cid); |
2170 | goto err; |
2171 | } |
2172 | |
2173 | do { |
2174 | first_mpa_offset = le16_to_cpu(curr_pkt->first_mpa_offset); |
2175 | mpa_data = ((u8 *)(buf->data) + first_mpa_offset); |
2176 | |
2177 | pkt_type = qed_iwarp_mpa_classify(p_hwfn, fpdu, |
						  mpa_buf->tcp_payload_len,
2179 | mpa_data); |
2180 | |
2181 | switch (pkt_type) { |
2182 | case QED_IWARP_MPA_PKT_PARTIAL: |
2183 | qed_iwarp_init_fpdu(buf, fpdu, |
					    curr_pkt,
					    mpa_buf->tcp_payload_len,
					    mpa_buf->placement_offset);
2187 | |
2188 | if (!QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) { |
2189 | mpa_buf->tcp_payload_len = 0; |
2190 | break; |
2191 | } |
2192 | |
2193 | rc = qed_iwarp_win_right_edge(p_hwfn, fpdu); |
2194 | |
2195 | if (rc) { |
2196 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, |
2197 | "Can't send FPDU:reset rc=%d\n" , rc); |
2198 | memset(fpdu, 0, sizeof(*fpdu)); |
2199 | break; |
2200 | } |
2201 | |
2202 | mpa_buf->tcp_payload_len = 0; |
2203 | break; |
2204 | case QED_IWARP_MPA_PKT_PACKED: |
2205 | qed_iwarp_init_fpdu(buf, fpdu, |
					    curr_pkt,
					    mpa_buf->tcp_payload_len,
					    mpa_buf->placement_offset);
2209 | |
2210 | rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf, |
						 mpa_buf->tcp_payload_len,
2212 | pkt_type); |
2213 | if (rc) { |
2214 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, |
2215 | "Can't send FPDU:reset rc=%d\n" , rc); |
2216 | memset(fpdu, 0, sizeof(*fpdu)); |
2217 | break; |
2218 | } |
2219 | |
2220 | mpa_buf->tcp_payload_len -= fpdu->fpdu_length; |
			le16_add_cpu(&curr_pkt->first_mpa_offset,
				     fpdu->fpdu_length);
2223 | break; |
2224 | case QED_IWARP_MPA_PKT_UNALIGNED: |
2225 | qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data); |
2226 | if (mpa_buf->tcp_payload_len < fpdu->incomplete_bytes) { |
2227 | /* special handling of fpdu split over more |
2228 | * than 2 segments |
2229 | */ |
2230 | if (QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) { |
2231 | rc = qed_iwarp_win_right_edge(p_hwfn, |
2232 | fpdu); |
2233 | /* packet will be re-processed later */ |
2234 | if (rc) |
2235 | return rc; |
2236 | } |
2237 | |
				rc = qed_iwarp_cp_pkt(p_hwfn, fpdu, curr_pkt,
						      buf,
						      mpa_buf->tcp_payload_len);
2241 | if (rc) /* packet will be re-processed later */ |
2242 | return rc; |
2243 | |
2244 | mpa_buf->tcp_payload_len = 0; |
2245 | break; |
2246 | } |
2247 | |
2248 | rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf, |
						 mpa_buf->tcp_payload_len,
2250 | pkt_type); |
2251 | if (rc) { |
2252 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, |
2253 | "Can't send FPDU:delay rc=%d\n" , rc); |
2254 | /* don't reset fpdu -> we need it for next |
2255 | * classify |
2256 | */ |
2257 | break; |
2258 | } |
2259 | |
2260 | mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes; |
			le16_add_cpu(&curr_pkt->first_mpa_offset,
				     fpdu->incomplete_bytes);
2263 | |
2264 | /* The framed PDU was sent - no more incomplete bytes */ |
2265 | fpdu->incomplete_bytes = 0; |
2266 | break; |
2267 | } |
2268 | } while (mpa_buf->tcp_payload_len && !rc); |
2269 | |
2270 | return rc; |
2271 | |
2272 | err: |
2273 | qed_iwarp_ll2_post_rx(p_hwfn, |
2274 | buf, |
			      p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle);
2276 | return rc; |
2277 | } |
2278 | |
2279 | static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn) |
2280 | { |
2281 | struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; |
2282 | struct qed_iwarp_ll2_mpa_buf *mpa_buf = NULL; |
2283 | int rc; |
2284 | |
	while (!list_empty(&iwarp_info->mpa_buf_pending_list)) {
2286 | mpa_buf = list_first_entry(&iwarp_info->mpa_buf_pending_list, |
2287 | struct qed_iwarp_ll2_mpa_buf, |
2288 | list_entry); |
2289 | |
2290 | rc = qed_iwarp_process_mpa_pkt(p_hwfn, mpa_buf); |
2291 | |
2292 | /* busy means break and continue processing later, don't |
2293 | * remove the buf from the pending list. |
2294 | */ |
2295 | if (rc == -EBUSY) |
2296 | break; |
2297 | |
		list_move_tail(&mpa_buf->list_entry,
			       &iwarp_info->mpa_buf_list);
2300 | |
2301 | if (rc) { /* different error, don't continue */ |
2302 | DP_NOTICE(p_hwfn, "process pkts failed rc=%d\n" , rc); |
2303 | break; |
2304 | } |
2305 | } |
2306 | } |
2307 | |
2308 | static void |
2309 | qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) |
2310 | { |
2311 | struct qed_iwarp_ll2_mpa_buf *mpa_buf; |
2312 | struct qed_iwarp_info *iwarp_info; |
2313 | struct qed_hwfn *p_hwfn = cxt; |
2314 | u16 first_mpa_offset; |
2315 | |
2316 | iwarp_info = &p_hwfn->p_rdma_info->iwarp; |
2317 | mpa_buf = list_first_entry(&iwarp_info->mpa_buf_list, |
2318 | struct qed_iwarp_ll2_mpa_buf, list_entry); |
2319 | if (!mpa_buf) { |
2320 | DP_ERR(p_hwfn, "No free mpa buf\n" ); |
2321 | goto err; |
2322 | } |
2323 | |
	list_del(&mpa_buf->list_entry);
	qed_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data,
			       data->opaque_data_0, data->opaque_data_1);
2327 | |
2328 | first_mpa_offset = le16_to_cpu(mpa_buf->data.first_mpa_offset); |
2329 | |
2330 | DP_VERBOSE(p_hwfn, |
2331 | QED_MSG_RDMA, |
2332 | "LL2 MPA CompRx payload_len:0x%x\tfirst_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n" , |
2333 | data->length.packet_length, first_mpa_offset, |
2334 | mpa_buf->data.tcp_payload_offset, mpa_buf->data.flags, |
2335 | mpa_buf->data.cid); |
2336 | |
2337 | mpa_buf->ll2_buf = data->cookie; |
2338 | mpa_buf->tcp_payload_len = data->length.packet_length - |
2339 | first_mpa_offset; |
2340 | |
2341 | first_mpa_offset += data->u.placement_offset; |
2342 | mpa_buf->data.first_mpa_offset = cpu_to_le16(first_mpa_offset); |
2343 | mpa_buf->placement_offset = data->u.placement_offset; |
2344 | |
	list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_pending_list);
2346 | |
2347 | qed_iwarp_process_pending_pkts(p_hwfn); |
2348 | return; |
2349 | err: |
	qed_iwarp_ll2_post_rx(p_hwfn, data->cookie,
			      iwarp_info->ll2_mpa_handle);
2352 | } |
2353 | |
2354 | static void |
2355 | qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) |
2356 | { |
2357 | struct qed_iwarp_ll2_buff *buf = data->cookie; |
2358 | struct qed_iwarp_listener *listener; |
2359 | struct qed_ll2_tx_pkt_info tx_pkt; |
2360 | struct qed_iwarp_cm_info cm_info; |
2361 | struct qed_hwfn *p_hwfn = cxt; |
2362 | u8 remote_mac_addr[ETH_ALEN]; |
2363 | u8 local_mac_addr[ETH_ALEN]; |
2364 | struct qed_iwarp_ep *ep; |
2365 | int tcp_start_offset; |
2366 | u8 ll2_syn_handle; |
2367 | int payload_len; |
2368 | u32 hdr_size; |
2369 | int rc; |
2370 | |
2371 | memset(&cm_info, 0, sizeof(cm_info)); |
2372 | ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle; |
2373 | |
2374 | /* Check if packet was received with errors... */ |
2375 | if (data->err_flags) { |
2376 | DP_NOTICE(p_hwfn, "Error received on SYN packet: 0x%x\n" , |
2377 | data->err_flags); |
2378 | goto err; |
2379 | } |
2380 | |
2381 | if (GET_FIELD(data->parse_flags, |
2382 | PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) && |
2383 | GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) { |
2384 | DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n" ); |
2385 | goto err; |
2386 | } |
2387 | |
	rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) +
				    data->u.placement_offset, remote_mac_addr,
				    local_mac_addr, &payload_len,
				    &tcp_start_offset);
2392 | if (rc) |
2393 | goto err; |
2394 | |
2395 | /* Check if there is a listener for this 4-tuple+vlan */ |
	listener = qed_iwarp_get_listener(p_hwfn, &cm_info);
2397 | if (!listener) { |
2398 | DP_VERBOSE(p_hwfn, |
2399 | QED_MSG_RDMA, |
2400 | "SYN received on tuple not listened on parse_flags=%d packet len=%d\n" , |
2401 | data->parse_flags, data->length.packet_length); |
2402 | |
2403 | memset(&tx_pkt, 0, sizeof(tx_pkt)); |
2404 | tx_pkt.num_of_bds = 1; |
2405 | tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2; |
2406 | tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; |
2407 | tx_pkt.first_frag = buf->data_phys_addr + |
2408 | data->u.placement_offset; |
2409 | tx_pkt.first_frag_len = data->length.packet_length; |
2410 | tx_pkt.cookie = buf; |
2411 | |
		rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle,
					       &tx_pkt, true);
2414 | |
2415 | if (rc) { |
2416 | DP_NOTICE(p_hwfn, |
2417 | "Can't post SYN back to chip rc=%d\n" , rc); |
2418 | goto err; |
2419 | } |
2420 | return; |
2421 | } |
2422 | |
2423 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n" ); |
2424 | /* There may be an open ep on this connection if this is a syn |
2425 | * retrasnmit... need to make sure there isn't... |
2426 | */ |
2427 | if (qed_iwarp_ep_exists(p_hwfn, cm_info: &cm_info)) |
2428 | goto err; |
2429 | |
2430 | ep = qed_iwarp_get_free_ep(p_hwfn); |
2431 | if (!ep) |
2432 | goto err; |
2433 | |
	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2437 | |
	ether_addr_copy(ep->remote_mac_addr, remote_mac_addr);
	ether_addr_copy(ep->local_mac_addr, local_mac_addr);
2440 | |
2441 | memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info)); |
2442 | |
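	/* Worst-case L3/L4 header size: 40 bytes for IPv4 (20B IP + 20B TCP),
	 * 60 bytes for IPv6 (40B IP + 20B TCP).
	 */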
2443 | hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60); |
2444 | ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size; |
2445 | ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss); |
2446 | |
2447 | ep->event_cb = listener->event_cb; |
2448 | ep->cb_context = listener->cb_context; |
2449 | ep->connect_mode = TCP_CONNECT_PASSIVE; |
2450 | |
2451 | ep->syn = buf; |
2452 | ep->syn_ip_payload_length = (u16)payload_len; |
2453 | ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset + |
2454 | tcp_start_offset; |
2455 | |
2456 | rc = qed_iwarp_tcp_offload(p_hwfn, ep); |
2457 | if (rc) { |
2458 | qed_iwarp_return_ep(p_hwfn, ep); |
2459 | goto err; |
2460 | } |
2461 | |
2462 | return; |
2463 | err: |
	qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle);
2465 | } |
2466 | |
2467 | static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle, |
2468 | void *cookie, dma_addr_t rx_buf_addr, |
2469 | bool b_last_packet) |
2470 | { |
2471 | struct qed_iwarp_ll2_buff *buffer = cookie; |
2472 | struct qed_hwfn *p_hwfn = cxt; |
2473 | |
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
			  buffer->data, buffer->data_phys_addr);
	kfree(buffer);
2477 | } |
2478 | |
2479 | static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle, |
2480 | void *cookie, dma_addr_t first_frag_addr, |
2481 | bool b_last_fragment, bool b_last_packet) |
2482 | { |
2483 | struct qed_iwarp_ll2_buff *buffer = cookie; |
2484 | struct qed_iwarp_ll2_buff *piggy; |
2485 | struct qed_hwfn *p_hwfn = cxt; |
2486 | |
2487 | if (!buffer) /* can happen in packed mpa unaligned... */ |
2488 | return; |
2489 | |
2490 | /* this was originally an rx packet, post it back */ |
2491 | piggy = buffer->piggy_buf; |
2492 | if (piggy) { |
2493 | buffer->piggy_buf = NULL; |
		qed_iwarp_ll2_post_rx(p_hwfn, piggy, connection_handle);
2495 | } |
2496 | |
	qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle);
2498 | |
2499 | if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle) |
		qed_iwarp_process_pending_pkts(p_hwfn);
2503 | } |
2504 | |
2505 | static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle, |
2506 | void *cookie, dma_addr_t first_frag_addr, |
2507 | bool b_last_fragment, bool b_last_packet) |
2508 | { |
2509 | struct qed_iwarp_ll2_buff *buffer = cookie; |
2510 | struct qed_hwfn *p_hwfn = cxt; |
2511 | |
2512 | if (!buffer) |
2513 | return; |
2514 | |
2515 | if (buffer->piggy_buf) { |
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  buffer->piggy_buf->buff_size,
				  buffer->piggy_buf->data,
				  buffer->piggy_buf->data_phys_addr);

		kfree(buffer->piggy_buf);
2522 | } |
2523 | |
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
			  buffer->data, buffer->data_phys_addr);

	kfree(buffer);
2528 | } |
2529 | |
/* The only slowpath for iwarp ll2 is the unaligned flush. When this
 * completion is received, the driver needs to reset the FPDU.
 */
2533 | static void |
2534 | qed_iwarp_ll2_slowpath(void *cxt, |
2535 | u8 connection_handle, |
2536 | u32 opaque_data_0, u32 opaque_data_1) |
2537 | { |
2538 | struct unaligned_opaque_data unalign_data; |
2539 | struct qed_hwfn *p_hwfn = cxt; |
2540 | struct qed_iwarp_fpdu *fpdu; |
2541 | u32 cid; |
2542 | |
	qed_iwarp_mpa_get_data(p_hwfn, &unalign_data,
			       opaque_data_0, opaque_data_1);
2545 | |
2546 | cid = le32_to_cpu(unalign_data.cid); |
2547 | |
2548 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n" , cid); |
2549 | |
	fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)cid);
2551 | if (fpdu) |
2552 | memset(fpdu, 0, sizeof(*fpdu)); |
2553 | } |
2554 | |
2555 | static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn) |
2556 | { |
2557 | struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; |
2558 | int rc = 0; |
2559 | |
	if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) {
		rc = qed_ll2_terminate_connection(p_hwfn,
						  iwarp_info->ll2_syn_handle);
		if (rc)
			DP_INFO(p_hwfn, "Failed to terminate syn connection\n");

		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle);
		iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
	}

	if (iwarp_info->ll2_ooo_handle != QED_IWARP_HANDLE_INVAL) {
		rc = qed_ll2_terminate_connection(p_hwfn,
						  iwarp_info->ll2_ooo_handle);
		if (rc)
			DP_INFO(p_hwfn, "Failed to terminate ooo connection\n");

		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
		iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
	}

	if (iwarp_info->ll2_mpa_handle != QED_IWARP_HANDLE_INVAL) {
		rc = qed_ll2_terminate_connection(p_hwfn,
						  iwarp_info->ll2_mpa_handle);
		if (rc)
			DP_INFO(p_hwfn, "Failed to terminate mpa connection\n");

		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
		iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
	}

	qed_llh_remove_mac_filter(p_hwfn->cdev, 0,
				  p_hwfn->p_rdma_info->iwarp.mac_addr);
2592 | |
2593 | return rc; |
2594 | } |
2595 | |
2596 | static int |
2597 | qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn, |
2598 | int num_rx_bufs, int buff_size, u8 ll2_handle) |
2599 | { |
2600 | struct qed_iwarp_ll2_buff *buffer; |
2601 | int rc = 0; |
2602 | int i; |
2603 | |
2604 | for (i = 0; i < num_rx_bufs; i++) { |
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			rc = -ENOMEM;
			break;
		}

		buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						  buff_size,
						  &buffer->data_phys_addr,
						  GFP_KERNEL);
		if (!buffer->data) {
			kfree(buffer);
			rc = -ENOMEM;
			break;
		}

		buffer->buff_size = buff_size;
		rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle);
2623 | if (rc) |
2624 | /* buffers will be deallocated by qed_ll2 */ |
2625 | break; |
2626 | } |
2627 | return rc; |
2628 | } |
2629 | |
2630 | #define QED_IWARP_MAX_BUF_SIZE(mtu) \ |
2631 | ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \ |
2632 | ETH_CACHE_LINE_SIZE) |
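/* e.g. for mtu = 1500 (assuming the usual 64-byte ETH_CACHE_LINE_SIZE):
 * 1500 + 14 (eth hdr) + 8 (two vlan tags) + 2 + 64 = 1588, aligned up to the
 * next cache line, i.e. 1600-byte rx buffers.
 */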
2633 | |
2634 | static int |
2635 | qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, |
2636 | struct qed_rdma_start_in_params *params, |
2637 | u32 rcv_wnd_size) |
2638 | { |
2639 | struct qed_iwarp_info *iwarp_info; |
2640 | struct qed_ll2_acquire_data data; |
2641 | struct qed_ll2_cbs cbs; |
2642 | u32 buff_size; |
2643 | u16 n_ooo_bufs; |
2644 | int rc = 0; |
2645 | int i; |
2646 | |
2647 | iwarp_info = &p_hwfn->p_rdma_info->iwarp; |
2648 | iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL; |
2649 | iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL; |
2650 | iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL; |
2651 | |
2652 | iwarp_info->max_mtu = params->max_mtu; |
2653 | |
	ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr);

	rc = qed_llh_add_mac_filter(p_hwfn->cdev, 0, params->mac_addr);
2657 | if (rc) |
2658 | return rc; |
2659 | |
2660 | /* Start SYN connection */ |
2661 | cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt; |
2662 | cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt; |
2663 | cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt; |
2664 | cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt; |
2665 | cbs.slowpath_cb = NULL; |
2666 | cbs.cookie = p_hwfn; |
2667 | |
2668 | memset(&data, 0, sizeof(data)); |
2669 | data.input.conn_type = QED_LL2_TYPE_IWARP; |
2670 | /* SYN will use ctx based queues */ |
2671 | data.input.rx_conn_type = QED_LL2_RX_TYPE_CTX; |
2672 | data.input.mtu = params->max_mtu; |
2673 | data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE; |
2674 | data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE; |
2675 | data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ |
2676 | data.input.tx_tc = PKT_LB_TC; |
2677 | data.input.tx_dest = QED_LL2_TX_DEST_LB; |
2678 | data.p_connection_handle = &iwarp_info->ll2_syn_handle; |
2679 | data.cbs = &cbs; |
2680 | |
	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n");
		qed_llh_remove_mac_filter(p_hwfn->cdev, 0, params->mac_addr);
		return rc;
	}

	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n");
2691 | goto err; |
2692 | } |
2693 | |
2694 | buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu); |
2695 | rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, |
2696 | QED_IWARP_LL2_SYN_RX_SIZE, |
2697 | buff_size, |
					 iwarp_info->ll2_syn_handle);
2699 | if (rc) |
2700 | goto err; |
2701 | |
2702 | /* Start OOO connection */ |
2703 | data.input.conn_type = QED_LL2_TYPE_OOO; |
2704 | /* OOO/unaligned will use legacy ll2 queues (ram based) */ |
2705 | data.input.rx_conn_type = QED_LL2_RX_TYPE_LEGACY; |
2706 | data.input.mtu = params->max_mtu; |
2707 | |
2708 | n_ooo_bufs = (QED_IWARP_MAX_OOO * rcv_wnd_size) / |
2709 | iwarp_info->max_mtu; |
2710 | n_ooo_bufs = min_t(u32, n_ooo_bufs, QED_IWARP_LL2_OOO_MAX_RX_SIZE); |
2711 | |
2712 | data.input.rx_num_desc = n_ooo_bufs; |
2713 | data.input.rx_num_ooo_buffers = n_ooo_bufs; |
2714 | |
2715 | data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ |
2716 | data.input.tx_num_desc = QED_IWARP_LL2_OOO_DEF_TX_SIZE; |
2717 | data.p_connection_handle = &iwarp_info->ll2_ooo_handle; |
2718 | |
	rc = qed_ll2_acquire_connection(p_hwfn, &data);
2720 | if (rc) |
2721 | goto err; |
2722 | |
	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
2724 | if (rc) |
2725 | goto err; |
2726 | |
2727 | /* Start Unaligned MPA connection */ |
2728 | cbs.rx_comp_cb = qed_iwarp_ll2_comp_mpa_pkt; |
2729 | cbs.slowpath_cb = qed_iwarp_ll2_slowpath; |
2730 | |
2731 | memset(&data, 0, sizeof(data)); |
2732 | data.input.conn_type = QED_LL2_TYPE_IWARP; |
2733 | data.input.mtu = params->max_mtu; |
2734 | /* FW requires that once a packet arrives OOO, it must have at |
2735 | * least 2 rx buffers available on the unaligned connection |
2736 | * for handling the case that it is a partial fpdu. |
2737 | */ |
2738 | data.input.rx_num_desc = n_ooo_bufs * 2; |
2739 | data.input.tx_num_desc = data.input.rx_num_desc; |
2740 | data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU; |
2741 | data.input.tx_tc = PKT_LB_TC; |
2742 | data.input.tx_dest = QED_LL2_TX_DEST_LB; |
2743 | data.p_connection_handle = &iwarp_info->ll2_mpa_handle; |
2744 | data.input.secondary_queue = true; |
2745 | data.cbs = &cbs; |
2746 | |
	rc = qed_ll2_acquire_connection(p_hwfn, &data);
2748 | if (rc) |
2749 | goto err; |
2750 | |
	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
2752 | if (rc) |
2753 | goto err; |
2754 | |
2755 | rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, |
					 data.input.rx_num_desc,
					 buff_size,
					 iwarp_info->ll2_mpa_handle);
2759 | if (rc) |
2760 | goto err; |
2761 | |
	iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps,
					    sizeof(*iwarp_info->partial_fpdus),
2764 | GFP_KERNEL); |
2765 | if (!iwarp_info->partial_fpdus) { |
2766 | rc = -ENOMEM; |
2767 | goto err; |
2768 | } |
2769 | |
2770 | iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps; |
2771 | |
	iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
2773 | if (!iwarp_info->mpa_intermediate_buf) { |
2774 | rc = -ENOMEM; |
2775 | goto err; |
2776 | } |
2777 | |
	/* The mpa_bufs array serves for pending RX packets received on the
	 * mpa ll2 that don't have room on the tx ring and require later
	 * processing. We can't fail on allocation of such a struct, therefore
	 * we allocate enough to take care of all rx packets.
	 */
	iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc,
				       sizeof(*iwarp_info->mpa_bufs),
2785 | GFP_KERNEL); |
2786 | if (!iwarp_info->mpa_bufs) { |
2787 | rc = -ENOMEM; |
2788 | goto err; |
2789 | } |
2790 | |
	INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list);
	INIT_LIST_HEAD(&iwarp_info->mpa_buf_list);
	for (i = 0; i < data.input.rx_num_desc; i++)
		list_add_tail(&iwarp_info->mpa_bufs[i].list_entry,
			      &iwarp_info->mpa_buf_list);
2796 | return rc; |
2797 | err: |
2798 | qed_iwarp_ll2_stop(p_hwfn); |
2799 | |
2800 | return rc; |
2801 | } |
2802 | |
2803 | static struct { |
2804 | u32 two_ports; |
2805 | u32 four_ports; |
2806 | } qed_iwarp_rcv_wnd_size[MAX_CHIP_IDS] = { |
2807 | {QED_IWARP_RCV_WND_SIZE_DEF_BB_2P, QED_IWARP_RCV_WND_SIZE_DEF_BB_4P}, |
2808 | {QED_IWARP_RCV_WND_SIZE_DEF_AH_2P, QED_IWARP_RCV_WND_SIZE_DEF_AH_4P} |
2809 | }; |
2810 | |
2811 | int qed_iwarp_setup(struct qed_hwfn *p_hwfn, |
2812 | struct qed_rdma_start_in_params *params) |
2813 | { |
2814 | struct qed_dev *cdev = p_hwfn->cdev; |
2815 | struct qed_iwarp_info *iwarp_info; |
2816 | enum chip_ids chip_id; |
2817 | u32 rcv_wnd_size; |
2818 | |
2819 | iwarp_info = &p_hwfn->p_rdma_info->iwarp; |
2820 | |
2821 | iwarp_info->tcp_flags = QED_IWARP_TS_EN; |
2822 | |
2823 | chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2; |
2824 | rcv_wnd_size = (qed_device_num_ports(cdev) == 4) ? |
2825 | qed_iwarp_rcv_wnd_size[chip_id].four_ports : |
2826 | qed_iwarp_rcv_wnd_size[chip_id].two_ports; |
2827 | |
2828 | /* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */ |
2829 | iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) - |
2830 | ilog2(QED_IWARP_RCV_WND_SIZE_MIN); |
2831 | iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale; |
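	/* e.g. a 200KB window (BB, 2 ports): rcv_wnd_scale = ilog2(200K) -
	 * ilog2(64K - 1) = 17 - 15 = 2, so the advertised window is
	 * 200K >> 2 = 50K.
	 */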
2832 | iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED; |
2833 | iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED; |
2834 | |
2835 | iwarp_info->peer2peer = QED_IWARP_PARAM_P2P; |
2836 | |
2837 | iwarp_info->rtr_type = MPA_RTR_TYPE_ZERO_SEND | |
2838 | MPA_RTR_TYPE_ZERO_WRITE | |
2839 | MPA_RTR_TYPE_ZERO_READ; |
2840 | |
2841 | spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock); |
2842 | INIT_LIST_HEAD(list: &p_hwfn->p_rdma_info->iwarp.ep_list); |
2843 | INIT_LIST_HEAD(list: &p_hwfn->p_rdma_info->iwarp.listen_list); |
2844 | |
2845 | qed_spq_register_async_cb(p_hwfn, protocol_id: PROTOCOLID_IWARP, |
2846 | cb: qed_iwarp_async_event); |
2847 | qed_ooo_setup(p_hwfn); |
2848 | |
2849 | return qed_iwarp_ll2_start(p_hwfn, params, rcv_wnd_size); |
2850 | } |
2851 | |
2852 | int qed_iwarp_stop(struct qed_hwfn *p_hwfn) |
2853 | { |
2854 | int rc; |
2855 | |
2856 | qed_iwarp_free_prealloc_ep(p_hwfn); |
2857 | rc = qed_iwarp_wait_for_all_cids(p_hwfn); |
2858 | if (rc) |
2859 | return rc; |
2860 | |
2861 | return qed_iwarp_ll2_stop(p_hwfn); |
2862 | } |
2863 | |
2864 | static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn, |
2865 | struct qed_iwarp_ep *ep, |
2866 | u8 fw_return_code) |
2867 | { |
2868 | struct qed_iwarp_cm_event_params params; |
2869 | |
	qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_ERROR, true);
2871 | |
2872 | params.event = QED_IWARP_EVENT_CLOSE; |
2873 | params.ep_context = ep; |
2874 | params.cm_info = &ep->cm_info; |
2875 | params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ? |
2876 | 0 : -ECONNRESET; |
2877 | |
2878 | /* paired with READ_ONCE in destroy_qp */ |
2879 | smp_store_release(&ep->state, QED_IWARP_EP_CLOSED); |
2880 | |
	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_del(&ep->list_entry);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2884 | |
2885 | ep->event_cb(ep->cb_context, ¶ms); |
2886 | } |
2887 | |
2888 | static void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn, |
2889 | struct qed_iwarp_ep *ep, |
2890 | int fw_ret_code) |
2891 | { |
2892 | struct qed_iwarp_cm_event_params params; |
2893 | bool event_cb = false; |
2894 | |
2895 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n" , |
2896 | ep->cid, fw_ret_code); |
2897 | |
2898 | switch (fw_ret_code) { |
2899 | case IWARP_EXCEPTION_DETECTED_LLP_CLOSED: |
2900 | params.status = 0; |
2901 | params.event = QED_IWARP_EVENT_DISCONNECT; |
2902 | event_cb = true; |
2903 | break; |
2904 | case IWARP_EXCEPTION_DETECTED_LLP_RESET: |
2905 | params.status = -ECONNRESET; |
2906 | params.event = QED_IWARP_EVENT_DISCONNECT; |
2907 | event_cb = true; |
2908 | break; |
2909 | case IWARP_EXCEPTION_DETECTED_RQ_EMPTY: |
2910 | params.event = QED_IWARP_EVENT_RQ_EMPTY; |
2911 | event_cb = true; |
2912 | break; |
2913 | case IWARP_EXCEPTION_DETECTED_IRQ_FULL: |
2914 | params.event = QED_IWARP_EVENT_IRQ_FULL; |
2915 | event_cb = true; |
2916 | break; |
2917 | case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT: |
2918 | params.event = QED_IWARP_EVENT_LLP_TIMEOUT; |
2919 | event_cb = true; |
2920 | break; |
2921 | case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR: |
2922 | params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR; |
2923 | event_cb = true; |
2924 | break; |
2925 | case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW: |
2926 | params.event = QED_IWARP_EVENT_CQ_OVERFLOW; |
2927 | event_cb = true; |
2928 | break; |
2929 | case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC: |
2930 | params.event = QED_IWARP_EVENT_QP_CATASTROPHIC; |
2931 | event_cb = true; |
2932 | break; |
2933 | case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR: |
2934 | params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR; |
2935 | event_cb = true; |
2936 | break; |
2937 | case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR: |
2938 | params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR; |
2939 | event_cb = true; |
2940 | break; |
2941 | case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED: |
2942 | params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED; |
2943 | event_cb = true; |
2944 | break; |
2945 | default: |
2946 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, |
2947 | "Unhandled exception received...fw_ret_code=%d\n" , |
2948 | fw_ret_code); |
2949 | break; |
2950 | } |
2951 | |
2952 | if (event_cb) { |
2953 | params.ep_context = ep; |
2954 | params.cm_info = &ep->cm_info; |
2955 | ep->event_cb(ep->cb_context, ¶ms); |
2956 | } |
2957 | } |
2958 | |
2959 | static void |
2960 | qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn, |
2961 | struct qed_iwarp_ep *ep, u8 fw_return_code) |
2962 | { |
2963 | struct qed_iwarp_cm_event_params params; |
2964 | |
2965 | memset(¶ms, 0, sizeof(params)); |
2966 | params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE; |
2967 | params.ep_context = ep; |
2968 | params.cm_info = &ep->cm_info; |
2969 | /* paired with READ_ONCE in destroy_qp */ |
2970 | smp_store_release(&ep->state, QED_IWARP_EP_CLOSED); |
2971 | |
2972 | switch (fw_return_code) { |
2973 | case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET: |
2974 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, |
2975 | "%s(0x%x) TCP connect got invalid packet\n" , |
2976 | QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); |
2977 | params.status = -ECONNRESET; |
2978 | break; |
2979 | case IWARP_CONN_ERROR_TCP_CONNECTION_RST: |
2980 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, |
2981 | "%s(0x%x) TCP Connection Reset\n" , |
2982 | QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); |
2983 | params.status = -ECONNRESET; |
2984 | break; |
2985 | case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT: |
2986 | DP_NOTICE(p_hwfn, "%s(0x%x) TCP timeout\n" , |
2987 | QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); |
2988 | params.status = -EBUSY; |
2989 | break; |
2990 | case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER: |
2991 | DP_NOTICE(p_hwfn, "%s(0x%x) MPA not supported VER\n" , |
2992 | QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); |
2993 | params.status = -ECONNREFUSED; |
2994 | break; |
2995 | case IWARP_CONN_ERROR_MPA_INVALID_PACKET: |
2996 | DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n" , |
2997 | QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); |
2998 | params.status = -ECONNRESET; |
2999 | break; |
3000 | default: |
3001 | DP_ERR(p_hwfn, |
3002 | "%s(0x%x) Unexpected return code tcp connect: %d\n" , |
3003 | QED_IWARP_CONNECT_MODE_STRING(ep), |
3004 | ep->tcp_cid, fw_return_code); |
3005 | params.status = -ECONNRESET; |
3006 | break; |
3007 | } |
3008 | |
3009 | if (ep->connect_mode == TCP_CONNECT_PASSIVE) { |
3010 | ep->tcp_cid = QED_IWARP_INVALID_TCP_CID; |
3011 | qed_iwarp_return_ep(p_hwfn, ep); |
3012 | } else { |
3013 | ep->event_cb(ep->cb_context, ¶ms); |
		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		list_del(&ep->list_entry);
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3017 | } |
3018 | } |
3019 | |
3020 | static void |
3021 | qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn, |
3022 | struct qed_iwarp_ep *ep, u8 fw_return_code) |
3023 | { |
3024 | u8 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle; |
3025 | |
3026 | if (ep->connect_mode == TCP_CONNECT_PASSIVE) { |
3027 | /* Done with the SYN packet, post back to ll2 rx */ |
		qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, ll2_syn_handle);
3029 | |
3030 | ep->syn = NULL; |
3031 | |
3032 | /* If connect failed - upper layer doesn't know about it */ |
3033 | if (fw_return_code == RDMA_RETURN_OK) |
3034 | qed_iwarp_mpa_received(p_hwfn, ep); |
3035 | else |
3036 | qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep, |
3037 | fw_return_code); |
3038 | } else { |
3039 | if (fw_return_code == RDMA_RETURN_OK) |
3040 | qed_iwarp_mpa_offload(p_hwfn, ep); |
3041 | else |
3042 | qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep, |
3043 | fw_return_code); |
3044 | } |
3045 | } |
3046 | |
3047 | static inline bool |
3048 | qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) |
3049 | { |
3050 | if (!ep || (ep->sig != QED_EP_SIG)) { |
3051 | DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n" , ep); |
3052 | return false; |
3053 | } |
3054 | |
3055 | return true; |
3056 | } |
3057 | |
3058 | static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, |
3059 | __le16 echo, union event_ring_data *data, |
3060 | u8 fw_return_code) |
3061 | { |
3062 | struct qed_rdma_events events = p_hwfn->p_rdma_info->events; |
3063 | struct regpair *fw_handle = &data->rdma_data.async_handle; |
3064 | struct qed_iwarp_ep *ep = NULL; |
3065 | u16 srq_offset; |
3066 | u16 srq_id; |
3067 | u16 cid; |
3068 | |
3069 | ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi, |
3070 | fw_handle->lo); |
3071 | |
3072 | switch (fw_event_code) { |
3073 | case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE: |
3074 | /* Async completion after TCP 3-way handshake */ |
3075 | if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) |
3076 | return -EINVAL; |
3077 | DP_VERBOSE(p_hwfn, |
3078 | QED_MSG_RDMA, |
3079 | "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n" , |
3080 | ep->tcp_cid, fw_return_code); |
3081 | qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code); |
3082 | break; |
3083 | case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED: |
3084 | if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) |
3085 | return -EINVAL; |
3086 | DP_VERBOSE(p_hwfn, |
3087 | QED_MSG_RDMA, |
3088 | "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n" , |
3089 | ep->cid, fw_return_code); |
		qed_iwarp_exception_received(p_hwfn, ep, fw_return_code);
3091 | break; |
3092 | case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE: |
3093 | /* Async completion for Close Connection ramrod */ |
3094 | if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) |
3095 | return -EINVAL; |
3096 | DP_VERBOSE(p_hwfn, |
3097 | QED_MSG_RDMA, |
3098 | "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n" , |
3099 | ep->cid, fw_return_code); |
3100 | qed_iwarp_qp_in_error(p_hwfn, ep, fw_return_code); |
3101 | break; |
3102 | case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED: |
3103 | /* Async event for active side only */ |
3104 | if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) |
3105 | return -EINVAL; |
3106 | DP_VERBOSE(p_hwfn, |
3107 | QED_MSG_RDMA, |
3108 | "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n" , |
3109 | ep->cid, fw_return_code); |
3110 | qed_iwarp_mpa_reply_arrived(p_hwfn, ep); |
3111 | break; |
3112 | case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE: |
3113 | if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) |
3114 | return -EINVAL; |
3115 | DP_VERBOSE(p_hwfn, |
3116 | QED_MSG_RDMA, |
3117 | "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n" , |
3118 | ep->cid, fw_return_code); |
3119 | qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code); |
3120 | break; |
3121 | case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED: |
3122 | cid = (u16)le32_to_cpu(fw_handle->lo); |
3123 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, |
3124 | "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n" , cid); |
3125 | qed_iwarp_cid_cleaned(p_hwfn, cid); |
3126 | |
3127 | break; |
3128 | case IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY: |
3129 | DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY\n" ); |
3130 | srq_offset = p_hwfn->p_rdma_info->srq_id_offset; |
		/* FW assigns a value that is no greater than a u16 */
3132 | srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset; |
3133 | events.affiliated_event(events.context, |
3134 | QED_IWARP_EVENT_SRQ_EMPTY, |
3135 | &srq_id); |
3136 | break; |
3137 | case IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT: |
3138 | DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT\n" ); |
3139 | srq_offset = p_hwfn->p_rdma_info->srq_id_offset; |
		/* FW assigns a value that is no greater than a u16 */
3141 | srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset; |
3142 | events.affiliated_event(events.context, |
3143 | QED_IWARP_EVENT_SRQ_LIMIT, |
3144 | &srq_id); |
3145 | break; |
3146 | case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW: |
3147 | DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n" ); |
3148 | |
3149 | p_hwfn->p_rdma_info->events.affiliated_event( |
3150 | p_hwfn->p_rdma_info->events.context, |
3151 | QED_IWARP_EVENT_CQ_OVERFLOW, |
3152 | (void *)fw_handle); |
3153 | break; |
3154 | default: |
3155 | DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n" , |
3156 | fw_event_code); |
3157 | return -EINVAL; |
3158 | } |
3159 | return 0; |
3160 | } |
3161 | |
3162 | int |
3163 | qed_iwarp_create_listen(void *rdma_cxt, |
3164 | struct qed_iwarp_listen_in *iparams, |
3165 | struct qed_iwarp_listen_out *oparams) |
3166 | { |
3167 | struct qed_hwfn *p_hwfn = rdma_cxt; |
3168 | struct qed_iwarp_listener *listener; |
3169 | |
	listener = kzalloc(sizeof(*listener), GFP_KERNEL);
3171 | if (!listener) |
3172 | return -ENOMEM; |
3173 | |
3174 | listener->ip_version = iparams->ip_version; |
3175 | memcpy(listener->ip_addr, iparams->ip_addr, sizeof(listener->ip_addr)); |
3176 | listener->port = iparams->port; |
3177 | listener->vlan = iparams->vlan; |
3178 | |
3179 | listener->event_cb = iparams->event_cb; |
3180 | listener->cb_context = iparams->cb_context; |
3181 | listener->max_backlog = iparams->max_backlog; |
3182 | oparams->handle = listener; |
3183 | |
	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_add_tail(&listener->list_entry,
		      &p_hwfn->p_rdma_info->iwarp.listen_list);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3188 | |
3189 | DP_VERBOSE(p_hwfn, |
3190 | QED_MSG_RDMA, |
3191 | "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n" , |
3192 | listener->event_cb, |
3193 | listener, |
3194 | listener->ip_addr[0], |
3195 | listener->ip_addr[1], |
3196 | listener->ip_addr[2], |
3197 | listener->ip_addr[3], listener->port, listener->vlan); |
3198 | |
3199 | return 0; |
3200 | } |
3201 | |
3202 | int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle) |
3203 | { |
3204 | struct qed_iwarp_listener *listener = handle; |
3205 | struct qed_hwfn *p_hwfn = rdma_cxt; |
3206 | |
3207 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n" , handle); |
3208 | |
	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_del(&listener->list_entry);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3212 | |
	kfree(listener);
3214 | |
3215 | return 0; |
3216 | } |
3217 | |
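/* Post an MPA_OFFLOAD_SEND_RTR ramrod: firmware then sends the RTR message
 * that completes an enhanced-mode (MPA rev 2) peer-to-peer setup.
 */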
3218 | int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams) |
3219 | { |
3220 | struct qed_hwfn *p_hwfn = rdma_cxt; |
3221 | struct qed_sp_init_data init_data; |
3222 | struct qed_spq_entry *p_ent; |
3223 | struct qed_iwarp_ep *ep; |
3224 | struct qed_rdma_qp *qp; |
3225 | int rc; |
3226 | |
3227 | ep = iparams->ep_context; |
3228 | if (!ep) { |
3229 | DP_ERR(p_hwfn, "Ep Context receive in send_rtr is NULL\n" ); |
3230 | return -EINVAL; |
3231 | } |
3232 | |
3233 | qp = ep->qp; |
3234 | |
3235 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n" , |
3236 | qp->icid, ep->tcp_cid); |
3237 | |
3238 | memset(&init_data, 0, sizeof(init_data)); |
3239 | init_data.cid = qp->icid; |
3240 | init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; |
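	/* QED_SPQ_MODE_CB: completion is reported asynchronously through the
	 * SPQ callback rather than by blocking on the ramrod.
	 */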
3241 | init_data.comp_mode = QED_SPQ_MODE_CB; |
3242 | |
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
				 PROTOCOLID_IWARP, &init_data);
3246 | |
3247 | if (rc) |
3248 | return rc; |
3249 | |
3250 | rc = qed_spq_post(p_hwfn, p_ent, NULL); |
3251 | |
3252 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n" , rc); |
3253 | |
3254 | return rc; |
3255 | } |
3256 | |
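/* Report the QP state in RoCE terms; qed_iwarp2roce_state() maps the iWARP
 * state machine onto the common qed_roce_qp_state values.
 */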
3257 | void |
3258 | qed_iwarp_query_qp(struct qed_rdma_qp *qp, |
3259 | struct qed_rdma_query_qp_out_params *out_params) |
3260 | { |
	out_params->state = qed_iwarp2roce_state(qp->iwarp_state);
3262 | } |
3263 | |