// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
#include "protos.h"
#include "puda.h"
#include "ws.h"

static void irdma_ieq_receive(struct irdma_sc_vsi *vsi,
                              struct irdma_puda_buf *buf);
static void irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid);
static void irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
                                     struct irdma_puda_buf *buf, u32 wqe_idx);
/**
 * irdma_puda_get_listbuf - get buffer from puda list
 * @list: list to use for buffers (ILQ or IEQ)
 */
static struct irdma_puda_buf *irdma_puda_get_listbuf(struct list_head *list)
{
        struct irdma_puda_buf *buf = NULL;

        if (!list_empty(list)) {
                buf = (struct irdma_puda_buf *)list->next;
                list_del((struct list_head *)&buf->list);
        }

        return buf;
}

/**
 * irdma_puda_get_bufpool - return buffer from resource
 * @rsrc: resource to use for buffer
 */
struct irdma_puda_buf *irdma_puda_get_bufpool(struct irdma_puda_rsrc *rsrc)
{
        struct irdma_puda_buf *buf = NULL;
        struct list_head *list = &rsrc->bufpool;
        unsigned long flags;

        spin_lock_irqsave(&rsrc->bufpool_lock, flags);
        buf = irdma_puda_get_listbuf(list);
        if (buf) {
                rsrc->avail_buf_count--;
                buf->vsi = rsrc->vsi;
        } else {
                rsrc->stats_buf_alloc_fail++;
        }
        spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);

        return buf;
}

/**
 * irdma_puda_ret_bufpool - return buffer to rsrc list
 * @rsrc: resource to use for buffer
 * @buf: buffer to return to resource
 */
void irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc,
                            struct irdma_puda_buf *buf)
{
        unsigned long flags;

        buf->do_lpb = false;
        spin_lock_irqsave(&rsrc->bufpool_lock, flags);
        list_add(&buf->list, &rsrc->bufpool);
        spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
        rsrc->avail_buf_count++;
}
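
/*
 * Illustrative sketch (not driver code): the typical take/return pairing
 * for the buffer pool above. The surrounding logic is hypothetical.
 *
 *        buf = irdma_puda_get_bufpool(rsrc);     // take under bufpool_lock
 *        if (!buf)
 *                return;         // pool empty; alloc-fail stat was bumped
 *        // ...fill buf->mem.va and post or transmit it...
 *        irdma_puda_ret_bufpool(rsrc, buf);      // return it; do_lpb is cleared
 */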

/**
 * irdma_puda_post_recvbuf - set wqe for rcv buffer
 * @rsrc: resource ptr
 * @wqe_idx: wqe index to use
 * @buf: puda buffer for rcv q
 * @initial: flag if during init time
 */
static void irdma_puda_post_recvbuf(struct irdma_puda_rsrc *rsrc, u32 wqe_idx,
                                    struct irdma_puda_buf *buf, bool initial)
{
        __le64 *wqe;
        struct irdma_sc_qp *qp = &rsrc->qp;
        u64 offset24 = 0;

        /* Synch buffer for use by device */
        dma_sync_single_for_device(rsrc->dev->hw->device, buf->mem.pa,
                                   buf->mem.size, DMA_BIDIRECTIONAL);
        qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
        wqe = qp->qp_uk.rq_base[wqe_idx].elem;
        if (!initial)
                get_64bit_val(wqe, 24, &offset24);

        offset24 = (offset24) ? 0 : FIELD_PREP(IRDMAQPSQ_VALID, 1);

        set_64bit_val(wqe, 16, 0);
        set_64bit_val(wqe, 0, buf->mem.pa);
        if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
                set_64bit_val(wqe, 8,
                              FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, buf->mem.size));
        } else {
                set_64bit_val(wqe, 8,
                              FIELD_PREP(IRDMAQPSQ_FRAG_LEN, buf->mem.size) |
                              offset24);
        }
        dma_wmb(); /* make sure WQE is written before valid bit is set */

        set_64bit_val(wqe, 24, offset24);
}
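
/*
 * Note (sketch, assuming a zeroed ring at init): the RQ valid bit works by
 * toggling rather than set-and-clear. Reposting a slot reads back qword 24
 * and flips the bit:
 *
 *        get_64bit_val(wqe, 24, &offset24);
 *        offset24 = offset24 ? 0 : FIELD_PREP(IRDMAQPSQ_VALID, 1);
 *
 * On the initial pass the slot is taken as 0 and the bit is set; each later
 * repost inverts it, which is how the hardware tells a freshly posted WQE
 * from a stale one on the previous lap of the ring.
 */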

/**
 * irdma_puda_replenish_rq - post rcv buffers
 * @rsrc: resource to use for buffer
 * @initial: flag if during init time
 */
static int irdma_puda_replenish_rq(struct irdma_puda_rsrc *rsrc, bool initial)
{
        u32 i;
        u32 invalid_cnt = rsrc->rxq_invalid_cnt;
        struct irdma_puda_buf *buf = NULL;

        for (i = 0; i < invalid_cnt; i++) {
                buf = irdma_puda_get_bufpool(rsrc);
                if (!buf)
                        return -ENOBUFS;
                irdma_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf, initial);
                rsrc->rx_wqe_idx = ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
                rsrc->rxq_invalid_cnt--;
        }

        return 0;
}

/**
 * irdma_puda_alloc_buf - allocate mem for buffer
 * @dev: iwarp device
 * @len: length of buffer
 */
static struct irdma_puda_buf *irdma_puda_alloc_buf(struct irdma_sc_dev *dev,
                                                   u32 len)
{
        struct irdma_puda_buf *buf;
        struct irdma_virt_mem buf_mem;

        buf_mem.size = sizeof(struct irdma_puda_buf);
        buf_mem.va = kzalloc(buf_mem.size, GFP_KERNEL);
        if (!buf_mem.va)
                return NULL;

        buf = buf_mem.va;
        buf->mem.size = len;
        buf->mem.va = kzalloc(buf->mem.size, GFP_KERNEL);
        if (!buf->mem.va)
                goto free_virt;
        buf->mem.pa = dma_map_single(dev->hw->device, buf->mem.va,
                                     buf->mem.size, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev->hw->device, buf->mem.pa)) {
                kfree(buf->mem.va);
                goto free_virt;
        }

        buf->buf_mem.va = buf_mem.va;
        buf->buf_mem.size = buf_mem.size;

        return buf;

free_virt:
        kfree(buf_mem.va);
        return NULL;
}

/**
 * irdma_puda_dele_buf - delete buffer back to system
 * @dev: iwarp device
 * @buf: buffer to free
 */
static void irdma_puda_dele_buf(struct irdma_sc_dev *dev,
                                struct irdma_puda_buf *buf)
{
        dma_unmap_single(dev->hw->device, buf->mem.pa, buf->mem.size,
                         DMA_BIDIRECTIONAL);
        kfree(buf->mem.va);
        kfree(buf->buf_mem.va);
}

/**
 * irdma_puda_get_next_send_wqe - return next wqe for processing
 * @qp: puda qp for wqe
 * @wqe_idx: wqe index for caller
 */
static __le64 *irdma_puda_get_next_send_wqe(struct irdma_qp_uk *qp,
                                            u32 *wqe_idx)
{
        int ret_code = 0;

        *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
        if (!*wqe_idx)
                qp->swqe_polarity = !qp->swqe_polarity;
        IRDMA_RING_MOVE_HEAD(qp->sq_ring, ret_code);
        if (ret_code)
                return NULL;

        return qp->sq_base[*wqe_idx].elem;
}
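
/*
 * Worked example (not driver code, assuming swqe_polarity starts at 0):
 * with an SQ of size 4, successive calls hand out
 *
 *        wqe_idx:  0 1 2 3 0 1 2 3 0 ...
 *        polarity: 1 1 1 1 0 0 0 0 1 ...
 *
 * swqe_polarity flips every time the head returns to index 0, so the valid
 * bits written with it encode which lap of the ring produced each WQE.
 */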

/**
 * irdma_puda_poll_info - poll cq for completion
 * @cq: cq for poll
 * @info: info return for successful completion
 */
static int irdma_puda_poll_info(struct irdma_sc_cq *cq,
                                struct irdma_puda_cmpl_info *info)
{
        struct irdma_cq_uk *cq_uk = &cq->cq_uk;
        u64 qword0, qword2, qword3, qword6;
        __le64 *cqe;
        __le64 *ext_cqe = NULL;
        u64 qword7 = 0;
        u64 comp_ctx;
        bool valid_bit;
        bool ext_valid = false;
        u32 major_err, minor_err;
        u32 peek_head;
        bool error;
        u8 polarity;

        cqe = IRDMA_GET_CURRENT_CQ_ELEM(&cq->cq_uk);
        get_64bit_val(cqe, 24, &qword3);
        valid_bit = (bool)FIELD_GET(IRDMA_CQ_VALID, qword3);
        if (valid_bit != cq_uk->polarity)
                return -ENOENT;

        /* Ensure CQE contents are read after valid bit is checked */
        dma_rmb();

        if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
                ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);

        if (ext_valid) {
                peek_head = (cq_uk->cq_ring.head + 1) % cq_uk->cq_ring.size;
                ext_cqe = cq_uk->cq_base[peek_head].buf;
                get_64bit_val(ext_cqe, 24, &qword7);
                polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
                if (!peek_head)
                        polarity ^= 1;
                if (polarity != cq_uk->polarity)
                        return -ENOENT;

                /* Ensure ext CQE contents are read after ext valid bit is checked */
                dma_rmb();

                IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
                if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
                        cq_uk->polarity = !cq_uk->polarity;
                /* update cq tail in cq shadow memory also */
                IRDMA_RING_MOVE_TAIL(cq_uk->cq_ring);
        }

        print_hex_dump_debug("PUDA: PUDA CQE", DUMP_PREFIX_OFFSET, 16, 8, cqe,
                             32, false);
        if (ext_valid)
                print_hex_dump_debug("PUDA: PUDA EXT-CQE", DUMP_PREFIX_OFFSET,
                                     16, 8, ext_cqe, 32, false);

        error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
        if (error) {
                ibdev_dbg(to_ibdev(cq->dev), "PUDA: receive error\n");
                major_err = (u32)(FIELD_GET(IRDMA_CQ_MAJERR, qword3));
                minor_err = (u32)(FIELD_GET(IRDMA_CQ_MINERR, qword3));
                info->compl_error = major_err << 16 | minor_err;
                return -EIO;
        }

        get_64bit_val(cqe, 0, &qword0);
        get_64bit_val(cqe, 16, &qword2);

        info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
        info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
        if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
                info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);

        get_64bit_val(cqe, 8, &comp_ctx);
        info->qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
        info->wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);

        if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
                if (ext_valid) {
                        info->vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
                        if (info->vlan_valid) {
                                get_64bit_val(ext_cqe, 16, &qword6);
                                info->vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
                        }
                        info->smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
                        if (info->smac_valid) {
                                get_64bit_val(ext_cqe, 16, &qword6);
                                info->smac[0] = (u8)((qword6 >> 40) & 0xFF);
                                info->smac[1] = (u8)((qword6 >> 32) & 0xFF);
                                info->smac[2] = (u8)((qword6 >> 24) & 0xFF);
                                info->smac[3] = (u8)((qword6 >> 16) & 0xFF);
                                info->smac[4] = (u8)((qword6 >> 8) & 0xFF);
                                info->smac[5] = (u8)(qword6 & 0xFF);
                        }
                }

                if (cq->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
                        info->vlan_valid = (bool)FIELD_GET(IRDMA_VLAN_TAG_VALID, qword3);
                        info->l4proto = (u8)FIELD_GET(IRDMA_UDA_L4PROTO, qword2);
                        info->l3proto = (u8)FIELD_GET(IRDMA_UDA_L3PROTO, qword2);
                }

                info->payload_len = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
        }

        return 0;
}
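
/*
 * Background sketch: FIELD_PREP()/FIELD_GET() from <linux/bitfield.h> are
 * the pack/unpack primitives used throughout this file. Given a mask, one
 * shifts a value into position, the other extracts it (DEMO_MASK below is
 * hypothetical):
 *
 *        #define DEMO_MASK GENMASK_ULL(47, 40)
 *
 *        u64 qword = FIELD_PREP(DEMO_MASK, 0xab);  // pack:   0xab << 40
 *        u8 v = (u8)FIELD_GET(DEMO_MASK, qword);   // unpack: v == 0xab
 *
 * Combined with get_64bit_val()/set_64bit_val() for the little-endian
 * 64-bit loads and stores, this is all the CQE/WQE parsing above uses.
 */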

/**
 * irdma_puda_poll_cmpl - processes completion for cq
 * @dev: iwarp device
 * @cq: cq getting interrupt
 * @compl_err: return any completion err
 */
int irdma_puda_poll_cmpl(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq,
                         u32 *compl_err)
{
        struct irdma_qp_uk *qp;
        struct irdma_cq_uk *cq_uk = &cq->cq_uk;
        struct irdma_puda_cmpl_info info = {};
        int ret = 0;
        struct irdma_puda_buf *buf;
        struct irdma_puda_rsrc *rsrc;
        u8 cq_type = cq->cq_type;
        unsigned long flags;

        if (cq_type == IRDMA_CQ_TYPE_ILQ || cq_type == IRDMA_CQ_TYPE_IEQ) {
                rsrc = (cq_type == IRDMA_CQ_TYPE_ILQ) ? cq->vsi->ilq :
                                                        cq->vsi->ieq;
        } else {
                ibdev_dbg(to_ibdev(dev), "PUDA: qp_type error\n");
                return -EINVAL;
        }

        ret = irdma_puda_poll_info(cq, &info);
        *compl_err = info.compl_error;
        if (ret == -ENOENT)
                return ret;
        if (ret)
                goto done;

        qp = info.qp;
        if (!qp || !rsrc) {
                ret = -EFAULT;
                goto done;
        }

        if (qp->qp_id != rsrc->qp_id) {
                ret = -EFAULT;
                goto done;
        }

        if (info.q_type == IRDMA_CQE_QTYPE_RQ) {
                buf = (struct irdma_puda_buf *)(uintptr_t)
                      qp->rq_wrid_array[info.wqe_idx];

                /* reusing so synch the buffer for CPU use */
                dma_sync_single_for_cpu(dev->hw->device, buf->mem.pa,
                                        buf->mem.size, DMA_BIDIRECTIONAL);
                /* Get all the tcpip information in the buf header */
                ret = irdma_puda_get_tcpip_info(&info, buf);
                if (ret) {
                        rsrc->stats_rcvd_pkt_err++;
                        if (cq_type == IRDMA_CQ_TYPE_ILQ) {
                                irdma_ilq_putback_rcvbuf(&rsrc->qp, buf,
                                                         info.wqe_idx);
                        } else {
                                irdma_puda_ret_bufpool(rsrc, buf);
                                irdma_puda_replenish_rq(rsrc, false);
                        }
                        goto done;
                }

                rsrc->stats_pkt_rcvd++;
                rsrc->compl_rxwqe_idx = info.wqe_idx;
                ibdev_dbg(to_ibdev(dev), "PUDA: RQ completion\n");
                rsrc->receive(rsrc->vsi, buf);
                if (cq_type == IRDMA_CQ_TYPE_ILQ)
                        irdma_ilq_putback_rcvbuf(&rsrc->qp, buf, info.wqe_idx);
                else
                        irdma_puda_replenish_rq(rsrc, false);

        } else {
                ibdev_dbg(to_ibdev(dev), "PUDA: SQ completion\n");
                buf = (struct irdma_puda_buf *)(uintptr_t)
                      qp->sq_wrtrk_array[info.wqe_idx].wrid;

                /* reusing so synch the buffer for CPU use */
                dma_sync_single_for_cpu(dev->hw->device, buf->mem.pa,
                                        buf->mem.size, DMA_BIDIRECTIONAL);
                IRDMA_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
                rsrc->xmit_complete(rsrc->vsi, buf);
                spin_lock_irqsave(&rsrc->bufpool_lock, flags);
                rsrc->tx_wqe_avail_cnt++;
                spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
                if (!list_empty(&rsrc->txpend))
                        irdma_puda_send_buf(rsrc, NULL);
        }

done:
        IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
        if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
                cq_uk->polarity = !cq_uk->polarity;
        /* update cq tail in cq shadow memory also */
        IRDMA_RING_MOVE_TAIL(cq_uk->cq_ring);
        set_64bit_val(cq_uk->shadow_area, 0,
                      IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring));

        return ret;
}

/**
 * irdma_puda_send - complete send wqe for transmit
 * @qp: puda qp for send
 * @info: buffer information for transmit
 */
int irdma_puda_send(struct irdma_sc_qp *qp, struct irdma_puda_send_info *info)
{
        __le64 *wqe;
        u32 iplen, l4len;
        u64 hdr[2];
        u32 wqe_idx;
        u8 iipt;

        /* number of 32-bit DWORDs in header */
        l4len = info->tcplen >> 2;
        if (info->ipv4) {
                iipt = 3;
                iplen = 5;
        } else {
                iipt = 1;
                iplen = 10;
        }

        wqe = irdma_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
        if (!wqe)
                return -ENOMEM;

        qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;
        /* Third line of WQE descriptor */
        /* maclen is in words */

        if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
                hdr[0] = 0; /* Dest_QPN and Dest_QKey only for UD */
                hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) |
                         FIELD_PREP(IRDMA_UDA_QPSQ_L4LEN, l4len) |
                         FIELD_PREP(IRDMAQPSQ_AHID, info->ah_id) |
                         FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) |
                         FIELD_PREP(IRDMA_UDA_QPSQ_VALID,
                                    qp->qp_uk.swqe_polarity);

                /* Fourth line of WQE descriptor */

                set_64bit_val(wqe, 0, info->paddr);
                set_64bit_val(wqe, 8,
                              FIELD_PREP(IRDMAQPSQ_FRAG_LEN, info->len) |
                              FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity));
        } else {
                hdr[0] = FIELD_PREP(IRDMA_UDA_QPSQ_MACLEN, info->maclen >> 1) |
                         FIELD_PREP(IRDMA_UDA_QPSQ_IPLEN, iplen) |
                         FIELD_PREP(IRDMA_UDA_QPSQ_L4T, 1) |
                         FIELD_PREP(IRDMA_UDA_QPSQ_IIPT, iipt) |
                         FIELD_PREP(IRDMA_GEN1_UDA_QPSQ_L4LEN, l4len);

                hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) |
                         FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) |
                         FIELD_PREP(IRDMA_UDA_QPSQ_DOLOOPBACK, info->do_lpb) |
                         FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity);

                /* Fourth line of WQE descriptor */

                set_64bit_val(wqe, 0, info->paddr);
                set_64bit_val(wqe, 8,
                              FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, info->len));
        }

        set_64bit_val(wqe, 16, hdr[0]);
        dma_wmb(); /* make sure WQE is written before valid bit is set */

        set_64bit_val(wqe, 24, hdr[1]);

        print_hex_dump_debug("PUDA: PUDA SEND WQE", DUMP_PREFIX_OFFSET, 16, 8,
                             wqe, 32, false);
        irdma_uk_qp_post_wr(&qp->qp_uk);
        return 0;
}
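
/*
 * Caller-side sketch (not new driver code): what must be filled in before
 * irdma_puda_send(). irdma_puda_send_buf() below derives exactly this from
 * a puda buffer:
 *
 *        struct irdma_puda_send_info info = {};
 *
 *        info.scratch = buf;          // returned as the SQ wrid on completion
 *        info.paddr = buf->mem.pa;    // DMA address of the frame
 *        info.len = buf->totallen;    // total frame length
 *        info.tcplen = buf->tcphlen;  // TCP header length in bytes
 *        info.ipv4 = buf->ipv4;
 *        info.ah_id = buf->ah_id;     // GEN_2+; GEN_1 fills maclen/do_lpb
 *
 *        ret = irdma_puda_send(&rsrc->qp, &info);
 */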

/**
 * irdma_puda_send_buf - transmit puda buffer
 * @rsrc: resource to use for buffer
 * @buf: puda buffer to transmit
 */
void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
                         struct irdma_puda_buf *buf)
{
        struct irdma_puda_send_info info;
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&rsrc->bufpool_lock, flags);
        /* if no wqe available or not from a completion and we have
         * pending buffers, we must queue new buffer
         */
        if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
                list_add_tail(&buf->list, &rsrc->txpend);
                spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
                rsrc->stats_sent_pkt_q++;
                if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
                        ibdev_dbg(to_ibdev(rsrc->dev),
                                  "PUDA: adding to txpend\n");
                return;
        }
        rsrc->tx_wqe_avail_cnt--;
        /* if we are coming from a completion and have pending buffers
         * then get one from the pending list
         */
        if (!buf) {
                buf = irdma_puda_get_listbuf(&rsrc->txpend);
                if (!buf)
                        goto done;
        }

        info.scratch = buf;
        info.paddr = buf->mem.pa;
        info.len = buf->totallen;
        info.tcplen = buf->tcphlen;
        info.ipv4 = buf->ipv4;

        if (rsrc->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
                info.ah_id = buf->ah_id;
        } else {
                info.maclen = buf->maclen;
                info.do_lpb = buf->do_lpb;
        }

        /* Synch buffer for use by device */
        dma_sync_single_for_cpu(rsrc->dev->hw->device, buf->mem.pa,
                                buf->mem.size, DMA_BIDIRECTIONAL);
        ret = irdma_puda_send(&rsrc->qp, &info);
        if (ret) {
                rsrc->tx_wqe_avail_cnt++;
                rsrc->stats_sent_pkt_q++;
                list_add(&buf->list, &rsrc->txpend);
                if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
                        ibdev_dbg(to_ibdev(rsrc->dev),
                                  "PUDA: adding to puda_send\n");
        } else {
                rsrc->stats_pkt_sent++;
        }
done:
        spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
}
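
/*
 * Flow-control note (sketch of the function above): buf == NULL means
 * "called from an SQ completion, drain txpend"; non-NULL means a new frame.
 * A frame only reaches irdma_puda_send() when an SQ WQE is free and nothing
 * older is still pending, so transmit order is preserved:
 *
 *        new buf ---> txpend (if busy) ---> irdma_puda_send()
 *        SQ completion -------------------^ (replays the oldest pending buf)
 */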

/**
 * irdma_puda_qp_setctx - during init, set qp's context
 * @rsrc: qp's resource
 */
static void irdma_puda_qp_setctx(struct irdma_puda_rsrc *rsrc)
{
        struct irdma_sc_qp *qp = &rsrc->qp;
        __le64 *qp_ctx = qp->hw_host_ctx;

        set_64bit_val(qp_ctx, 8, qp->sq_pa);
        set_64bit_val(qp_ctx, 16, qp->rq_pa);
        set_64bit_val(qp_ctx, 24,
                      FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
                      FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size));
        set_64bit_val(qp_ctx, 48,
                      FIELD_PREP(IRDMAQPC_SNDMSS, rsrc->buf_size));
        set_64bit_val(qp_ctx, 56, 0);
        if (qp->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
                set_64bit_val(qp_ctx, 64, 1);
        set_64bit_val(qp_ctx, 136,
                      FIELD_PREP(IRDMAQPC_TXCQNUM, rsrc->cq_id) |
                      FIELD_PREP(IRDMAQPC_RXCQNUM, rsrc->cq_id));
        set_64bit_val(qp_ctx, 144,
                      FIELD_PREP(IRDMAQPC_STAT_INDEX, rsrc->stats_idx));
        set_64bit_val(qp_ctx, 160,
                      FIELD_PREP(IRDMAQPC_PRIVEN, 1) |
                      FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, rsrc->stats_idx_valid));
        set_64bit_val(qp_ctx, 168,
                      FIELD_PREP(IRDMAQPC_QPCOMPCTX, (uintptr_t)qp));
        set_64bit_val(qp_ctx, 176,
                      FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
                      FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
                      FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));

        print_hex_dump_debug("PUDA: PUDA QP CONTEXT", DUMP_PREFIX_OFFSET, 16,
                             8, qp_ctx, IRDMA_QP_CTX_SIZE, false);
}

/**
 * irdma_puda_qp_wqe - setup wqe for qp create
 * @dev: Device
 * @qp: Resource qp
 */
static int irdma_puda_qp_wqe(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
{
        struct irdma_sc_cqp *cqp;
        __le64 *wqe;
        u64 hdr;
        struct irdma_ccq_cqe_info compl_info;
        int status = 0;

        cqp = dev->cqp;
        wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
        if (!wqe)
                return -ENOMEM;

        set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
        set_64bit_val(wqe, 40, qp->shadow_area_pa);

        hdr = qp->qp_uk.qp_id |
              FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
              FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, IRDMA_QP_TYPE_UDA) |
              FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, 1) |
              FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, 2) |
              FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
        dma_wmb(); /* make sure WQE is written before valid bit is set */

        set_64bit_val(wqe, 24, hdr);

        print_hex_dump_debug("PUDA: PUDA QP CREATE", DUMP_PREFIX_OFFSET, 16,
                             8, wqe, 40, false);
        irdma_sc_cqp_post_sq(cqp);
        status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_QP,
                                               &compl_info);

        return status;
}

/**
 * irdma_puda_qp_create - create qp for resource
 * @rsrc: resource to use for buffer
 */
static int irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc)
{
        struct irdma_sc_qp *qp = &rsrc->qp;
        struct irdma_qp_uk *ukqp = &qp->qp_uk;
        int ret = 0;
        u32 sq_size, rq_size;
        struct irdma_dma_mem *mem;

        sq_size = rsrc->sq_size * IRDMA_QP_WQE_MIN_SIZE;
        rq_size = rsrc->rq_size * IRDMA_QP_WQE_MIN_SIZE;
        rsrc->qpmem.size = ALIGN((sq_size + rq_size + (IRDMA_SHADOW_AREA_SIZE << 3) + IRDMA_QP_CTX_SIZE),
                                 IRDMA_HW_PAGE_SIZE);
        rsrc->qpmem.va = dma_alloc_coherent(rsrc->dev->hw->device,
                                            rsrc->qpmem.size, &rsrc->qpmem.pa,
                                            GFP_KERNEL);
        if (!rsrc->qpmem.va)
                return -ENOMEM;

        mem = &rsrc->qpmem;
        memset(mem->va, 0, rsrc->qpmem.size);
        qp->hw_sq_size = irdma_get_encoded_wqe_size(rsrc->sq_size, IRDMA_QUEUE_TYPE_SQ_RQ);
        qp->hw_rq_size = irdma_get_encoded_wqe_size(rsrc->rq_size, IRDMA_QUEUE_TYPE_SQ_RQ);
        qp->pd = &rsrc->sc_pd;
        qp->qp_uk.qp_type = IRDMA_QP_TYPE_UDA;
        qp->dev = rsrc->dev;
        qp->qp_uk.back_qp = rsrc;
        qp->sq_pa = mem->pa;
        qp->rq_pa = qp->sq_pa + sq_size;
        qp->vsi = rsrc->vsi;
        ukqp->sq_base = mem->va;
        ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
        ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
        ukqp->uk_attrs = &qp->dev->hw_attrs.uk_attrs;
        qp->shadow_area_pa = qp->rq_pa + rq_size;
        qp->hw_host_ctx = ukqp->shadow_area + IRDMA_SHADOW_AREA_SIZE;
        qp->hw_host_ctx_pa = qp->shadow_area_pa + (IRDMA_SHADOW_AREA_SIZE << 3);
        qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
        ukqp->qp_id = rsrc->qp_id;
        ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
        ukqp->rq_wrid_array = rsrc->rq_wrid_array;
        ukqp->sq_size = rsrc->sq_size;
        ukqp->rq_size = rsrc->rq_size;

        IRDMA_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
        IRDMA_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
        IRDMA_RING_INIT(ukqp->rq_ring, ukqp->rq_size);
        ukqp->wqe_alloc_db = qp->pd->dev->wqe_alloc_db;

        ret = rsrc->dev->ws_add(qp->vsi, qp->user_pri);
        if (ret) {
                dma_free_coherent(rsrc->dev->hw->device, rsrc->qpmem.size,
                                  rsrc->qpmem.va, rsrc->qpmem.pa);
                rsrc->qpmem.va = NULL;
                return ret;
        }

        irdma_qp_add_qos(qp);
        irdma_puda_qp_setctx(rsrc);

        if (rsrc->dev->ceq_valid)
                ret = irdma_cqp_qp_create_cmd(rsrc->dev, qp);
        else
                ret = irdma_puda_qp_wqe(rsrc->dev, qp);
        if (ret) {
                irdma_qp_rem_qos(qp);
                rsrc->dev->ws_remove(qp->vsi, qp->user_pri);
                dma_free_coherent(rsrc->dev->hw->device, rsrc->qpmem.size,
                                  rsrc->qpmem.va, rsrc->qpmem.pa);
                rsrc->qpmem.va = NULL;
        }

        return ret;
}
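
/*
 * Layout sketch (derived from the assignments above): the single coherent
 * qpmem allocation is carved into four regions at increasing byte offsets:
 *
 *        +0              sq_base      sq_size * IRDMA_QP_WQE_MIN_SIZE
 *        +sq             rq_base      rq_size * IRDMA_QP_WQE_MIN_SIZE
 *        +sq+rq          shadow_area  IRDMA_SHADOW_AREA_SIZE << 3 bytes
 *        +sq+rq+shadow   hw_host_ctx  IRDMA_QP_CTX_SIZE bytes
 *
 * qp->rq_pa, qp->shadow_area_pa and qp->hw_host_ctx_pa are the base DMA
 * address plus the same running offsets, mirroring the virtual-address
 * assignments to ukqp->sq_base/rq_base/shadow_area.
 */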

/**
 * irdma_puda_cq_wqe - setup wqe for CQ create
 * @dev: Device
 * @cq: resource for cq
 */
static int irdma_puda_cq_wqe(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
{
        __le64 *wqe;
        struct irdma_sc_cqp *cqp;
        u64 hdr;
        struct irdma_ccq_cqe_info compl_info;
        int status = 0;

        cqp = dev->cqp;
        wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
        if (!wqe)
                return -ENOMEM;

        set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
        set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
        set_64bit_val(wqe, 16,
                      FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
        set_64bit_val(wqe, 32, cq->cq_pa);
        set_64bit_val(wqe, 40, cq->shadow_area_pa);
        set_64bit_val(wqe, 56,
                      FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
                      FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));

        hdr = cq->cq_uk.cq_id |
              FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
              FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, 1) |
              FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, 1) |
              FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, 1) |
              FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
        dma_wmb(); /* make sure WQE is written before valid bit is set */

        set_64bit_val(wqe, 24, hdr);

        print_hex_dump_debug("PUDA: PUDA CREATE CQ", DUMP_PREFIX_OFFSET, 16,
                             8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
        irdma_sc_cqp_post_sq(dev->cqp);
        status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_CQ,
                                               &compl_info);
        if (!status) {
                struct irdma_sc_ceq *ceq = dev->ceq[0];

                if (ceq && ceq->reg_cq)
                        status = irdma_sc_add_cq_ctx(ceq, cq);
        }

        return status;
}

/**
 * irdma_puda_cq_create - create cq for resource
 * @rsrc: resource for which cq to create
 */
static int irdma_puda_cq_create(struct irdma_puda_rsrc *rsrc)
{
        struct irdma_sc_dev *dev = rsrc->dev;
        struct irdma_sc_cq *cq = &rsrc->cq;
        int ret = 0;
        u32 cqsize;
        struct irdma_dma_mem *mem;
        struct irdma_cq_init_info info = {};
        struct irdma_cq_uk_init_info *init_info = &info.cq_uk_init_info;

        cq->vsi = rsrc->vsi;
        cqsize = rsrc->cq_size * (sizeof(struct irdma_cqe));
        rsrc->cqmem.size = ALIGN(cqsize + sizeof(struct irdma_cq_shadow_area),
                                 IRDMA_CQ0_ALIGNMENT);
        rsrc->cqmem.va = dma_alloc_coherent(dev->hw->device, rsrc->cqmem.size,
                                            &rsrc->cqmem.pa, GFP_KERNEL);
        if (!rsrc->cqmem.va)
                return -ENOMEM;

        mem = &rsrc->cqmem;
        info.dev = dev;
        info.type = (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ) ?
                    IRDMA_CQ_TYPE_ILQ : IRDMA_CQ_TYPE_IEQ;
        info.shadow_read_threshold = rsrc->cq_size >> 2;
        info.cq_base_pa = mem->pa;
        info.shadow_area_pa = mem->pa + cqsize;
        init_info->cq_base = mem->va;
        init_info->shadow_area = (__le64 *)((u8 *)mem->va + cqsize);
        init_info->cq_size = rsrc->cq_size;
        init_info->cq_id = rsrc->cq_id;
        info.ceqe_mask = true;
        info.ceq_id_valid = true;
        info.vsi = rsrc->vsi;

        ret = irdma_sc_cq_init(cq, &info);
        if (ret)
                goto error;

        if (rsrc->dev->ceq_valid)
                ret = irdma_cqp_cq_create_cmd(dev, cq);
        else
                ret = irdma_puda_cq_wqe(dev, cq);
error:
        if (ret) {
                dma_free_coherent(dev->hw->device, rsrc->cqmem.size,
                                  rsrc->cqmem.va, rsrc->cqmem.pa);
                rsrc->cqmem.va = NULL;
        }

        return ret;
}

/**
 * irdma_puda_free_qp - free qp for resource
 * @rsrc: resource for which qp to free
 */
static void irdma_puda_free_qp(struct irdma_puda_rsrc *rsrc)
{
        int ret;
        struct irdma_ccq_cqe_info compl_info;
        struct irdma_sc_dev *dev = rsrc->dev;

        if (rsrc->dev->ceq_valid) {
                irdma_cqp_qp_destroy_cmd(dev, &rsrc->qp);
                rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);
                return;
        }

        ret = irdma_sc_qp_destroy(&rsrc->qp, 0, false, true, true);
        if (ret)
                ibdev_dbg(to_ibdev(dev),
                          "PUDA: error puda qp destroy wqe, status = %d\n",
                          ret);
        if (!ret) {
                ret = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_DESTROY_QP,
                                                    &compl_info);
                if (ret)
                        ibdev_dbg(to_ibdev(dev),
                                  "PUDA: error puda qp destroy failed, status = %d\n",
                                  ret);
        }
        rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);
}

/**
 * irdma_puda_free_cq - free cq for resource
 * @rsrc: resource for which cq to free
 */
static void irdma_puda_free_cq(struct irdma_puda_rsrc *rsrc)
{
        int ret;
        struct irdma_ccq_cqe_info compl_info;
        struct irdma_sc_dev *dev = rsrc->dev;

        if (rsrc->dev->ceq_valid) {
                irdma_cqp_cq_destroy_cmd(dev, &rsrc->cq);
                return;
        }

        ret = irdma_sc_cq_destroy(&rsrc->cq, 0, true);
        if (ret)
                ibdev_dbg(to_ibdev(dev), "PUDA: error ieq cq destroy\n");
        if (!ret) {
                ret = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_DESTROY_CQ,
                                                    &compl_info);
                if (ret)
                        ibdev_dbg(to_ibdev(dev),
                                  "PUDA: error ieq qp destroy done\n");
        }
}

/**
 * irdma_puda_dele_rsrc - delete all resources during close
 * @vsi: VSI structure of device
 * @type: type of resource to delete
 * @reset: true if reset chip
 */
void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
                          bool reset)
{
        struct irdma_sc_dev *dev = vsi->dev;
        struct irdma_puda_rsrc *rsrc;
        struct irdma_puda_buf *buf = NULL;
        struct irdma_puda_buf *nextbuf = NULL;
        struct irdma_virt_mem *vmem;
        struct irdma_sc_ceq *ceq;

        ceq = vsi->dev->ceq[0];
        switch (type) {
        case IRDMA_PUDA_RSRC_TYPE_ILQ:
                rsrc = vsi->ilq;
                vmem = &vsi->ilq_mem;
                vsi->ilq = NULL;
                if (ceq && ceq->reg_cq)
                        irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
                break;
        case IRDMA_PUDA_RSRC_TYPE_IEQ:
                rsrc = vsi->ieq;
                vmem = &vsi->ieq_mem;
                vsi->ieq = NULL;
                if (ceq && ceq->reg_cq)
                        irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
                break;
        default:
                ibdev_dbg(to_ibdev(dev), "PUDA: error resource type = 0x%x\n",
                          type);
                return;
        }

        switch (rsrc->cmpl) {
        case PUDA_HASH_CRC_COMPLETE:
                irdma_free_hash_desc(rsrc->hash_desc);
                fallthrough;
        case PUDA_QP_CREATED:
                irdma_qp_rem_qos(&rsrc->qp);

                if (!reset)
                        irdma_puda_free_qp(rsrc);

                dma_free_coherent(dev->hw->device, rsrc->qpmem.size,
                                  rsrc->qpmem.va, rsrc->qpmem.pa);
                rsrc->qpmem.va = NULL;
                fallthrough;
        case PUDA_CQ_CREATED:
                if (!reset)
                        irdma_puda_free_cq(rsrc);

                dma_free_coherent(dev->hw->device, rsrc->cqmem.size,
                                  rsrc->cqmem.va, rsrc->cqmem.pa);
                rsrc->cqmem.va = NULL;
                break;
        default:
                ibdev_dbg(to_ibdev(rsrc->dev), "PUDA: error no resources\n");
                break;
        }
        /* Free all allocated puda buffers for both tx and rx */
        buf = rsrc->alloclist;
        while (buf) {
                nextbuf = buf->next;
                irdma_puda_dele_buf(dev, buf);
                buf = nextbuf;
                rsrc->alloc_buf_count--;
        }

        kfree(vmem->va);
}
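
/*
 * Teardown note: rsrc->cmpl records how far irdma_puda_create_rsrc() got,
 * and the fallthrough cases above unwind in reverse creation order:
 *
 *        PUDA_HASH_CRC_COMPLETE -> free the hash descriptor
 *        PUDA_QP_CREATED        -> destroy the QP, free qpmem
 *        PUDA_CQ_CREATED        -> destroy the CQ, free cqmem
 *
 * so a partially constructed resource is cleaned up by the same switch no
 * matter where creation failed.
 */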

/**
 * irdma_puda_allocbufs - allocate buffers for resource
 * @rsrc: resource for buffer allocation
 * @count: number of buffers to create
 */
static int irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc, u32 count)
{
        u32 i;
        struct irdma_puda_buf *buf;
        struct irdma_puda_buf *nextbuf;

        for (i = 0; i < count; i++) {
                buf = irdma_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
                if (!buf) {
                        rsrc->stats_buf_alloc_fail++;
                        return -ENOMEM;
                }
                irdma_puda_ret_bufpool(rsrc, buf);
                rsrc->alloc_buf_count++;
                if (!rsrc->alloclist) {
                        rsrc->alloclist = buf;
                } else {
                        nextbuf = rsrc->alloclist;
                        rsrc->alloclist = buf;
                        buf->next = nextbuf;
                }
        }

        rsrc->avail_buf_count = rsrc->alloc_buf_count;

        return 0;
}

/**
 * irdma_puda_create_rsrc - create resource (ilq or ieq)
 * @vsi: sc VSI struct
 * @info: resource information
 */
int irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
                           struct irdma_puda_rsrc_info *info)
{
        struct irdma_sc_dev *dev = vsi->dev;
        int ret = 0;
        struct irdma_puda_rsrc *rsrc;
        u32 pudasize;
        u32 sqwridsize, rqwridsize;
        struct irdma_virt_mem *vmem;

        info->count = 1;
        pudasize = sizeof(struct irdma_puda_rsrc);
        sqwridsize = info->sq_size * sizeof(struct irdma_sq_uk_wr_trk_info);
        rqwridsize = info->rq_size * 8;
        switch (info->type) {
        case IRDMA_PUDA_RSRC_TYPE_ILQ:
                vmem = &vsi->ilq_mem;
                break;
        case IRDMA_PUDA_RSRC_TYPE_IEQ:
                vmem = &vsi->ieq_mem;
                break;
        default:
                return -EOPNOTSUPP;
        }
        vmem->size = pudasize + sqwridsize + rqwridsize;
        vmem->va = kzalloc(vmem->size, GFP_KERNEL);
        if (!vmem->va)
                return -ENOMEM;

        rsrc = vmem->va;
        spin_lock_init(&rsrc->bufpool_lock);
        switch (info->type) {
        case IRDMA_PUDA_RSRC_TYPE_ILQ:
                vsi->ilq = vmem->va;
                vsi->ilq_count = info->count;
                rsrc->receive = info->receive;
                rsrc->xmit_complete = info->xmit_complete;
                break;
        case IRDMA_PUDA_RSRC_TYPE_IEQ:
                vsi->ieq_count = info->count;
                vsi->ieq = vmem->va;
                rsrc->receive = irdma_ieq_receive;
                rsrc->xmit_complete = irdma_ieq_tx_compl;
                break;
        default:
                return -EOPNOTSUPP;
        }

        rsrc->type = info->type;
        rsrc->sq_wrtrk_array = (struct irdma_sq_uk_wr_trk_info *)
                               ((u8 *)vmem->va + pudasize);
        rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
        /* Initialize all ieq lists */
        INIT_LIST_HEAD(&rsrc->bufpool);
        INIT_LIST_HEAD(&rsrc->txpend);

        rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
        irdma_sc_pd_init(dev, &rsrc->sc_pd, info->pd_id, info->abi_ver);
        rsrc->qp_id = info->qp_id;
        rsrc->cq_id = info->cq_id;
        rsrc->sq_size = info->sq_size;
        rsrc->rq_size = info->rq_size;
        rsrc->cq_size = info->rq_size + info->sq_size;
        if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
                if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
                        rsrc->cq_size += info->rq_size;
        }
        rsrc->buf_size = info->buf_size;
        rsrc->dev = dev;
        rsrc->vsi = vsi;
        rsrc->stats_idx = info->stats_idx;
        rsrc->stats_idx_valid = info->stats_idx_valid;

        ret = irdma_puda_cq_create(rsrc);
        if (!ret) {
                rsrc->cmpl = PUDA_CQ_CREATED;
                ret = irdma_puda_qp_create(rsrc);
        }
        if (ret) {
                ibdev_dbg(to_ibdev(dev),
                          "PUDA: error qp_create type=%d, status=%d\n",
                          rsrc->type, ret);
                goto error;
        }
        rsrc->cmpl = PUDA_QP_CREATED;

        ret = irdma_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
        if (ret) {
                ibdev_dbg(to_ibdev(dev), "PUDA: error alloc_buf\n");
                goto error;
        }

        rsrc->rxq_invalid_cnt = info->rq_size;
        ret = irdma_puda_replenish_rq(rsrc, true);
        if (ret)
                goto error;

        if (info->type == IRDMA_PUDA_RSRC_TYPE_IEQ) {
                if (!irdma_init_hash_desc(&rsrc->hash_desc)) {
                        rsrc->check_crc = true;
                        rsrc->cmpl = PUDA_HASH_CRC_COMPLETE;
                        ret = 0;
                }
        }

        irdma_sc_ccq_arm(&rsrc->cq);
        return ret;

error:
        irdma_puda_dele_rsrc(vsi, info->type, false);

        return ret;
}

/**
 * irdma_ilq_putback_rcvbuf - ilq buffer to put back on rq
 * @qp: ilq's qp resource
 * @buf: puda buffer for rcv q
 * @wqe_idx: wqe index of completed rcvbuf
 */
static void irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
                                     struct irdma_puda_buf *buf, u32 wqe_idx)
{
        __le64 *wqe;
        u64 offset8, offset24;

        /* Synch buffer for use by device */
        dma_sync_single_for_device(qp->dev->hw->device, buf->mem.pa,
                                   buf->mem.size, DMA_BIDIRECTIONAL);
        wqe = qp->qp_uk.rq_base[wqe_idx].elem;
        get_64bit_val(wqe, 24, &offset24);
        if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
                get_64bit_val(wqe, 8, &offset8);
                if (offset24)
                        offset8 &= ~FIELD_PREP(IRDMAQPSQ_VALID, 1);
                else
                        offset8 |= FIELD_PREP(IRDMAQPSQ_VALID, 1);
                set_64bit_val(wqe, 8, offset8);
                dma_wmb(); /* make sure WQE is written before valid bit is set */
        }
        if (offset24)
                offset24 = 0;
        else
                offset24 = FIELD_PREP(IRDMAQPSQ_VALID, 1);

        set_64bit_val(wqe, 24, offset24);
}

/**
 * irdma_ieq_get_fpdu_len - get length of fpdu with or without marker
 * @pfpdu: pointer to fpdu
 * @datap: pointer to data in the buffer
 * @rcv_seq: seqnum of the data buffer
 */
static u16 irdma_ieq_get_fpdu_len(struct irdma_pfpdu *pfpdu, u8 *datap,
                                  u32 rcv_seq)
{
        u32 marker_seq, end_seq, blk_start;
        u8 marker_len = pfpdu->marker_len;
        u16 total_len = 0;
        u16 fpdu_len;

        blk_start = (pfpdu->rcv_start_seq - rcv_seq) & (IRDMA_MRK_BLK_SZ - 1);
        if (!blk_start) {
                total_len = marker_len;
                marker_seq = rcv_seq + IRDMA_MRK_BLK_SZ;
                if (marker_len && *(u32 *)datap)
                        return 0;
        } else {
                marker_seq = rcv_seq + blk_start;
        }

        datap += total_len;
        fpdu_len = ntohs(*(__be16 *)datap);
        fpdu_len += IRDMA_IEQ_MPA_FRAMING;
        fpdu_len = (fpdu_len + 3) & 0xfffc;

        if (fpdu_len > pfpdu->max_fpdu_data)
                return 0;

        total_len += fpdu_len;
        end_seq = rcv_seq + total_len;
        while ((int)(marker_seq - end_seq) < 0) {
                total_len += marker_len;
                end_seq += marker_len;
                marker_seq += IRDMA_MRK_BLK_SZ;
        }

        return total_len;
}
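
/*
 * Worked example (assumptions: IRDMA_IEQ_MPA_FRAMING is the 6 bytes of MPA
 * length field plus CRC, IRDMA_MRK_BLK_SZ is 512): for an MPA length field
 * of 100 and marker_len == 4,
 *
 *        fpdu_len = 100 + 6;             // 106
 *        fpdu_len = (106 + 3) & 0xfffc;  // 108, padded to 4-byte boundary
 *
 * and one marker_len is then added per 512-byte marker block boundary the
 * FPDU crosses, which is what the final while loop accounts for.
 */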

/**
 * irdma_ieq_copy_to_txbuf - copy data from rcv buf to tx buf
 * @buf: rcv buffer with partial
 * @txbuf: tx buffer for sending back
 * @buf_offset: rcv buffer offset to copy from
 * @txbuf_offset: at offset in tx buf to copy
 * @len: length of data to copy
 */
static void irdma_ieq_copy_to_txbuf(struct irdma_puda_buf *buf,
                                    struct irdma_puda_buf *txbuf,
                                    u16 buf_offset, u32 txbuf_offset, u32 len)
{
        void *mem1 = (u8 *)buf->mem.va + buf_offset;
        void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;

        memcpy(mem2, mem1, len);
}

/**
 * irdma_ieq_setup_tx_buf - setup tx buffer for partial handling
 * @buf: receive buffer with partial
 * @txbuf: buffer to prepare
 */
static void irdma_ieq_setup_tx_buf(struct irdma_puda_buf *buf,
                                   struct irdma_puda_buf *txbuf)
{
        txbuf->tcphlen = buf->tcphlen;
        txbuf->ipv4 = buf->ipv4;

        if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
                txbuf->hdrlen = txbuf->tcphlen;
                irdma_ieq_copy_to_txbuf(buf, txbuf, IRDMA_TCP_OFFSET, 0,
                                        txbuf->hdrlen);
        } else {
                txbuf->maclen = buf->maclen;
                txbuf->hdrlen = buf->hdrlen;
                irdma_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);
        }
}

/**
 * irdma_ieq_check_first_buf - check if rcv buffer's seq is in range
 * @buf: receive exception buffer
 * @fps: first partial sequence number
 */
static void irdma_ieq_check_first_buf(struct irdma_puda_buf *buf, u32 fps)
{
        u32 offset;

        if (buf->seqnum < fps) {
                offset = fps - buf->seqnum;
                if (offset > buf->datalen)
                        return;
                buf->data += offset;
                buf->datalen -= (u16)offset;
                buf->seqnum = fps;
        }
}

/**
 * irdma_ieq_compl_pfpdu - write txbuf with full fpdu
 * @ieq: ieq resource
 * @rxlist: ieq's received buffer list
 * @pbufl: temporary list for buffers for fpdu
 * @txbuf: tx buffer for fpdu
 * @fpdu_len: total length of fpdu
 */
static void irdma_ieq_compl_pfpdu(struct irdma_puda_rsrc *ieq,
                                  struct list_head *rxlist,
                                  struct list_head *pbufl,
                                  struct irdma_puda_buf *txbuf, u16 fpdu_len)
{
        struct irdma_puda_buf *buf;
        u32 nextseqnum;
        u16 txoffset, bufoffset;

        buf = irdma_puda_get_listbuf(pbufl);
        if (!buf)
                return;

        nextseqnum = buf->seqnum + fpdu_len;
        irdma_ieq_setup_tx_buf(buf, txbuf);
        if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
                txoffset = txbuf->hdrlen;
                txbuf->totallen = txbuf->hdrlen + fpdu_len;
                txbuf->data = (u8 *)txbuf->mem.va + txoffset;
        } else {
                txoffset = buf->hdrlen;
                txbuf->totallen = buf->hdrlen + fpdu_len;
                txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
        }
        bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);

        do {
                if (buf->datalen >= fpdu_len) {
                        /* copied full fpdu */
                        irdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset,
                                                fpdu_len);
                        buf->datalen -= fpdu_len;
                        buf->data += fpdu_len;
                        buf->seqnum = nextseqnum;
                        break;
                }
                /* copy partial fpdu */
                irdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset,
                                        buf->datalen);
                txoffset += buf->datalen;
                fpdu_len -= buf->datalen;
                irdma_puda_ret_bufpool(ieq, buf);
                buf = irdma_puda_get_listbuf(pbufl);
                if (!buf)
                        return;

                bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
        } while (1);

        /* last buffer on the list */
        if (buf->datalen)
                list_add(&buf->list, rxlist);
        else
                irdma_puda_ret_bufpool(ieq, buf);
}

/**
 * irdma_ieq_create_pbufl - create buffer list for single fpdu
 * @pfpdu: pointer to fpdu
 * @rxlist: resource list for receive ieq buffers
 * @pbufl: temp. list for buffers for fpdu
 * @buf: first receive buffer
 * @fpdu_len: total length of fpdu
 */
static int irdma_ieq_create_pbufl(struct irdma_pfpdu *pfpdu,
                                  struct list_head *rxlist,
                                  struct list_head *pbufl,
                                  struct irdma_puda_buf *buf, u16 fpdu_len)
{
        int status = 0;
        struct irdma_puda_buf *nextbuf;
        u32 nextseqnum;
        u16 plen = fpdu_len - buf->datalen;
        bool done = false;

        nextseqnum = buf->seqnum + buf->datalen;
        do {
                nextbuf = irdma_puda_get_listbuf(rxlist);
                if (!nextbuf) {
                        status = -ENOBUFS;
                        break;
                }
                list_add_tail(&nextbuf->list, pbufl);
                if (nextbuf->seqnum != nextseqnum) {
                        pfpdu->bad_seq_num++;
                        status = -ERANGE;
                        break;
                }
                if (nextbuf->datalen >= plen) {
                        done = true;
                } else {
                        plen -= nextbuf->datalen;
                        nextseqnum = nextbuf->seqnum + nextbuf->datalen;
                }

        } while (!done);

        return status;
}

/**
 * irdma_ieq_handle_partial - process partial fpdu buffer
 * @ieq: ieq resource
 * @pfpdu: partial management per user qp
 * @buf: receive buffer
 * @fpdu_len: fpdu len in the buffer
 */
static int irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq,
                                    struct irdma_pfpdu *pfpdu,
                                    struct irdma_puda_buf *buf, u16 fpdu_len)
{
        int status = 0;
        u8 *crcptr;
        u32 mpacrc;
        u32 seqnum = buf->seqnum;
        struct list_head pbufl; /* partial buffer list */
        struct irdma_puda_buf *txbuf = NULL;
        struct list_head *rxlist = &pfpdu->rxlist;

        ieq->partials_handled++;

        INIT_LIST_HEAD(&pbufl);
        list_add(&buf->list, &pbufl);

        status = irdma_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
        if (status)
                goto error;

        txbuf = irdma_puda_get_bufpool(ieq);
        if (!txbuf) {
                pfpdu->no_tx_bufs++;
                status = -ENOBUFS;
                goto error;
        }

        irdma_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
        irdma_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);

        crcptr = txbuf->data + fpdu_len - 4;
        mpacrc = *(u32 *)crcptr;
        if (ieq->check_crc) {
                status = irdma_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
                                                (fpdu_len - 4), mpacrc);
                if (status) {
                        ibdev_dbg(to_ibdev(ieq->dev), "IEQ: error bad crc\n");
                        goto error;
                }
        }

        print_hex_dump_debug("IEQ: IEQ TX BUFFER", DUMP_PREFIX_OFFSET, 16, 8,
                             txbuf->mem.va, txbuf->totallen, false);
        if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
                txbuf->ah_id = pfpdu->ah->ah_info.ah_idx;
        txbuf->do_lpb = true;
        irdma_puda_send_buf(ieq, txbuf);
        pfpdu->rcv_nxt = seqnum + fpdu_len;
        return status;

error:
        while (!list_empty(&pbufl)) {
                buf = list_last_entry(&pbufl, struct irdma_puda_buf, list);
                list_move(&buf->list, rxlist);
        }
        if (txbuf)
                irdma_puda_ret_bufpool(ieq, txbuf);

        return status;
}

/**
 * irdma_ieq_process_buf - process buffer rcvd for ieq
 * @ieq: ieq resource
 * @pfpdu: partial management per user qp
 * @buf: receive buffer
 */
static int irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq,
                                 struct irdma_pfpdu *pfpdu,
                                 struct irdma_puda_buf *buf)
{
        u16 fpdu_len = 0;
        u16 datalen = buf->datalen;
        u8 *datap = buf->data;
        u8 *crcptr;
        u16 ioffset = 0;
        u32 mpacrc;
        u32 seqnum = buf->seqnum;
        u16 len = 0;
        u16 full = 0;
        bool partial = false;
        struct irdma_puda_buf *txbuf;
        struct list_head *rxlist = &pfpdu->rxlist;
        int ret = 0;

        ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
        while (datalen) {
                fpdu_len = irdma_ieq_get_fpdu_len(pfpdu, datap, buf->seqnum);
                if (!fpdu_len) {
                        ibdev_dbg(to_ibdev(ieq->dev),
                                  "IEQ: error bad fpdu len\n");
                        list_add(&buf->list, rxlist);
                        return -EINVAL;
                }

                if (datalen < fpdu_len) {
                        partial = true;
                        break;
                }
                crcptr = datap + fpdu_len - 4;
                mpacrc = *(u32 *)crcptr;
                if (ieq->check_crc)
                        ret = irdma_ieq_check_mpacrc(ieq->hash_desc, datap,
                                                     fpdu_len - 4, mpacrc);
                if (ret) {
                        list_add(&buf->list, rxlist);
                        ibdev_dbg(to_ibdev(ieq->dev),
                                  "ERR: IRDMA_ERR_MPA_CRC\n");
                        return -EINVAL;
                }
                full++;
                pfpdu->fpdu_processed++;
                ieq->fpdu_processed++;
                datap += fpdu_len;
                len += fpdu_len;
                datalen -= fpdu_len;
        }
        if (full) {
                /* copy full pdu's in the txbuf and send them out */
                txbuf = irdma_puda_get_bufpool(ieq);
                if (!txbuf) {
                        pfpdu->no_tx_bufs++;
                        list_add(&buf->list, rxlist);
                        return -ENOBUFS;
                }
                /* modify txbuf's buffer header */
                irdma_ieq_setup_tx_buf(buf, txbuf);
                /* copy full fpdu's to new buffer */
                if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
                        irdma_ieq_copy_to_txbuf(buf, txbuf, ioffset,
                                                txbuf->hdrlen, len);
                        txbuf->totallen = txbuf->hdrlen + len;
                        txbuf->ah_id = pfpdu->ah->ah_info.ah_idx;
                } else {
                        irdma_ieq_copy_to_txbuf(buf, txbuf, ioffset,
                                                buf->hdrlen, len);
                        txbuf->totallen = buf->hdrlen + len;
                }
                irdma_ieq_update_tcpip_info(txbuf, len, buf->seqnum);
                print_hex_dump_debug("IEQ: IEQ TX BUFFER", DUMP_PREFIX_OFFSET,
                                     16, 8, txbuf->mem.va, txbuf->totallen,
                                     false);
                txbuf->do_lpb = true;
                irdma_puda_send_buf(ieq, txbuf);

                if (!datalen) {
                        pfpdu->rcv_nxt = buf->seqnum + len;
                        irdma_puda_ret_bufpool(ieq, buf);
                        return 0;
                }
                buf->data = datap;
                buf->seqnum = seqnum + len;
                buf->datalen = datalen;
                pfpdu->rcv_nxt = buf->seqnum;
        }
        if (partial)
                return irdma_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);

        return 0;
}

/**
 * irdma_ieq_process_fpdus - process fpdu's buffers on its list
 * @qp: qp for which partial fpdus
 * @ieq: ieq resource
 */
void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp,
                             struct irdma_puda_rsrc *ieq)
{
        struct irdma_pfpdu *pfpdu = &qp->pfpdu;
        struct list_head *rxlist = &pfpdu->rxlist;
        struct irdma_puda_buf *buf;
        int status;

        do {
                if (list_empty(rxlist))
                        break;
                buf = irdma_puda_get_listbuf(rxlist);
                if (!buf) {
                        ibdev_dbg(to_ibdev(ieq->dev), "IEQ: error no buf\n");
                        break;
                }
                if (buf->seqnum != pfpdu->rcv_nxt) {
                        /* This could be out of order or missing packet */
                        pfpdu->out_of_order++;
                        list_add(&buf->list, rxlist);
                        break;
                }
                /* keep processing buffers from the head of the list */
                status = irdma_ieq_process_buf(ieq, pfpdu, buf);
                if (status == -EINVAL) {
                        pfpdu->mpa_crc_err = true;
                        while (!list_empty(rxlist)) {
                                buf = irdma_puda_get_listbuf(rxlist);
                                irdma_puda_ret_bufpool(ieq, buf);
                                pfpdu->crc_err++;
                                ieq->crc_err++;
                        }
                        /* create CQP for AE */
                        irdma_ieq_mpa_crc_ae(ieq->dev, qp);
                }
        } while (!status);
}

/**
 * irdma_ieq_create_ah - create an address handle for IEQ
 * @qp: qp pointer
 * @buf: buf received on IEQ used to create AH
 */
static int irdma_ieq_create_ah(struct irdma_sc_qp *qp, struct irdma_puda_buf *buf)
{
        struct irdma_ah_info ah_info = {};

        qp->pfpdu.ah_buf = buf;
        irdma_puda_ieq_get_ah_info(qp, &ah_info);
        return irdma_puda_create_ah(qp->vsi->dev, &ah_info, false,
                                    IRDMA_PUDA_RSRC_TYPE_IEQ, qp,
                                    &qp->pfpdu.ah);
}

/**
 * irdma_ieq_handle_exception - handle qp's exception
 * @ieq: ieq resource
 * @qp: qp receiving exception
 * @buf: receive buffer
 */
static void irdma_ieq_handle_exception(struct irdma_puda_rsrc *ieq,
                                       struct irdma_sc_qp *qp,
                                       struct irdma_puda_buf *buf)
{
        struct irdma_pfpdu *pfpdu = &qp->pfpdu;
        u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
        u32 rcv_wnd = hw_host_ctx[23];
        /* first partial seq # in q2 */
        u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
        struct list_head *rxlist = &pfpdu->rxlist;
        unsigned long flags = 0;
        u8 hw_rev = qp->dev->hw_attrs.uk_attrs.hw_rev;

        print_hex_dump_debug("IEQ: IEQ RX BUFFER", DUMP_PREFIX_OFFSET, 16, 8,
                             buf->mem.va, buf->totallen, false);

        spin_lock_irqsave(&pfpdu->lock, flags);
        pfpdu->total_ieq_bufs++;
        if (pfpdu->mpa_crc_err) {
                pfpdu->crc_err++;
                goto error;
        }
        if (pfpdu->mode && fps != pfpdu->fps) {
                /* clean up qp as it is new partial sequence */
                irdma_ieq_cleanup_qp(ieq, qp);
                ibdev_dbg(to_ibdev(ieq->dev), "IEQ: restarting new partial\n");
                pfpdu->mode = false;
        }

        if (!pfpdu->mode) {
                print_hex_dump_debug("IEQ: Q2 BUFFER", DUMP_PREFIX_OFFSET, 16,
                                     8, (u64 *)qp->q2_buf, 128, false);
                /* First_Partial_Sequence_Number check */
                pfpdu->rcv_nxt = fps;
                pfpdu->fps = fps;
                pfpdu->mode = true;
                pfpdu->max_fpdu_data = (buf->ipv4) ?
                                       (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV4) :
                                       (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV6);
                pfpdu->pmode_count++;
                ieq->pmode_count++;
                INIT_LIST_HEAD(rxlist);
                irdma_ieq_check_first_buf(buf, fps);
        }

        if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) {
                pfpdu->bad_seq_num++;
                ieq->bad_seq_num++;
                goto error;
        }

        if (!list_empty(rxlist)) {
                if (buf->seqnum != pfpdu->nextseqnum) {
                        irdma_send_ieq_ack(qp);
                        /* throw away out-of-order, duplicates */
                        goto error;
                }
        }
        /* Insert buf before head */
        list_add_tail(&buf->list, rxlist);
        pfpdu->nextseqnum = buf->seqnum + buf->datalen;
        pfpdu->lastrcv_buf = buf;
        if (hw_rev >= IRDMA_GEN_2 && !pfpdu->ah) {
                irdma_ieq_create_ah(qp, buf);
                if (!pfpdu->ah)
                        goto error;
                goto exit;
        }
        if (hw_rev == IRDMA_GEN_1)
                irdma_ieq_process_fpdus(qp, ieq);
        else if (pfpdu->ah && pfpdu->ah->ah_info.ah_valid)
                irdma_ieq_process_fpdus(qp, ieq);
exit:
        spin_unlock_irqrestore(&pfpdu->lock, flags);

        return;

error:
        irdma_puda_ret_bufpool(ieq, buf);
        spin_unlock_irqrestore(&pfpdu->lock, flags);
}

/**
 * irdma_ieq_receive - received exception buffer
 * @vsi: VSI of device
 * @buf: exception buffer received
 */
static void irdma_ieq_receive(struct irdma_sc_vsi *vsi,
                              struct irdma_puda_buf *buf)
{
        struct irdma_puda_rsrc *ieq = vsi->ieq;
        struct irdma_sc_qp *qp = NULL;
        u32 wqe_idx = ieq->compl_rxwqe_idx;

        qp = irdma_ieq_get_qp(vsi->dev, buf);
        if (!qp) {
                ieq->stats_bad_qp_id++;
                irdma_puda_ret_bufpool(ieq, buf);
        } else {
                irdma_ieq_handle_exception(ieq, qp, buf);
        }
        /*
         * ieq->rx_wqe_idx is used by irdma_puda_replenish_rq()
         * to know at which wqe_idx to start replenishing the rq
         */
        if (!ieq->rxq_invalid_cnt)
                ieq->rx_wqe_idx = wqe_idx;
        ieq->rxq_invalid_cnt++;
}

/**
 * irdma_ieq_tx_compl - put back after sending completed exception buffer
 * @vsi: sc VSI struct
 * @sqwrid: pointer to puda buffer
 */
static void irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid)
{
        struct irdma_puda_rsrc *ieq = vsi->ieq;
        struct irdma_puda_buf *buf = sqwrid;

        irdma_puda_ret_bufpool(ieq, buf);
}

/**
 * irdma_ieq_cleanup_qp - qp is being destroyed
 * @ieq: ieq resource
 * @qp: all pending fpdu buffers
 */
void irdma_ieq_cleanup_qp(struct irdma_puda_rsrc *ieq, struct irdma_sc_qp *qp)
{
        struct irdma_puda_buf *buf;
        struct irdma_pfpdu *pfpdu = &qp->pfpdu;
        struct list_head *rxlist = &pfpdu->rxlist;

        if (qp->pfpdu.ah) {
                irdma_puda_free_ah(ieq->dev, qp->pfpdu.ah);
                qp->pfpdu.ah = NULL;
                qp->pfpdu.ah_buf = NULL;
        }

        if (!pfpdu->mode)
                return;

        while (!list_empty(rxlist)) {
                buf = irdma_puda_get_listbuf(rxlist);
                irdma_puda_ret_bufpool(ieq, buf);
        }
}