1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2/* Copyright (c) 2015 - 2021 Intel Corporation */
3#include <linux/etherdevice.h>
4
5#include "osdep.h"
6#include "hmc.h"
7#include "defs.h"
8#include "type.h"
9#include "ws.h"
10#include "protos.h"
11
12/**
13 * irdma_get_qp_from_list - get next qp from a list
14 * @head: Listhead of qp's
15 * @qp: current qp
16 */
17struct irdma_sc_qp *irdma_get_qp_from_list(struct list_head *head,
18 struct irdma_sc_qp *qp)
19{
20 struct list_head *lastentry;
21 struct list_head *entry = NULL;
22
23 if (list_empty(head))
24 return NULL;
25
26 if (!qp) {
27 entry = head->next;
28 } else {
29 lastentry = &qp->list;
30 entry = lastentry->next;
31 if (entry == head)
32 return NULL;
33 }
34
35 return container_of(entry, struct irdma_sc_qp, list);
36}
37
38/**
39 * irdma_sc_suspend_resume_qps - suspend/resume all qp's on VSI
40 * @vsi: the VSI struct pointer
41 * @op: Set to IRDMA_OP_RESUME or IRDMA_OP_SUSPEND
42 */
43void irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 op)
44{
45 struct irdma_sc_qp *qp = NULL;
46 u8 i;
47
48 for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
49 mutex_lock(&vsi->qos[i].qos_mutex);
50 qp = irdma_get_qp_from_list(head: &vsi->qos[i].qplist, qp);
51 while (qp) {
52 if (op == IRDMA_OP_RESUME) {
53 if (!qp->dev->ws_add(vsi, i)) {
54 qp->qs_handle =
55 vsi->qos[qp->user_pri].qs_handle;
56 irdma_cqp_qp_suspend_resume(qp, cmd: op);
57 } else {
58 irdma_cqp_qp_suspend_resume(qp, cmd: op);
59 irdma_modify_qp_to_err(sc_qp: qp);
60 }
61 } else if (op == IRDMA_OP_SUSPEND) {
62 /* issue cqp suspend command */
63 if (!irdma_cqp_qp_suspend_resume(qp, cmd: op))
64 atomic_inc(v: &vsi->qp_suspend_reqs);
65 }
66 qp = irdma_get_qp_from_list(head: &vsi->qos[i].qplist, qp);
67 }
68 mutex_unlock(lock: &vsi->qos[i].qos_mutex);
69 }
70}
71
/**
 * irdma_set_qos_info - set per-priority qos attributes of a VSI
 * @vsi: RDMA VSI pointer
 * @l2p: l2 parameters supplied by the LAN driver
 *
 * For GEN_3 and later hardware the qs_handles are copied straight from
 * the device and marked valid. For older hardware the per-priority
 * bandwidth, priority type and traffic-class mapping are derived from
 * @l2p; in DSCP mode the dscp map is copied and up2tc becomes identity.
 */
static void irdma_set_qos_info(struct irdma_sc_vsi *vsi,
			       struct irdma_l2params *l2p)
{
	u8 i;

	/* GEN_3+: take qs_handles from the device, nothing else to derive */
	if (vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
		for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
			vsi->qos[i].qs_handle = vsi->dev->qos[i].qs_handle;
			vsi->qos[i].valid = true;
		}

		return;
	}
	vsi->qos_rel_bw = l2p->vsi_rel_bw;
	vsi->qos_prio_type = l2p->vsi_prio_type;
	vsi->dscp_mode = l2p->dscp_mode;
	if (l2p->dscp_mode) {
		memcpy(vsi->dscp_map, l2p->dscp_map, sizeof(vsi->dscp_map));
		/* in DSCP mode user priority maps 1:1 to traffic class */
		for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
			l2p->up2tc[i] = i;
	}
	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
		/* GEN_1 supplies qs_handles per priority via l2 params */
		if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
			vsi->qos[i].qs_handle = l2p->qs_handle_list[i];
		vsi->qos[i].traffic_class = l2p->up2tc[i];
		vsi->qos[i].rel_bw =
			l2p->tc_info[vsi->qos[i].traffic_class].rel_bw;
		vsi->qos[i].prio_type =
			l2p->tc_info[vsi->qos[i].traffic_class].prio_type;
		vsi->qos[i].valid = false;
	}
}
104
105/**
106 * irdma_change_l2params - given the new l2 parameters, change all qp
107 * @vsi: RDMA VSI pointer
108 * @l2params: New parameters from l2
109 */
110void irdma_change_l2params(struct irdma_sc_vsi *vsi,
111 struct irdma_l2params *l2params)
112{
113 if (l2params->mtu_changed) {
114 vsi->mtu = l2params->mtu;
115 if (vsi->ieq)
116 irdma_reinitialize_ieq(vsi);
117 }
118
119 if (!l2params->tc_changed)
120 return;
121
122 vsi->tc_change_pending = false;
123 irdma_set_qos_info(vsi, l2p: l2params);
124 irdma_sc_suspend_resume_qps(vsi, op: IRDMA_OP_RESUME);
125}
126
127/**
128 * irdma_qp_rem_qos - remove qp from qos lists during destroy qp
129 * @qp: qp to be removed from qos
130 */
131void irdma_qp_rem_qos(struct irdma_sc_qp *qp)
132{
133 struct irdma_sc_vsi *vsi = qp->vsi;
134
135 ibdev_dbg(to_ibdev(qp->dev),
136 "DCB: DCB: Remove qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
137 qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
138 qp->on_qoslist);
139 mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
140 if (qp->on_qoslist) {
141 qp->on_qoslist = false;
142 list_del(entry: &qp->list);
143 }
144 mutex_unlock(lock: &vsi->qos[qp->user_pri].qos_mutex);
145}
146
147/**
148 * irdma_qp_add_qos - called during setctx for qp to be added to qos
149 * @qp: qp to be added to qos
150 */
151void irdma_qp_add_qos(struct irdma_sc_qp *qp)
152{
153 struct irdma_sc_vsi *vsi = qp->vsi;
154
155 ibdev_dbg(to_ibdev(qp->dev),
156 "DCB: DCB: Add qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
157 qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
158 qp->on_qoslist);
159 mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
160 if (!qp->on_qoslist) {
161 list_add(new: &qp->list, head: &vsi->qos[qp->user_pri].qplist);
162 qp->on_qoslist = true;
163 qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
164 }
165 mutex_unlock(lock: &vsi->qos[qp->user_pri].qos_mutex);
166}
167
168/**
169 * irdma_sc_pd_init - initialize sc pd struct
170 * @dev: sc device struct
171 * @pd: sc pd ptr
172 * @pd_id: pd_id for allocated pd
173 * @abi_ver: User/Kernel ABI version
174 */
175void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
176 int abi_ver)
177{
178 pd->pd_id = pd_id;
179 pd->abi_ver = abi_ver;
180 pd->dev = dev;
181}
182
183/**
184 * irdma_sc_add_arp_cache_entry - cqp wqe add arp cache entry
185 * @cqp: struct for cqp hw
186 * @info: arp entry information
187 * @scratch: u64 saved to be used during cqp completion
188 * @post_sq: flag for cqp db to ring
189 */
190static int irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
191 struct irdma_add_arp_cache_entry_info *info,
192 u64 scratch, bool post_sq)
193{
194 __le64 *wqe;
195 u64 hdr;
196
197 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
198 if (!wqe)
199 return -ENOMEM;
200 set_64bit_val(wqe_words: wqe, byte_index: 8, val: info->reach_max);
201 set_64bit_val(wqe_words: wqe, byte_index: 16, val: ether_addr_to_u64(addr: info->mac_addr));
202
203 hdr = info->arp_index |
204 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
205 FIELD_PREP(IRDMA_CQPSQ_MAT_PERMANENT, (info->permanent ? 1 : 0)) |
206 FIELD_PREP(IRDMA_CQPSQ_MAT_ENTRYVALID, 1) |
207 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
208 dma_wmb(); /* make sure WQE is written before valid bit is set */
209
210 set_64bit_val(wqe_words: wqe, byte_index: 24, val: hdr);
211
212 print_hex_dump_debug("WQE: ARP_CACHE_ENTRY WQE", DUMP_PREFIX_OFFSET,
213 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
214 if (post_sq)
215 irdma_sc_cqp_post_sq(cqp);
216
217 return 0;
218}
219
220/**
221 * irdma_sc_del_arp_cache_entry - dele arp cache entry
222 * @cqp: struct for cqp hw
223 * @scratch: u64 saved to be used during cqp completion
224 * @arp_index: arp index to delete arp entry
225 * @post_sq: flag for cqp db to ring
226 */
227static int irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
228 u16 arp_index, bool post_sq)
229{
230 __le64 *wqe;
231 u64 hdr;
232
233 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
234 if (!wqe)
235 return -ENOMEM;
236
237 hdr = arp_index |
238 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
239 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
240 dma_wmb(); /* make sure WQE is written before valid bit is set */
241
242 set_64bit_val(wqe_words: wqe, byte_index: 24, val: hdr);
243
244 print_hex_dump_debug("WQE: ARP_CACHE_DEL_ENTRY WQE",
245 DUMP_PREFIX_OFFSET, 16, 8, wqe,
246 IRDMA_CQP_WQE_SIZE * 8, false);
247 if (post_sq)
248 irdma_sc_cqp_post_sq(cqp);
249
250 return 0;
251}
252
253/**
254 * irdma_sc_manage_apbvt_entry - for adding and deleting apbvt entries
255 * @cqp: struct for cqp hw
256 * @info: info for apbvt entry to add or delete
257 * @scratch: u64 saved to be used during cqp completion
258 * @post_sq: flag for cqp db to ring
259 */
260static int irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
261 struct irdma_apbvt_info *info,
262 u64 scratch, bool post_sq)
263{
264 __le64 *wqe;
265 u64 hdr;
266
267 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
268 if (!wqe)
269 return -ENOMEM;
270
271 set_64bit_val(wqe_words: wqe, byte_index: 16, val: info->port);
272
273 hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_APBVT) |
274 FIELD_PREP(IRDMA_CQPSQ_MAPT_ADDPORT, info->add) |
275 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
276 dma_wmb(); /* make sure WQE is written before valid bit is set */
277
278 set_64bit_val(wqe_words: wqe, byte_index: 24, val: hdr);
279
280 print_hex_dump_debug("WQE: MANAGE_APBVT WQE", DUMP_PREFIX_OFFSET, 16,
281 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
282 if (post_sq)
283 irdma_sc_cqp_post_sq(cqp);
284
285 return 0;
286}
287
288/**
289 * irdma_sc_manage_qhash_table_entry - manage quad hash entries
290 * @cqp: struct for cqp hw
291 * @info: info for quad hash to manage
292 * @scratch: u64 saved to be used during cqp completion
293 * @post_sq: flag for cqp db to ring
294 *
295 * This is called before connection establishment is started.
296 * For passive connections, when listener is created, it will
297 * call with entry type of IRDMA_QHASH_TYPE_TCP_SYN with local
298 * ip address and tcp port. When SYN is received (passive
299 * connections) or sent (active connections), this routine is
300 * called with entry type of IRDMA_QHASH_TYPE_TCP_ESTABLISHED
301 * and quad is passed in info.
302 *
303 * When iwarp connection is done and its state moves to RTS, the
304 * quad hash entry in the hardware will point to iwarp's qp
305 * number and requires no calls from the driver.
306 */
307static int
308irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
309 struct irdma_qhash_table_info *info,
310 u64 scratch, bool post_sq)
311{
312 __le64 *wqe;
313 u64 qw1 = 0;
314 u64 qw2 = 0;
315 u64 temp;
316 struct irdma_sc_vsi *vsi = info->vsi;
317
318 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
319 if (!wqe)
320 return -ENOMEM;
321
322 set_64bit_val(wqe_words: wqe, byte_index: 0, val: ether_addr_to_u64(addr: info->mac_addr));
323
324 qw1 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QPN, info->qp_num) |
325 FIELD_PREP(IRDMA_CQPSQ_QHASH_DEST_PORT, info->dest_port);
326 if (info->ipv4_valid) {
327 set_64bit_val(wqe_words: wqe, byte_index: 48,
328 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[0]));
329 } else {
330 set_64bit_val(wqe_words: wqe, byte_index: 56,
331 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->dest_ip[0]) |
332 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->dest_ip[1]));
333
334 set_64bit_val(wqe_words: wqe, byte_index: 48,
335 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->dest_ip[2]) |
336 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[3]));
337 }
338 qw2 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QS_HANDLE,
339 vsi->qos[info->user_pri].qs_handle);
340 if (info->vlan_valid)
341 qw2 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANID, info->vlan_id);
342 set_64bit_val(wqe_words: wqe, byte_index: 16, val: qw2);
343 if (info->entry_type == IRDMA_QHASH_TYPE_TCP_ESTABLISHED) {
344 qw1 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_SRC_PORT, info->src_port);
345 if (!info->ipv4_valid) {
346 set_64bit_val(wqe_words: wqe, byte_index: 40,
347 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->src_ip[0]) |
348 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->src_ip[1]));
349 set_64bit_val(wqe_words: wqe, byte_index: 32,
350 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->src_ip[2]) |
351 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[3]));
352 } else {
353 set_64bit_val(wqe_words: wqe, byte_index: 32,
354 FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[0]));
355 }
356 }
357
358 set_64bit_val(wqe_words: wqe, byte_index: 8, val: qw1);
359 temp = FIELD_PREP(IRDMA_CQPSQ_QHASH_WQEVALID, cqp->polarity) |
360 FIELD_PREP(IRDMA_CQPSQ_QHASH_OPCODE,
361 IRDMA_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY) |
362 FIELD_PREP(IRDMA_CQPSQ_QHASH_MANAGE, info->manage) |
363 FIELD_PREP(IRDMA_CQPSQ_QHASH_IPV4VALID, info->ipv4_valid) |
364 FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANVALID, info->vlan_valid) |
365 FIELD_PREP(IRDMA_CQPSQ_QHASH_ENTRYTYPE, info->entry_type);
366 dma_wmb(); /* make sure WQE is written before valid bit is set */
367
368 set_64bit_val(wqe_words: wqe, byte_index: 24, val: temp);
369
370 print_hex_dump_debug("WQE: MANAGE_QHASH WQE", DUMP_PREFIX_OFFSET, 16,
371 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
372 if (post_sq)
373 irdma_sc_cqp_post_sq(cqp);
374
375 return 0;
376}
377
378/**
379 * irdma_sc_qp_init - initialize qp
380 * @qp: sc qp
381 * @info: initialization qp info
382 */
383int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
384{
385 int ret_code;
386 u32 pble_obj_cnt;
387 u16 wqe_size;
388
389 if (info->qp_uk_init_info.max_sq_frag_cnt >
390 info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags ||
391 info->qp_uk_init_info.max_rq_frag_cnt >
392 info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags)
393 return -EINVAL;
394
395 qp->dev = info->pd->dev;
396 qp->vsi = info->vsi;
397 qp->ieq_qp = info->vsi->exception_lan_q;
398 qp->sq_pa = info->sq_pa;
399 qp->rq_pa = info->rq_pa;
400 qp->hw_host_ctx_pa = info->host_ctx_pa;
401 qp->q2_pa = info->q2_pa;
402 qp->shadow_area_pa = info->shadow_area_pa;
403 qp->q2_buf = info->q2;
404 qp->pd = info->pd;
405 qp->hw_host_ctx = info->host_ctx;
406 info->qp_uk_init_info.wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
407 ret_code = irdma_uk_qp_init(qp: &qp->qp_uk, info: &info->qp_uk_init_info);
408 if (ret_code)
409 return ret_code;
410
411 qp->virtual_map = info->virtual_map;
412 pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
413
414 if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) ||
415 (!info->qp_uk_init_info.srq_uk &&
416 info->virtual_map && info->rq_pa >= pble_obj_cnt))
417 return -EINVAL;
418
419 qp->llp_stream_handle = (void *)(-1);
420 qp->hw_sq_size = irdma_get_encoded_wqe_size(wqsize: qp->qp_uk.sq_ring.size,
421 queue_type: IRDMA_QUEUE_TYPE_SQ_RQ);
422 ibdev_dbg(to_ibdev(qp->dev),
423 "WQE: hw_sq_size[%04d] sq_ring.size[%04d]\n",
424 qp->hw_sq_size, qp->qp_uk.sq_ring.size);
425 if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1 && qp->pd->abi_ver > 4)
426 wqe_size = IRDMA_WQE_SIZE_128;
427 else
428 ret_code = irdma_fragcnt_to_wqesize_rq(frag_cnt: qp->qp_uk.max_rq_frag_cnt,
429 wqe_size: &wqe_size);
430 if (ret_code)
431 return ret_code;
432
433 qp->hw_rq_size = irdma_get_encoded_wqe_size(wqsize: qp->qp_uk.rq_size *
434 (wqe_size / IRDMA_QP_WQE_MIN_SIZE), queue_type: IRDMA_QUEUE_TYPE_SQ_RQ);
435 ibdev_dbg(to_ibdev(qp->dev),
436 "WQE: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
437 qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
438 qp->sq_tph_val = info->sq_tph_val;
439 qp->rq_tph_val = info->rq_tph_val;
440 qp->sq_tph_en = info->sq_tph_en;
441 qp->rq_tph_en = info->rq_tph_en;
442 qp->rcv_tph_en = info->rcv_tph_en;
443 qp->xmit_tph_en = info->xmit_tph_en;
444 qp->qp_uk.first_sq_wq = info->qp_uk_init_info.first_sq_wq;
445 qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
446
447 return 0;
448}
449
450/**
451 * irdma_sc_srq_init - init sc_srq structure
452 * @srq: srq sc struct
453 * @info: parameters for srq init
454 */
455int irdma_sc_srq_init(struct irdma_sc_srq *srq,
456 struct irdma_srq_init_info *info)
457{
458 u32 srq_size_quanta;
459 int ret_code;
460
461 ret_code = irdma_uk_srq_init(srq: &srq->srq_uk, info: &info->srq_uk_init_info);
462 if (ret_code)
463 return ret_code;
464
465 srq->dev = info->pd->dev;
466 srq->pd = info->pd;
467 srq->vsi = info->vsi;
468 srq->srq_pa = info->srq_pa;
469 srq->first_pm_pbl_idx = info->first_pm_pbl_idx;
470 srq->pasid = info->pasid;
471 srq->pasid_valid = info->pasid_valid;
472 srq->srq_limit = info->srq_limit;
473 srq->leaf_pbl_size = info->leaf_pbl_size;
474 srq->virtual_map = info->virtual_map;
475 srq->tph_en = info->tph_en;
476 srq->arm_limit_event = info->arm_limit_event;
477 srq->tph_val = info->tph_value;
478 srq->shadow_area_pa = info->shadow_area_pa;
479
480 /* Smallest SRQ size is 256B i.e. 8 quanta */
481 srq_size_quanta = max((u32)IRDMA_SRQ_MIN_QUANTA,
482 srq->srq_uk.srq_size *
483 srq->srq_uk.wqe_size_multiplier);
484 srq->hw_srq_size = irdma_get_encoded_wqe_size(wqsize: srq_size_quanta,
485 queue_type: IRDMA_QUEUE_TYPE_SRQ);
486
487 return 0;
488}
489
490/**
491 * irdma_sc_srq_create - send srq create CQP WQE
492 * @srq: srq sc struct
493 * @scratch: u64 saved to be used during cqp completion
494 * @post_sq: flag for cqp db to ring
495 */
496static int irdma_sc_srq_create(struct irdma_sc_srq *srq, u64 scratch,
497 bool post_sq)
498{
499 struct irdma_sc_cqp *cqp;
500 __le64 *wqe;
501 u64 hdr;
502
503 cqp = srq->pd->dev->cqp;
504 if (srq->srq_uk.srq_id < cqp->dev->hw_attrs.min_hw_srq_id ||
505 srq->srq_uk.srq_id >
506 (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_SRQ].max_cnt - 1))
507 return -EINVAL;
508
509 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
510 if (!wqe)
511 return -ENOMEM;
512
513 set_64bit_val(wqe_words: wqe, byte_index: 0,
514 FIELD_PREP(IRDMA_CQPSQ_SRQ_SRQ_LIMIT, srq->srq_limit) |
515 FIELD_PREP(IRDMA_CQPSQ_SRQ_RQSIZE, srq->hw_srq_size) |
516 FIELD_PREP(IRDMA_CQPSQ_SRQ_RQ_WQE_SIZE, srq->srq_uk.wqe_size));
517 set_64bit_val(wqe_words: wqe, byte_index: 8, val: (uintptr_t)srq);
518 set_64bit_val(wqe_words: wqe, byte_index: 16,
519 FIELD_PREP(IRDMA_CQPSQ_SRQ_PD_ID, srq->pd->pd_id));
520 set_64bit_val(wqe_words: wqe, byte_index: 32,
521 FIELD_PREP(IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR,
522 srq->srq_pa >>
523 IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR_S));
524 set_64bit_val(wqe_words: wqe, byte_index: 40,
525 FIELD_PREP(IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR,
526 srq->shadow_area_pa >>
527 IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR_S));
528 set_64bit_val(wqe_words: wqe, byte_index: 48,
529 FIELD_PREP(IRDMA_CQPSQ_SRQ_FIRST_PM_PBL_IDX,
530 srq->first_pm_pbl_idx));
531
532 hdr = srq->srq_uk.srq_id |
533 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_SRQ) |
534 FIELD_PREP(IRDMA_CQPSQ_SRQ_LEAF_PBL_SIZE, srq->leaf_pbl_size) |
535 FIELD_PREP(IRDMA_CQPSQ_SRQ_VIRTMAP, srq->virtual_map) |
536 FIELD_PREP(IRDMA_CQPSQ_SRQ_ARM_LIMIT_EVENT,
537 srq->arm_limit_event) |
538 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
539
540 dma_wmb(); /* make sure WQE is written before valid bit is set */
541
542 set_64bit_val(wqe_words: wqe, byte_index: 24, val: hdr);
543
544 print_hex_dump_debug("WQE: SRQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
545 wqe, IRDMA_CQP_WQE_SIZE * 8, false);
546 if (post_sq)
547 irdma_sc_cqp_post_sq(cqp);
548
549 return 0;
550}
551
552/**
553 * irdma_sc_srq_modify - send modify_srq CQP WQE
554 * @srq: srq sc struct
555 * @info: parameters for srq modification
556 * @scratch: u64 saved to be used during cqp completion
557 * @post_sq: flag for cqp db to ring
558 */
559static int irdma_sc_srq_modify(struct irdma_sc_srq *srq,
560 struct irdma_modify_srq_info *info, u64 scratch,
561 bool post_sq)
562{
563 struct irdma_sc_cqp *cqp;
564 __le64 *wqe;
565 u64 hdr;
566
567 cqp = srq->dev->cqp;
568 if (srq->srq_uk.srq_id < cqp->dev->hw_attrs.min_hw_srq_id ||
569 srq->srq_uk.srq_id >
570 (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_SRQ].max_cnt - 1))
571 return -EINVAL;
572
573 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
574 if (!wqe)
575 return -ENOMEM;
576
577 set_64bit_val(wqe_words: wqe, byte_index: 0,
578 FIELD_PREP(IRDMA_CQPSQ_SRQ_SRQ_LIMIT, info->srq_limit) |
579 FIELD_PREP(IRDMA_CQPSQ_SRQ_RQSIZE, srq->hw_srq_size) |
580 FIELD_PREP(IRDMA_CQPSQ_SRQ_RQ_WQE_SIZE, srq->srq_uk.wqe_size));
581 set_64bit_val(wqe_words: wqe, byte_index: 8,
582 FIELD_PREP(IRDMA_CQPSQ_SRQ_SRQCTX, srq->srq_uk.srq_id));
583 set_64bit_val(wqe_words: wqe, byte_index: 16,
584 FIELD_PREP(IRDMA_CQPSQ_SRQ_PD_ID, srq->pd->pd_id));
585 set_64bit_val(wqe_words: wqe, byte_index: 32,
586 FIELD_PREP(IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR,
587 srq->srq_pa >>
588 IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR_S));
589 set_64bit_val(wqe_words: wqe, byte_index: 40,
590 FIELD_PREP(IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR,
591 srq->shadow_area_pa >>
592 IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR_S));
593 set_64bit_val(wqe_words: wqe, byte_index: 48,
594 FIELD_PREP(IRDMA_CQPSQ_SRQ_FIRST_PM_PBL_IDX,
595 srq->first_pm_pbl_idx));
596
597 hdr = srq->srq_uk.srq_id |
598 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_SRQ) |
599 FIELD_PREP(IRDMA_CQPSQ_SRQ_LEAF_PBL_SIZE, srq->leaf_pbl_size) |
600 FIELD_PREP(IRDMA_CQPSQ_SRQ_VIRTMAP, srq->virtual_map) |
601 FIELD_PREP(IRDMA_CQPSQ_SRQ_ARM_LIMIT_EVENT,
602 info->arm_limit_event) |
603 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
604 dma_wmb(); /* make sure WQE is written before valid bit is set */
605
606 set_64bit_val(wqe_words: wqe, byte_index: 24, val: hdr);
607
608 print_hex_dump_debug("WQE: SRQ_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8,
609 wqe, IRDMA_CQP_WQE_SIZE * 8, false);
610 if (post_sq)
611 irdma_sc_cqp_post_sq(cqp);
612
613 return 0;
614}
615
616/**
617 * irdma_sc_srq_destroy - send srq_destroy CQP WQE
618 * @srq: srq sc struct
619 * @scratch: u64 saved to be used during cqp completion
620 * @post_sq: flag for cqp db to ring
621 */
622static int irdma_sc_srq_destroy(struct irdma_sc_srq *srq, u64 scratch,
623 bool post_sq)
624{
625 struct irdma_sc_cqp *cqp;
626 __le64 *wqe;
627 u64 hdr;
628
629 cqp = srq->dev->cqp;
630
631 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
632 if (!wqe)
633 return -ENOMEM;
634
635 set_64bit_val(wqe_words: wqe, byte_index: 8, val: (uintptr_t)srq);
636
637 hdr = srq->srq_uk.srq_id |
638 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_SRQ) |
639 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
640 dma_wmb(); /* make sure WQE is written before valid bit is set */
641
642 set_64bit_val(wqe_words: wqe, byte_index: 24, val: hdr);
643
644 print_hex_dump_debug("WQE: SRQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
645 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
646 if (post_sq)
647 irdma_sc_cqp_post_sq(cqp);
648
649 return 0;
650}
651
652/**
653 * irdma_sc_qp_create - create qp
654 * @qp: sc qp
655 * @info: qp create info
656 * @scratch: u64 saved to be used during cqp completion
657 * @post_sq: flag for cqp db to ring
658 */
659int irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info,
660 u64 scratch, bool post_sq)
661{
662 struct irdma_sc_cqp *cqp;
663 __le64 *wqe;
664 u64 hdr;
665
666 cqp = qp->dev->cqp;
667 if (qp->qp_uk.qp_id < cqp->dev->hw_attrs.min_hw_qp_id ||
668 qp->qp_uk.qp_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt)
669 return -EINVAL;
670
671 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
672 if (!wqe)
673 return -ENOMEM;
674
675 set_64bit_val(wqe_words: wqe, byte_index: 16, val: qp->hw_host_ctx_pa);
676 set_64bit_val(wqe_words: wqe, byte_index: 40, val: qp->shadow_area_pa);
677
678 hdr = qp->qp_uk.qp_id |
679 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
680 FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, (info->ord_valid ? 1 : 0)) |
681 FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
682 FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
683 FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
684 FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
685 FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
686 FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
687 FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
688 info->arp_cache_idx_valid) |
689 FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
690 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
691 dma_wmb(); /* make sure WQE is written before valid bit is set */
692
693 set_64bit_val(wqe_words: wqe, byte_index: 24, val: hdr);
694
695 print_hex_dump_debug("WQE: QP_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
696 wqe, IRDMA_CQP_WQE_SIZE * 8, false);
697 if (post_sq)
698 irdma_sc_cqp_post_sq(cqp);
699
700 return 0;
701}
702
703/**
704 * irdma_sc_qp_modify - modify qp cqp wqe
705 * @qp: sc qp
706 * @info: modify qp info
707 * @scratch: u64 saved to be used during cqp completion
708 * @post_sq: flag for cqp db to ring
709 */
710int irdma_sc_qp_modify(struct irdma_sc_qp *qp, struct irdma_modify_qp_info *info,
711 u64 scratch, bool post_sq)
712{
713 __le64 *wqe;
714 struct irdma_sc_cqp *cqp;
715 u64 hdr;
716 u8 term_actions = 0;
717 u8 term_len = 0;
718
719 cqp = qp->dev->cqp;
720 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
721 if (!wqe)
722 return -ENOMEM;
723
724 if (info->next_iwarp_state == IRDMA_QP_STATE_TERMINATE) {
725 if (info->dont_send_fin)
726 term_actions += IRDMAQP_TERM_SEND_TERM_ONLY;
727 if (info->dont_send_term)
728 term_actions += IRDMAQP_TERM_SEND_FIN_ONLY;
729 if (term_actions == IRDMAQP_TERM_SEND_TERM_AND_FIN ||
730 term_actions == IRDMAQP_TERM_SEND_TERM_ONLY)
731 term_len = info->termlen;
732 }
733
734 set_64bit_val(wqe_words: wqe, byte_index: 8,
735 FIELD_PREP(IRDMA_CQPSQ_QP_NEWMSS, info->new_mss) |
736 FIELD_PREP(IRDMA_CQPSQ_QP_TERMLEN, term_len));
737 set_64bit_val(wqe_words: wqe, byte_index: 16, val: qp->hw_host_ctx_pa);
738 set_64bit_val(wqe_words: wqe, byte_index: 40, val: qp->shadow_area_pa);
739
740 hdr = qp->qp_uk.qp_id |
741 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_QP) |
742 FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, info->ord_valid) |
743 FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
744 FIELD_PREP(IRDMA_CQPSQ_QP_CACHEDVARVALID,
745 info->cached_var_valid) |
746 FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
747 FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
748 FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
749 FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
750 FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
751 FIELD_PREP(IRDMA_CQPSQ_QP_MSSCHANGE, info->mss_change) |
752 FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY,
753 info->remove_hash_idx) |
754 FIELD_PREP(IRDMA_CQPSQ_QP_TERMACT, term_actions) |
755 FIELD_PREP(IRDMA_CQPSQ_QP_RESETCON, info->reset_tcp_conn) |
756 FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
757 info->arp_cache_idx_valid) |
758 FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
759 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
760 dma_wmb(); /* make sure WQE is written before valid bit is set */
761
762 set_64bit_val(wqe_words: wqe, byte_index: 24, val: hdr);
763
764 print_hex_dump_debug("WQE: QP_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8,
765 wqe, IRDMA_CQP_WQE_SIZE * 8, false);
766 if (post_sq)
767 irdma_sc_cqp_post_sq(cqp);
768
769 return 0;
770}
771
772/**
773 * irdma_sc_qp_destroy - cqp destroy qp
774 * @qp: sc qp
775 * @scratch: u64 saved to be used during cqp completion
776 * @remove_hash_idx: flag if to remove hash idx
777 * @ignore_mw_bnd: memory window bind flag
778 * @post_sq: flag for cqp db to ring
779 */
780int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
781 bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq)
782{
783 __le64 *wqe;
784 struct irdma_sc_cqp *cqp;
785 u64 hdr;
786
787 cqp = qp->dev->cqp;
788 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
789 if (!wqe)
790 return -ENOMEM;
791
792 set_64bit_val(wqe_words: wqe, byte_index: 16, val: qp->hw_host_ctx_pa);
793 set_64bit_val(wqe_words: wqe, byte_index: 40, val: qp->shadow_area_pa);
794
795 hdr = qp->qp_uk.qp_id |
796 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_QP) |
797 FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
798 FIELD_PREP(IRDMA_CQPSQ_QP_IGNOREMWBOUND, ignore_mw_bnd) |
799 FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY, remove_hash_idx) |
800 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
801 dma_wmb(); /* make sure WQE is written before valid bit is set */
802
803 set_64bit_val(wqe_words: wqe, byte_index: 24, val: hdr);
804
805 print_hex_dump_debug("WQE: QP_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8,
806 wqe, IRDMA_CQP_WQE_SIZE * 8, false);
807 if (post_sq)
808 irdma_sc_cqp_post_sq(cqp);
809
810 return 0;
811}
812
813/**
814 * irdma_sc_get_encoded_ird_size -
815 * @ird_size: IRD size
816 * The ird from the connection is rounded to a supported HW setting and then encoded
817 * for ird_size field of qp_ctx. Consumers are expected to provide valid ird size based
818 * on hardware attributes. IRD size defaults to a value of 4 in case of invalid input
819 */
820static u8 irdma_sc_get_encoded_ird_size(u16 ird_size)
821{
822 switch (ird_size ?
823 roundup_pow_of_two(2 * ird_size) : 4) {
824 case 256:
825 return IRDMA_IRD_HW_SIZE_256;
826 case 128:
827 return IRDMA_IRD_HW_SIZE_128;
828 case 64:
829 case 32:
830 return IRDMA_IRD_HW_SIZE_64;
831 case 16:
832 case 8:
833 return IRDMA_IRD_HW_SIZE_16;
834 case 4:
835 default:
836 break;
837 }
838
839 return IRDMA_IRD_HW_SIZE_4;
840}
841
842/**
843 * irdma_sc_qp_setctx_roce_gen_2 - set qp's context
844 * @qp: sc qp
845 * @qp_ctx: context ptr
846 * @info: ctx info
847 */
848static void irdma_sc_qp_setctx_roce_gen_2(struct irdma_sc_qp *qp,
849 __le64 *qp_ctx,
850 struct irdma_qp_host_ctx_info *info)
851{
852 struct irdma_roce_offload_info *roce_info;
853 struct irdma_udp_offload_info *udp;
854 u8 push_mode_en;
855 u32 push_idx;
856
857 roce_info = info->roce_info;
858 udp = info->udp_info;
859 qp->user_pri = info->user_pri;
860 if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
861 push_mode_en = 0;
862 push_idx = 0;
863 } else {
864 push_mode_en = 1;
865 push_idx = qp->push_idx;
866 }
867 set_64bit_val(wqe_words: qp_ctx, byte_index: 0,
868 FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
869 FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
870 FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
871 FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
872 FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
873 FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
874 FIELD_PREP(IRDMAQPC_PMENA, push_mode_en) |
875 FIELD_PREP(IRDMAQPC_PDIDXHI, roce_info->pd_id >> 16) |
876 FIELD_PREP(IRDMAQPC_DC_TCP_EN, roce_info->dctcp_en) |
877 FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID, roce_info->err_rq_idx_valid) |
878 FIELD_PREP(IRDMAQPC_ISQP1, roce_info->is_qp1) |
879 FIELD_PREP(IRDMAQPC_ROCE_TVER, roce_info->roce_tver) |
880 FIELD_PREP(IRDMAQPC_IPV4, udp->ipv4) |
881 FIELD_PREP(IRDMAQPC_INSERTVLANTAG, udp->insert_vlan_tag));
882 set_64bit_val(wqe_words: qp_ctx, byte_index: 8, val: qp->sq_pa);
883 set_64bit_val(wqe_words: qp_ctx, byte_index: 16, val: qp->rq_pa);
884 if ((roce_info->dcqcn_en || roce_info->dctcp_en) &&
885 !(udp->tos & 0x03))
886 udp->tos |= ECN_CODE_PT_VAL;
887 set_64bit_val(wqe_words: qp_ctx, byte_index: 24,
888 FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
889 FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size) |
890 FIELD_PREP(IRDMAQPC_TTL, udp->ttl) | FIELD_PREP(IRDMAQPC_TOS, udp->tos) |
891 FIELD_PREP(IRDMAQPC_SRCPORTNUM, udp->src_port) |
892 FIELD_PREP(IRDMAQPC_DESTPORTNUM, udp->dst_port));
893 set_64bit_val(wqe_words: qp_ctx, byte_index: 32,
894 FIELD_PREP(IRDMAQPC_DESTIPADDR2, udp->dest_ip_addr[2]) |
895 FIELD_PREP(IRDMAQPC_DESTIPADDR3, udp->dest_ip_addr[3]));
896 set_64bit_val(wqe_words: qp_ctx, byte_index: 40,
897 FIELD_PREP(IRDMAQPC_DESTIPADDR0, udp->dest_ip_addr[0]) |
898 FIELD_PREP(IRDMAQPC_DESTIPADDR1, udp->dest_ip_addr[1]));
899 set_64bit_val(wqe_words: qp_ctx, byte_index: 48,
900 FIELD_PREP(IRDMAQPC_SNDMSS, udp->snd_mss) |
901 FIELD_PREP(IRDMAQPC_VLANTAG, udp->vlan_tag) |
902 FIELD_PREP(IRDMAQPC_ARPIDX, udp->arp_idx));
903 set_64bit_val(wqe_words: qp_ctx, byte_index: 56,
904 FIELD_PREP(IRDMAQPC_PKEY, roce_info->p_key) |
905 FIELD_PREP(IRDMAQPC_PDIDX, roce_info->pd_id) |
906 FIELD_PREP(IRDMAQPC_ACKCREDITS, roce_info->ack_credits) |
907 FIELD_PREP(IRDMAQPC_FLOWLABEL, udp->flow_label));
908 set_64bit_val(wqe_words: qp_ctx, byte_index: 64,
909 FIELD_PREP(IRDMAQPC_QKEY, roce_info->qkey) |
910 FIELD_PREP(IRDMAQPC_DESTQP, roce_info->dest_qp));
911 set_64bit_val(wqe_words: qp_ctx, byte_index: 80,
912 FIELD_PREP(IRDMAQPC_PSNNXT, udp->psn_nxt) |
913 FIELD_PREP(IRDMAQPC_LSN, udp->lsn));
914 set_64bit_val(wqe_words: qp_ctx, byte_index: 88,
915 FIELD_PREP(IRDMAQPC_EPSN, udp->epsn));
916 set_64bit_val(wqe_words: qp_ctx, byte_index: 96,
917 FIELD_PREP(IRDMAQPC_PSNMAX, udp->psn_max) |
918 FIELD_PREP(IRDMAQPC_PSNUNA, udp->psn_una));
919 set_64bit_val(wqe_words: qp_ctx, byte_index: 112,
920 FIELD_PREP(IRDMAQPC_CWNDROCE, udp->cwnd));
921 set_64bit_val(wqe_words: qp_ctx, byte_index: 128,
922 FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, roce_info->err_rq_idx) |
923 FIELD_PREP(IRDMAQPC_RNRNAK_THRESH, udp->rnr_nak_thresh) |
924 FIELD_PREP(IRDMAQPC_REXMIT_THRESH, udp->rexmit_thresh) |
925 FIELD_PREP(IRDMAQPC_RTOMIN, roce_info->rtomin));
926 set_64bit_val(wqe_words: qp_ctx, byte_index: 136,
927 FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
928 FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
929 set_64bit_val(wqe_words: qp_ctx, byte_index: 144,
930 FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
931 set_64bit_val(wqe_words: qp_ctx, byte_index: 152, val: ether_addr_to_u64(addr: roce_info->mac_addr) << 16);
932 set_64bit_val(wqe_words: qp_ctx, byte_index: 160,
933 FIELD_PREP(IRDMAQPC_ORDSIZE, roce_info->ord_size) |
934 FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(roce_info->ird_size)) |
935 FIELD_PREP(IRDMAQPC_WRRDRSPOK, roce_info->wr_rdresp_en) |
936 FIELD_PREP(IRDMAQPC_RDOK, roce_info->rd_en) |
937 FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
938 FIELD_PREP(IRDMAQPC_BINDEN, roce_info->bind_en) |
939 FIELD_PREP(IRDMAQPC_FASTREGEN, roce_info->fast_reg_en) |
940 FIELD_PREP(IRDMAQPC_DCQCNENABLE, roce_info->dcqcn_en) |
941 FIELD_PREP(IRDMAQPC_RCVNOICRC, roce_info->rcv_no_icrc) |
942 FIELD_PREP(IRDMAQPC_FW_CC_ENABLE, roce_info->fw_cc_enable) |
943 FIELD_PREP(IRDMAQPC_UDPRIVCQENABLE, roce_info->udprivcq_en) |
944 FIELD_PREP(IRDMAQPC_PRIVEN, roce_info->priv_mode_en) |
945 FIELD_PREP(IRDMAQPC_TIMELYENABLE, roce_info->timely_en));
946 set_64bit_val(wqe_words: qp_ctx, byte_index: 168,
947 FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
948 set_64bit_val(wqe_words: qp_ctx, byte_index: 176,
949 FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
950 FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
951 FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
952 set_64bit_val(wqe_words: qp_ctx, byte_index: 184,
953 FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, udp->local_ipaddr[3]) |
954 FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, udp->local_ipaddr[2]));
955 set_64bit_val(wqe_words: qp_ctx, byte_index: 192,
956 FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, udp->local_ipaddr[1]) |
957 FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, udp->local_ipaddr[0]));
958 set_64bit_val(wqe_words: qp_ctx, byte_index: 200,
959 FIELD_PREP(IRDMAQPC_THIGH, roce_info->t_high) |
960 FIELD_PREP(IRDMAQPC_TLOW, roce_info->t_low));
961 set_64bit_val(wqe_words: qp_ctx, byte_index: 208,
962 FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
963
964 print_hex_dump_debug("WQE: QP_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16,
965 8, qp_ctx, IRDMA_QP_CTX_SIZE, false);
966}
967
968/**
969 * irdma_sc_get_encoded_ird_size_gen_3 - get encoded IRD size for GEN 3
970 * @ird_size: IRD size
971 * The ird from the connection is rounded to a supported HW setting and then encoded
972 * for ird_size field of qp_ctx. Consumers are expected to provide valid ird size based
973 * on hardware attributes. IRD size defaults to a value of 4 in case of invalid input.
974 */
975static u8 irdma_sc_get_encoded_ird_size_gen_3(u16 ird_size)
976{
977 switch (ird_size ?
978 roundup_pow_of_two(2 * ird_size) : 4) {
979 case 4096:
980 return IRDMA_IRD_HW_SIZE_4096_GEN3;
981 case 2048:
982 return IRDMA_IRD_HW_SIZE_2048_GEN3;
983 case 1024:
984 return IRDMA_IRD_HW_SIZE_1024_GEN3;
985 case 512:
986 return IRDMA_IRD_HW_SIZE_512_GEN3;
987 case 256:
988 return IRDMA_IRD_HW_SIZE_256_GEN3;
989 case 128:
990 return IRDMA_IRD_HW_SIZE_128_GEN3;
991 case 64:
992 return IRDMA_IRD_HW_SIZE_64_GEN3;
993 case 32:
994 return IRDMA_IRD_HW_SIZE_32_GEN3;
995 case 16:
996 return IRDMA_IRD_HW_SIZE_16_GEN3;
997 case 8:
998 return IRDMA_IRD_HW_SIZE_8_GEN3;
999 case 4:
1000 default:
1001 break;
1002 }
1003
1004 return IRDMA_IRD_HW_SIZE_4_GEN3;
1005}
1006
1007/**
1008 * irdma_sc_qp_setctx_roce_gen_3 - set qp's context
1009 * @qp: sc qp
1010 * @qp_ctx: context ptr
1011 * @info: ctx info
1012 */
1013static void irdma_sc_qp_setctx_roce_gen_3(struct irdma_sc_qp *qp,
1014 __le64 *qp_ctx,
1015 struct irdma_qp_host_ctx_info *info)
1016{
1017 struct irdma_roce_offload_info *roce_info = info->roce_info;
1018 struct irdma_udp_offload_info *udp = info->udp_info;
1019 u64 qw0, qw3, qw7 = 0, qw8 = 0;
1020 u8 push_mode_en;
1021 u32 push_idx;
1022
1023 qp->user_pri = info->user_pri;
1024 if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
1025 push_mode_en = 0;
1026 push_idx = 0;
1027 } else {
1028 push_mode_en = 1;
1029 push_idx = qp->push_idx;
1030 }
1031
1032 qw0 = FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
1033 FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
1034 FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
1035 FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
1036 FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
1037 FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
1038 FIELD_PREP(IRDMAQPC_PMENA, push_mode_en) |
1039 FIELD_PREP(IRDMAQPC_DC_TCP_EN, roce_info->dctcp_en) |
1040 FIELD_PREP(IRDMAQPC_ISQP1, roce_info->is_qp1) |
1041 FIELD_PREP(IRDMAQPC_ROCE_TVER, roce_info->roce_tver) |
1042 FIELD_PREP(IRDMAQPC_IPV4, udp->ipv4) |
1043 FIELD_PREP(IRDMAQPC_USE_SRQ, !qp->qp_uk.srq_uk ? 0 : 1) |
1044 FIELD_PREP(IRDMAQPC_INSERTVLANTAG, udp->insert_vlan_tag);
1045 set_64bit_val(wqe_words: qp_ctx, byte_index: 0, val: qw0);
1046 set_64bit_val(wqe_words: qp_ctx, byte_index: 8, val: qp->sq_pa);
1047 set_64bit_val(wqe_words: qp_ctx, byte_index: 16, val: qp->rq_pa);
1048 qw3 = FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
1049 FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size) |
1050 FIELD_PREP(IRDMAQPC_TTL, udp->ttl) |
1051 FIELD_PREP(IRDMAQPC_TOS, udp->tos) |
1052 FIELD_PREP(IRDMAQPC_SRCPORTNUM, udp->src_port) |
1053 FIELD_PREP(IRDMAQPC_DESTPORTNUM, udp->dst_port);
1054 set_64bit_val(wqe_words: qp_ctx, byte_index: 24, val: qw3);
1055 set_64bit_val(wqe_words: qp_ctx, byte_index: 32,
1056 FIELD_PREP(IRDMAQPC_DESTIPADDR2, udp->dest_ip_addr[2]) |
1057 FIELD_PREP(IRDMAQPC_DESTIPADDR3, udp->dest_ip_addr[3]));
1058 set_64bit_val(wqe_words: qp_ctx, byte_index: 40,
1059 FIELD_PREP(IRDMAQPC_DESTIPADDR0, udp->dest_ip_addr[0]) |
1060 FIELD_PREP(IRDMAQPC_DESTIPADDR1, udp->dest_ip_addr[1]));
1061 set_64bit_val(wqe_words: qp_ctx, byte_index: 48,
1062 FIELD_PREP(IRDMAQPC_SNDMSS, udp->snd_mss) |
1063 FIELD_PREP(IRDMAQPC_VLANTAG, udp->vlan_tag) |
1064 FIELD_PREP(IRDMAQPC_ARPIDX, udp->arp_idx));
1065 qw7 = FIELD_PREP(IRDMAQPC_PKEY, roce_info->p_key) |
1066 FIELD_PREP(IRDMAQPC_ACKCREDITS, roce_info->ack_credits) |
1067 FIELD_PREP(IRDMAQPC_FLOWLABEL, udp->flow_label);
1068 set_64bit_val(wqe_words: qp_ctx, byte_index: 56, val: qw7);
1069 qw8 = FIELD_PREP(IRDMAQPC_QKEY, roce_info->qkey) |
1070 FIELD_PREP(IRDMAQPC_DESTQP, roce_info->dest_qp);
1071 set_64bit_val(wqe_words: qp_ctx, byte_index: 64, val: qw8);
1072 set_64bit_val(wqe_words: qp_ctx, byte_index: 80,
1073 FIELD_PREP(IRDMAQPC_PSNNXT, udp->psn_nxt) |
1074 FIELD_PREP(IRDMAQPC_LSN, udp->lsn));
1075 set_64bit_val(wqe_words: qp_ctx, byte_index: 88,
1076 FIELD_PREP(IRDMAQPC_EPSN, udp->epsn));
1077 set_64bit_val(wqe_words: qp_ctx, byte_index: 96,
1078 FIELD_PREP(IRDMAQPC_PSNMAX, udp->psn_max) |
1079 FIELD_PREP(IRDMAQPC_PSNUNA, udp->psn_una));
1080 set_64bit_val(wqe_words: qp_ctx, byte_index: 112,
1081 FIELD_PREP(IRDMAQPC_CWNDROCE, udp->cwnd));
1082 set_64bit_val(wqe_words: qp_ctx, byte_index: 128,
1083 FIELD_PREP(IRDMAQPC_MINRNR_TIMER, udp->min_rnr_timer) |
1084 FIELD_PREP(IRDMAQPC_RNRNAK_THRESH, udp->rnr_nak_thresh) |
1085 FIELD_PREP(IRDMAQPC_REXMIT_THRESH, udp->rexmit_thresh) |
1086 FIELD_PREP(IRDMAQPC_RNRNAK_TMR, udp->rnr_nak_tmr) |
1087 FIELD_PREP(IRDMAQPC_RTOMIN, roce_info->rtomin));
1088 set_64bit_val(wqe_words: qp_ctx, byte_index: 136,
1089 FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
1090 FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
1091 set_64bit_val(wqe_words: qp_ctx, byte_index: 152,
1092 FIELD_PREP(IRDMAQPC_MACADDRESS,
1093 ether_addr_to_u64(roce_info->mac_addr)) |
1094 FIELD_PREP(IRDMAQPC_LOCALACKTIMEOUT,
1095 roce_info->local_ack_timeout));
1096 set_64bit_val(wqe_words: qp_ctx, byte_index: 160,
1097 FIELD_PREP(IRDMAQPC_ORDSIZE_GEN3, roce_info->ord_size) |
1098 FIELD_PREP(IRDMAQPC_IRDSIZE_GEN3,
1099 irdma_sc_get_encoded_ird_size_gen_3(roce_info->ird_size)) |
1100 FIELD_PREP(IRDMAQPC_WRRDRSPOK, roce_info->wr_rdresp_en) |
1101 FIELD_PREP(IRDMAQPC_RDOK, roce_info->rd_en) |
1102 FIELD_PREP(IRDMAQPC_USESTATSINSTANCE,
1103 info->stats_idx_valid) |
1104 FIELD_PREP(IRDMAQPC_BINDEN, roce_info->bind_en) |
1105 FIELD_PREP(IRDMAQPC_FASTREGEN, roce_info->fast_reg_en) |
1106 FIELD_PREP(IRDMAQPC_DCQCNENABLE, roce_info->dcqcn_en) |
1107 FIELD_PREP(IRDMAQPC_RCVNOICRC, roce_info->rcv_no_icrc) |
1108 FIELD_PREP(IRDMAQPC_FW_CC_ENABLE,
1109 roce_info->fw_cc_enable) |
1110 FIELD_PREP(IRDMAQPC_UDPRIVCQENABLE,
1111 roce_info->udprivcq_en) |
1112 FIELD_PREP(IRDMAQPC_PRIVEN, roce_info->priv_mode_en) |
1113 FIELD_PREP(IRDMAQPC_REMOTE_ATOMIC_EN,
1114 info->remote_atomics_en) |
1115 FIELD_PREP(IRDMAQPC_TIMELYENABLE, roce_info->timely_en));
1116 set_64bit_val(wqe_words: qp_ctx, byte_index: 168,
1117 FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
1118 set_64bit_val(wqe_words: qp_ctx, byte_index: 176,
1119 FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
1120 FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
1121 FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
1122 set_64bit_val(wqe_words: qp_ctx, byte_index: 184,
1123 FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, udp->local_ipaddr[3]) |
1124 FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, udp->local_ipaddr[2]));
1125 set_64bit_val(wqe_words: qp_ctx, byte_index: 192,
1126 FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, udp->local_ipaddr[1]) |
1127 FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, udp->local_ipaddr[0]));
1128 set_64bit_val(wqe_words: qp_ctx, byte_index: 200,
1129 FIELD_PREP(IRDMAQPC_THIGH, roce_info->t_high) |
1130 FIELD_PREP(IRDMAQPC_SRQ_ID,
1131 !qp->qp_uk.srq_uk ?
1132 0 : qp->qp_uk.srq_uk->srq_id) |
1133 FIELD_PREP(IRDMAQPC_TLOW, roce_info->t_low));
1134 set_64bit_val(wqe_words: qp_ctx, byte_index: 208, val: roce_info->pd_id |
1135 FIELD_PREP(IRDMAQPC_STAT_INDEX_GEN3, info->stats_idx) |
1136 FIELD_PREP(IRDMAQPC_PKT_LIMIT, qp->pkt_limit));
1137
1138 print_hex_dump_debug("WQE: QP_HOST ROCE CTX WQE", DUMP_PREFIX_OFFSET,
1139 16, 8, qp_ctx, IRDMA_QP_CTX_SIZE, false);
1140}
1141
1142void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
1143 struct irdma_qp_host_ctx_info *info)
1144{
1145 if (qp->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2)
1146 irdma_sc_qp_setctx_roce_gen_2(qp, qp_ctx, info);
1147 else
1148 irdma_sc_qp_setctx_roce_gen_3(qp, qp_ctx, info);
1149}
1150
1151/* irdma_sc_alloc_local_mac_entry - allocate a mac entry
1152 * @cqp: struct for cqp hw
1153 * @scratch: u64 saved to be used during cqp completion
1154 * @post_sq: flag for cqp db to ring
1155 */
1156static int irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
1157 bool post_sq)
1158{
1159 __le64 *wqe;
1160 u64 hdr;
1161
1162 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
1163 if (!wqe)
1164 return -ENOMEM;
1165
1166 hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
1167 IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY) |
1168 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
1169
1170 dma_wmb(); /* make sure WQE is written before valid bit is set */
1171
1172 set_64bit_val(wqe_words: wqe, byte_index: 24, val: hdr);
1173
1174 print_hex_dump_debug("WQE: ALLOCATE_LOCAL_MAC WQE",
1175 DUMP_PREFIX_OFFSET, 16, 8, wqe,
1176 IRDMA_CQP_WQE_SIZE * 8, false);
1177
1178 if (post_sq)
1179 irdma_sc_cqp_post_sq(cqp);
1180 return 0;
1181}
1182
1183/**
1184 * irdma_sc_add_local_mac_entry - add mac enry
1185 * @cqp: struct for cqp hw
1186 * @info:mac addr info
1187 * @scratch: u64 saved to be used during cqp completion
1188 * @post_sq: flag for cqp db to ring
1189 */
1190static int irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
1191 struct irdma_local_mac_entry_info *info,
1192 u64 scratch, bool post_sq)
1193{
1194 __le64 *wqe;
1195 u64 header;
1196
1197 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
1198 if (!wqe)
1199 return -ENOMEM;
1200
1201 set_64bit_val(wqe_words: wqe, byte_index: 32, val: ether_addr_to_u64(addr: info->mac_addr));
1202
1203 header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, info->entry_idx) |
1204 FIELD_PREP(IRDMA_CQPSQ_OPCODE,
1205 IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
1206 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
1207
1208 dma_wmb(); /* make sure WQE is written before valid bit is set */
1209
1210 set_64bit_val(wqe_words: wqe, byte_index: 24, val: header);
1211
1212 print_hex_dump_debug("WQE: ADD_LOCAL_MAC WQE", DUMP_PREFIX_OFFSET, 16,
1213 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
1214
1215 if (post_sq)
1216 irdma_sc_cqp_post_sq(cqp);
1217 return 0;
1218}
1219
1220/**
1221 * irdma_sc_del_local_mac_entry - cqp wqe to dele local mac
1222 * @cqp: struct for cqp hw
1223 * @scratch: u64 saved to be used during cqp completion
1224 * @entry_idx: index of mac entry
1225 * @ignore_ref_count: to force mac adde delete
1226 * @post_sq: flag for cqp db to ring
1227 */
1228static int irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
1229 u16 entry_idx, u8 ignore_ref_count,
1230 bool post_sq)
1231{
1232 __le64 *wqe;
1233 u64 header;
1234
1235 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
1236 if (!wqe)
1237 return -ENOMEM;
1238 header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, entry_idx) |
1239 FIELD_PREP(IRDMA_CQPSQ_OPCODE,
1240 IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
1241 FIELD_PREP(IRDMA_CQPSQ_MLM_FREEENTRY, 1) |
1242 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
1243 FIELD_PREP(IRDMA_CQPSQ_MLM_IGNORE_REF_CNT, ignore_ref_count);
1244
1245 dma_wmb(); /* make sure WQE is written before valid bit is set */
1246
1247 set_64bit_val(wqe_words: wqe, byte_index: 24, val: header);
1248
1249 print_hex_dump_debug("WQE: DEL_LOCAL_MAC_IPADDR WQE",
1250 DUMP_PREFIX_OFFSET, 16, 8, wqe,
1251 IRDMA_CQP_WQE_SIZE * 8, false);
1252
1253 if (post_sq)
1254 irdma_sc_cqp_post_sq(cqp);
1255 return 0;
1256}
1257
1258/**
1259 * irdma_sc_qp_setctx - set qp's context
1260 * @qp: sc qp
1261 * @qp_ctx: context ptr
1262 * @info: ctx info
1263 */
1264void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
1265 struct irdma_qp_host_ctx_info *info)
1266{
1267 struct irdma_iwarp_offload_info *iw;
1268 struct irdma_tcp_offload_info *tcp;
1269 struct irdma_sc_dev *dev;
1270 u8 push_mode_en;
1271 u32 push_idx;
1272 u64 qw0, qw3, qw7 = 0, qw16 = 0;
1273 u64 mac = 0;
1274
1275 iw = info->iwarp_info;
1276 tcp = info->tcp_info;
1277 dev = qp->dev;
1278 if (iw->rcv_mark_en) {
1279 qp->pfpdu.marker_len = 4;
1280 qp->pfpdu.rcv_start_seq = tcp->rcv_nxt;
1281 }
1282 qp->user_pri = info->user_pri;
1283 if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
1284 push_mode_en = 0;
1285 push_idx = 0;
1286 } else {
1287 push_mode_en = 1;
1288 push_idx = qp->push_idx;
1289 }
1290 qw0 = FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
1291 FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
1292 FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
1293 FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
1294 FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
1295 FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
1296 FIELD_PREP(IRDMAQPC_PMENA, push_mode_en);
1297
1298 set_64bit_val(wqe_words: qp_ctx, byte_index: 8, val: qp->sq_pa);
1299 set_64bit_val(wqe_words: qp_ctx, byte_index: 16, val: qp->rq_pa);
1300
1301 qw3 = FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
1302 FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size);
1303 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1304 qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX,
1305 qp->src_mac_addr_idx);
1306 set_64bit_val(wqe_words: qp_ctx, byte_index: 136,
1307 FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
1308 FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
1309 set_64bit_val(wqe_words: qp_ctx, byte_index: 168,
1310 FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
1311 set_64bit_val(wqe_words: qp_ctx, byte_index: 176,
1312 FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
1313 FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
1314 FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle) |
1315 FIELD_PREP(IRDMAQPC_EXCEPTION_LAN_QUEUE, qp->ieq_qp));
1316 if (info->iwarp_info_valid) {
1317 qw0 |= FIELD_PREP(IRDMAQPC_DDP_VER, iw->ddp_ver) |
1318 FIELD_PREP(IRDMAQPC_RDMAP_VER, iw->rdmap_ver) |
1319 FIELD_PREP(IRDMAQPC_DC_TCP_EN, iw->dctcp_en) |
1320 FIELD_PREP(IRDMAQPC_ECN_EN, iw->ecn_en) |
1321 FIELD_PREP(IRDMAQPC_IBRDENABLE, iw->ib_rd_en) |
1322 FIELD_PREP(IRDMAQPC_PDIDXHI, iw->pd_id >> 16) |
1323 FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID,
1324 iw->err_rq_idx_valid);
1325 qw7 |= FIELD_PREP(IRDMAQPC_PDIDX, iw->pd_id);
1326 qw16 |= FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, iw->err_rq_idx) |
1327 FIELD_PREP(IRDMAQPC_RTOMIN, iw->rtomin);
1328 set_64bit_val(wqe_words: qp_ctx, byte_index: 144,
1329 FIELD_PREP(IRDMAQPC_Q2ADDR, qp->q2_pa >> 8) |
1330 FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
1331
1332 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
1333 mac = ether_addr_to_u64(addr: iw->mac_addr);
1334
1335 set_64bit_val(wqe_words: qp_ctx, byte_index: 152,
1336 val: mac << 16 | FIELD_PREP(IRDMAQPC_LASTBYTESENT, iw->last_byte_sent));
1337 set_64bit_val(wqe_words: qp_ctx, byte_index: 160,
1338 FIELD_PREP(IRDMAQPC_ORDSIZE, iw->ord_size) |
1339 FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(iw->ird_size)) |
1340 FIELD_PREP(IRDMAQPC_WRRDRSPOK, iw->wr_rdresp_en) |
1341 FIELD_PREP(IRDMAQPC_RDOK, iw->rd_en) |
1342 FIELD_PREP(IRDMAQPC_SNDMARKERS, iw->snd_mark_en) |
1343 FIELD_PREP(IRDMAQPC_BINDEN, iw->bind_en) |
1344 FIELD_PREP(IRDMAQPC_FASTREGEN, iw->fast_reg_en) |
1345 FIELD_PREP(IRDMAQPC_PRIVEN, iw->priv_mode_en) |
1346 FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
1347 FIELD_PREP(IRDMAQPC_IWARPMODE, 1) |
1348 FIELD_PREP(IRDMAQPC_RCVMARKERS, iw->rcv_mark_en) |
1349 FIELD_PREP(IRDMAQPC_ALIGNHDRS, iw->align_hdrs) |
1350 FIELD_PREP(IRDMAQPC_RCVNOMPACRC, iw->rcv_no_mpa_crc) |
1351 FIELD_PREP(IRDMAQPC_RCVMARKOFFSET, iw->rcv_mark_offset || !tcp ? iw->rcv_mark_offset : tcp->rcv_nxt) |
1352 FIELD_PREP(IRDMAQPC_SNDMARKOFFSET, iw->snd_mark_offset || !tcp ? iw->snd_mark_offset : tcp->snd_nxt) |
1353 FIELD_PREP(IRDMAQPC_TIMELYENABLE, iw->timely_en));
1354 }
1355 if (info->tcp_info_valid) {
1356 qw0 |= FIELD_PREP(IRDMAQPC_IPV4, tcp->ipv4) |
1357 FIELD_PREP(IRDMAQPC_NONAGLE, tcp->no_nagle) |
1358 FIELD_PREP(IRDMAQPC_INSERTVLANTAG,
1359 tcp->insert_vlan_tag) |
1360 FIELD_PREP(IRDMAQPC_TIMESTAMP, tcp->time_stamp) |
1361 FIELD_PREP(IRDMAQPC_LIMIT, tcp->cwnd_inc_limit) |
1362 FIELD_PREP(IRDMAQPC_DROPOOOSEG, tcp->drop_ooo_seg) |
1363 FIELD_PREP(IRDMAQPC_DUPACK_THRESH, tcp->dup_ack_thresh);
1364
1365 if ((iw->ecn_en || iw->dctcp_en) && !(tcp->tos & 0x03))
1366 tcp->tos |= ECN_CODE_PT_VAL;
1367
1368 qw3 |= FIELD_PREP(IRDMAQPC_TTL, tcp->ttl) |
1369 FIELD_PREP(IRDMAQPC_AVOIDSTRETCHACK, tcp->avoid_stretch_ack) |
1370 FIELD_PREP(IRDMAQPC_TOS, tcp->tos) |
1371 FIELD_PREP(IRDMAQPC_SRCPORTNUM, tcp->src_port) |
1372 FIELD_PREP(IRDMAQPC_DESTPORTNUM, tcp->dst_port);
1373 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
1374 qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX, tcp->src_mac_addr_idx);
1375
1376 qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
1377 }
1378 set_64bit_val(wqe_words: qp_ctx, byte_index: 32,
1379 FIELD_PREP(IRDMAQPC_DESTIPADDR2, tcp->dest_ip_addr[2]) |
1380 FIELD_PREP(IRDMAQPC_DESTIPADDR3, tcp->dest_ip_addr[3]));
1381 set_64bit_val(wqe_words: qp_ctx, byte_index: 40,
1382 FIELD_PREP(IRDMAQPC_DESTIPADDR0, tcp->dest_ip_addr[0]) |
1383 FIELD_PREP(IRDMAQPC_DESTIPADDR1, tcp->dest_ip_addr[1]));
1384 set_64bit_val(wqe_words: qp_ctx, byte_index: 48,
1385 FIELD_PREP(IRDMAQPC_SNDMSS, tcp->snd_mss) |
1386 FIELD_PREP(IRDMAQPC_SYN_RST_HANDLING, tcp->syn_rst_handling) |
1387 FIELD_PREP(IRDMAQPC_VLANTAG, tcp->vlan_tag) |
1388 FIELD_PREP(IRDMAQPC_ARPIDX, tcp->arp_idx));
1389 qw7 |= FIELD_PREP(IRDMAQPC_FLOWLABEL, tcp->flow_label) |
1390 FIELD_PREP(IRDMAQPC_WSCALE, tcp->wscale) |
1391