1/*
2 * Copyright (c) 2016-2017 Hisilicon Limited.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/acpi.h>
34#include <linux/etherdevice.h>
35#include <linux/interrupt.h>
36#include <linux/iopoll.h>
37#include <linux/kernel.h>
38#include <linux/types.h>
39#include <linux/workqueue.h>
40#include <net/addrconf.h>
41#include <rdma/ib_addr.h>
42#include <rdma/ib_cache.h>
43#include <rdma/ib_umem.h>
44#include <rdma/uverbs_ioctl.h>
45
46#include "hclge_main.h"
47#include "hns_roce_common.h"
48#include "hns_roce_device.h"
49#include "hns_roce_cmd.h"
50#include "hns_roce_hem.h"
51#include "hns_roce_hw_v2.h"
52#include "hns_roce_bond.h"
53
54#define CREATE_TRACE_POINTS
55#include "hns_roce_trace.h"
56
57enum {
58 CMD_RST_PRC_OTHERS,
59 CMD_RST_PRC_SUCCESS,
60 CMD_RST_PRC_EBUSY,
61};
62
63enum ecc_resource_type {
64 ECC_RESOURCE_QPC,
65 ECC_RESOURCE_CQC,
66 ECC_RESOURCE_MPT,
67 ECC_RESOURCE_SRQC,
68 ECC_RESOURCE_GMV,
69 ECC_RESOURCE_QPC_TIMER,
70 ECC_RESOURCE_CQC_TIMER,
71 ECC_RESOURCE_SCCC,
72 ECC_RESOURCE_COUNT,
73};
74
75static const struct {
76 const char *name;
77 u8 read_bt0_op;
78 u8 write_bt0_op;
79} fmea_ram_res[] = {
80 { "ECC_RESOURCE_QPC",
81 HNS_ROCE_CMD_READ_QPC_BT0, HNS_ROCE_CMD_WRITE_QPC_BT0 },
82 { "ECC_RESOURCE_CQC",
83 HNS_ROCE_CMD_READ_CQC_BT0, HNS_ROCE_CMD_WRITE_CQC_BT0 },
84 { "ECC_RESOURCE_MPT",
85 HNS_ROCE_CMD_READ_MPT_BT0, HNS_ROCE_CMD_WRITE_MPT_BT0 },
86 { "ECC_RESOURCE_SRQC",
87 HNS_ROCE_CMD_READ_SRQC_BT0, HNS_ROCE_CMD_WRITE_SRQC_BT0 },
88 /* ECC_RESOURCE_GMV is handled by cmdq, not mailbox */
89 { "ECC_RESOURCE_GMV",
90 0, 0 },
91 { "ECC_RESOURCE_QPC_TIMER",
92 HNS_ROCE_CMD_READ_QPC_TIMER_BT0, HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0 },
93 { "ECC_RESOURCE_CQC_TIMER",
94 HNS_ROCE_CMD_READ_CQC_TIMER_BT0, HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0 },
95 { "ECC_RESOURCE_SCCC",
96 HNS_ROCE_CMD_READ_SCCC_BT0, HNS_ROCE_CMD_WRITE_SCCC_BT0 },
97};
98
99static inline void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
100 struct ib_sge *sg)
101{
102 dseg->lkey = cpu_to_le32(sg->lkey);
103 dseg->addr = cpu_to_le64(sg->addr);
104 dseg->len = cpu_to_le32(sg->length);
105}
106
/*
 * mapped-value = 1 + real-value
 * The real values of the hns wr opcodes start from 0. To distinguish
 * initialized map entries from uninitialized ones, add 1 to the real value
 * when defining the mapping, so that validity can be identified by checking
 * whether the mapped value is greater than 0.
 */
114#define HR_OPC_MAP(ib_key, hr_key) \
115 [IB_WR_ ## ib_key] = 1 + HNS_ROCE_V2_WQE_OP_ ## hr_key
116
117static const u32 hns_roce_op_code[] = {
118 HR_OPC_MAP(RDMA_WRITE, RDMA_WRITE),
119 HR_OPC_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE_WITH_IMM),
120 HR_OPC_MAP(SEND, SEND),
121 HR_OPC_MAP(SEND_WITH_IMM, SEND_WITH_IMM),
122 HR_OPC_MAP(RDMA_READ, RDMA_READ),
123 HR_OPC_MAP(ATOMIC_CMP_AND_SWP, ATOM_CMP_AND_SWAP),
124 HR_OPC_MAP(ATOMIC_FETCH_AND_ADD, ATOM_FETCH_AND_ADD),
125 HR_OPC_MAP(SEND_WITH_INV, SEND_WITH_INV),
126 HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP, ATOM_MSK_CMP_AND_SWAP),
127 HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD, ATOM_MSK_FETCH_AND_ADD),
128 HR_OPC_MAP(REG_MR, FAST_REG_PMR),
129};
130
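/*
 * Translate an IB opcode into the hns hardware opcode; unknown or unmapped
 * opcodes fall back to HNS_ROCE_V2_WQE_OP_MASK.
 */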
131static u32 to_hr_opcode(u32 ib_opcode)
132{
133 if (ib_opcode >= ARRAY_SIZE(hns_roce_op_code))
134 return HNS_ROCE_V2_WQE_OP_MASK;
135
136 return hns_roce_op_code[ib_opcode] ? hns_roce_op_code[ib_opcode] - 1 :
137 HNS_ROCE_V2_WQE_OP_MASK;
138}
139
140static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
141 const struct ib_reg_wr *wr)
142{
143 struct hns_roce_wqe_frmr_seg *fseg =
144 (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
	struct hns_roce_mr *mr = to_hr_mr(wr->mr);
146 u64 pbl_ba;
147
148 /* use ib_access_flags */
149 hr_reg_write_bool(fseg, FRMR_BIND_EN, 0);
150 hr_reg_write_bool(fseg, FRMR_ATOMIC,
151 wr->access & IB_ACCESS_REMOTE_ATOMIC);
152 hr_reg_write_bool(fseg, FRMR_RR, wr->access & IB_ACCESS_REMOTE_READ);
153 hr_reg_write_bool(fseg, FRMR_RW, wr->access & IB_ACCESS_REMOTE_WRITE);
154 hr_reg_write_bool(fseg, FRMR_LW, wr->access & IB_ACCESS_LOCAL_WRITE);
155
156 /* Data structure reuse may lead to confusion */
157 pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
158 rc_sq_wqe->msg_len = cpu_to_le32(lower_32_bits(pbl_ba));
159 rc_sq_wqe->inv_key = cpu_to_le32(upper_32_bits(pbl_ba));
160
161 rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
162 rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
163 rc_sq_wqe->rkey = cpu_to_le32(wr->key);
164 rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);
165
166 hr_reg_write(fseg, FRMR_PBL_SIZE, mr->npages);
167 hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
168 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
169 hr_reg_clear(fseg, FRMR_BLK_MODE);
170 hr_reg_clear(fseg, FRMR_BLOCK_SIZE);
171 hr_reg_clear(fseg, FRMR_ZBVA);
172}
173
174static void set_atomic_seg(const struct ib_send_wr *wr,
175 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
176 unsigned int valid_num_sge)
177{
178 struct hns_roce_v2_wqe_data_seg *dseg =
179 (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
180 struct hns_roce_wqe_atomic_seg *aseg =
181 (void *)dseg + sizeof(struct hns_roce_v2_wqe_data_seg);
182
	set_data_seg_v2(dseg, wr->sg_list);
184
185 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
186 aseg->fetchadd_swap_data = cpu_to_le64(atomic_wr(wr)->swap);
187 aseg->cmp_data = cpu_to_le64(atomic_wr(wr)->compare_add);
188 } else {
189 aseg->fetchadd_swap_data =
190 cpu_to_le64(atomic_wr(wr)->compare_add);
191 aseg->cmp_data = 0;
192 }
193
194 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge);
195}
196
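/*
 * Copy inline data that does not fit in the WQE itself into the extended SGE
 * space, splitting it across hardware pages when necessary.
 */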
197static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
198 const struct ib_send_wr *wr,
199 unsigned int *sge_idx, u32 msg_len)
200{
	struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
202 unsigned int left_len_in_pg;
203 unsigned int idx = *sge_idx;
204 unsigned int i = 0;
205 unsigned int len;
206 void *addr;
207 void *dseg;
208
	if (msg_len > qp->sq.ext_sge_cnt * HNS_ROCE_SGE_SIZE) {
		ibdev_err(ibdev,
			  "not enough extended sge space for inline data.\n");
		return -EINVAL;
	}
214
	dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
	left_len_in_pg = hr_hw_page_align((uintptr_t)dseg) - (uintptr_t)dseg;
	len = wr->sg_list[0].length;
	addr = (void *)(unsigned long)(wr->sg_list[0].addr);

	/* When copying data to the extended sge space, the length left in the
	 * page may not be long enough for the current user's sge, so the data
	 * has to be split into several parts: one in the first page and the
	 * others in the subsequent pages.
	 */
225 while (1) {
226 if (len <= left_len_in_pg) {
227 memcpy(dseg, addr, len);
228
229 idx += len / HNS_ROCE_SGE_SIZE;
230
231 i++;
232 if (i >= wr->num_sge)
233 break;
234
235 left_len_in_pg -= len;
236 len = wr->sg_list[i].length;
237 addr = (void *)(unsigned long)(wr->sg_list[i].addr);
238 dseg += len;
239 } else {
240 memcpy(dseg, addr, left_len_in_pg);
241
242 len -= left_len_in_pg;
243 addr += left_len_in_pg;
244 idx += left_len_in_pg / HNS_ROCE_SGE_SIZE;
			dseg = hns_roce_get_extend_sge(qp,
					idx & (qp->sge.sge_cnt - 1));
247 left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT;
248 }
249 }
250
251 *sge_idx = idx;
252
253 return 0;
254}
255
256static void set_extend_sge(struct hns_roce_qp *qp, struct ib_sge *sge,
257 unsigned int *sge_ind, unsigned int cnt)
258{
259 struct hns_roce_v2_wqe_data_seg *dseg;
260 unsigned int idx = *sge_ind;
261
262 while (cnt > 0) {
		dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
		if (likely(sge->length)) {
			set_data_seg_v2(dseg, sge);
266 idx++;
267 cnt--;
268 }
269 sge++;
270 }
271
272 *sge_ind = idx;
273}
274
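/* Inline data must fit in both the QP's max inline size and the path MTU. */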
275static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
276{
	struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
	int mtu = ib_mtu_enum_to_int(qp->path_mtu);

	if (mtu < 0 || len > qp->max_inline_data || len > mtu) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n",
			  len, qp->max_inline_data, mtu);
284 return false;
285 }
286
287 return true;
288}
289
290static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
291 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
292 unsigned int *sge_idx)
293{
	struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
295 u32 msg_len = le32_to_cpu(rc_sq_wqe->msg_len);
296 struct ib_device *ibdev = &hr_dev->ib_dev;
297 unsigned int curr_idx = *sge_idx;
298 void *dseg = rc_sq_wqe;
299 unsigned int i;
300 int ret;
301
	if (unlikely(wr->opcode == IB_WR_RDMA_READ)) {
		ibdev_err(ibdev, "invalid inline parameters!\n");
		return -EINVAL;
	}

	if (!check_inl_data_len(qp, msg_len))
308 return -EINVAL;
309
310 dseg += sizeof(struct hns_roce_v2_rc_send_wqe);
311
312 if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) {
313 hr_reg_clear(rc_sq_wqe, RC_SEND_WQE_INL_TYPE);
314
315 for (i = 0; i < wr->num_sge; i++) {
316 memcpy(dseg, ((void *)wr->sg_list[i].addr),
317 wr->sg_list[i].length);
318 dseg += wr->sg_list[i].length;
319 }
320 } else {
321 hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_INL_TYPE);
322
		ret = fill_ext_sge_inl_data(qp, wr, &curr_idx, msg_len);
324 if (ret)
325 return ret;
326
327 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, curr_idx - *sge_idx);
328 }
329
330 *sge_idx = curr_idx;
331
332 return 0;
333}
334
335static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
336 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
337 unsigned int *sge_ind,
338 unsigned int valid_num_sge)
339{
340 struct hns_roce_v2_wqe_data_seg *dseg =
341 (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
342 struct hns_roce_qp *qp = to_hr_qp(ibqp);
343 int j = 0;
344 int i;
345
346 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE,
347 !!(wr->send_flags & IB_SEND_INLINE));
348 if (wr->send_flags & IB_SEND_INLINE)
		return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind);
350
	if (valid_num_sge <= HNS_ROCE_SGE_IN_WQE) {
		for (i = 0; i < wr->num_sge; i++) {
			if (likely(wr->sg_list[i].length)) {
				set_data_seg_v2(dseg, wr->sg_list + i);
				dseg++;
			}
		}
	} else {
		for (i = 0; i < wr->num_sge && j < HNS_ROCE_SGE_IN_WQE; i++) {
			if (likely(wr->sg_list[i].length)) {
				set_data_seg_v2(dseg, wr->sg_list + i);
				dseg++;
				j++;
			}
		}

		set_extend_sge(qp, wr->sg_list + i, sge_ind,
			       valid_num_sge - HNS_ROCE_SGE_IN_WQE);
	}
370
371 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge);
372
373 return 0;
374}
375
376static int check_send_valid(struct hns_roce_dev *hr_dev,
377 struct hns_roce_qp *hr_qp)
378{
379 if (unlikely(hr_qp->state == IB_QPS_RESET ||
380 hr_qp->state == IB_QPS_INIT ||
381 hr_qp->state == IB_QPS_RTR))
382 return -EINVAL;
383 else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
384 return -EIO;
385
386 return 0;
387}
388
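/* Count the non-zero-length SGEs of a WR and return their total length. */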
389static unsigned int calc_wr_sge_num(const struct ib_send_wr *wr,
390 unsigned int *sge_len)
391{
392 unsigned int valid_num = 0;
393 unsigned int len = 0;
394 int i;
395
396 for (i = 0; i < wr->num_sge; i++) {
397 if (likely(wr->sg_list[i].length)) {
398 len += wr->sg_list[i].length;
399 valid_num++;
400 }
401 }
402
403 *sge_len = len;
404 return valid_num;
405}
406
407static __le32 get_immtdata(const struct ib_send_wr *wr)
408{
409 switch (wr->opcode) {
410 case IB_WR_SEND_WITH_IMM:
411 case IB_WR_RDMA_WRITE_WITH_IMM:
412 return cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
413 default:
414 return 0;
415 }
416}
417
418static int set_ud_opcode(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
419 const struct ib_send_wr *wr)
420{
421 u32 ib_op = wr->opcode;
422
423 if (ib_op != IB_WR_SEND && ib_op != IB_WR_SEND_WITH_IMM)
424 return -EINVAL;
425
426 ud_sq_wqe->immtdata = get_immtdata(wr);
427
428 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_OPCODE, to_hr_opcode(ib_op));
429
430 return 0;
431}
432
433static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
434 struct hns_roce_ah *ah)
435{
436 struct ib_device *ib_dev = ah->ibah.device;
437 struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
438
439 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_UDPSPN, ah->av.udp_sport);
440 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_HOPLIMIT, ah->av.hop_limit);
441 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_TCLASS, ah->av.tclass);
442 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_FLOW_LABEL, ah->av.flowlabel);
443 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SL, ah->av.sl);
444
445 ud_sq_wqe->sgid_index = ah->av.gid_index;
446
447 memcpy(ud_sq_wqe->dmac, ah->av.mac, ETH_ALEN);
448 memcpy(ud_sq_wqe->dgid, ah->av.dgid, GID_LEN_V2);
449
450 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
451 return 0;
452
453 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN_EN, ah->av.vlan_en);
454 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN, ah->av.vlan_id);
455
456 return 0;
457}
458
459static inline int set_ud_wqe(struct hns_roce_qp *qp,
460 const struct ib_send_wr *wr,
461 void *wqe, unsigned int *sge_idx,
462 unsigned int owner_bit)
463{
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
465 struct hns_roce_v2_ud_send_wqe *ud_sq_wqe = wqe;
466 unsigned int curr_idx = *sge_idx;
467 unsigned int valid_num_sge;
468 u32 msg_len = 0;
469 int ret;
470
	valid_num_sge = calc_wr_sge_num(wr, &msg_len);
472
473 ret = set_ud_opcode(ud_sq_wqe, wr);
474 if (WARN_ON_ONCE(ret))
475 return ret;
476
477 ud_sq_wqe->msg_len = cpu_to_le32(msg_len);
478
479 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_CQE,
480 !!(wr->send_flags & IB_SEND_SIGNALED));
481 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SE,
482 !!(wr->send_flags & IB_SEND_SOLICITED));
483
484 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_PD, to_hr_pd(qp->ibqp.pd)->pdn);
485 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SGE_NUM, valid_num_sge);
486 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_MSG_START_SGE_IDX,
487 curr_idx & (qp->sge.sge_cnt - 1));
488
489 ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
490 qp->qkey : ud_wr(wr)->remote_qkey);
491 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_DQPN, ud_wr(wr)->remote_qpn);
492
493 ret = fill_ud_av(ud_sq_wqe, ah);
494 if (ret)
495 return ret;
496
	qp->sl = to_hr_ah(ud_wr(wr)->ah)->av.sl;

	set_extend_sge(qp, wr->sg_list, &curr_idx, valid_num_sge);
500
501 /*
502 * The pipeline can sequentially post all valid WQEs into WQ buffer,
503 * including new WQEs waiting for the doorbell to update the PI again.
504 * Therefore, the owner bit of WQE MUST be updated after all fields
505 * and extSGEs have been written into DDR instead of cache.
506 */
507 if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
508 dma_wmb();
509
510 *sge_idx = curr_idx;
511 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_OWNER, owner_bit);
512
513 return 0;
514}
515
516static int set_rc_opcode(struct hns_roce_dev *hr_dev,
517 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
518 const struct ib_send_wr *wr)
519{
520 u32 ib_op = wr->opcode;
521 int ret = 0;
522
523 rc_sq_wqe->immtdata = get_immtdata(wr);
524
525 switch (ib_op) {
526 case IB_WR_RDMA_READ:
527 case IB_WR_RDMA_WRITE:
528 case IB_WR_RDMA_WRITE_WITH_IMM:
529 rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
530 rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
531 break;
532 case IB_WR_SEND:
533 case IB_WR_SEND_WITH_IMM:
534 break;
535 case IB_WR_ATOMIC_CMP_AND_SWP:
536 case IB_WR_ATOMIC_FETCH_AND_ADD:
537 rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
538 rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
539 break;
540 case IB_WR_REG_MR:
541 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
			set_frmr_seg(rc_sq_wqe, reg_wr(wr));
543 else
544 ret = -EOPNOTSUPP;
545 break;
546 case IB_WR_SEND_WITH_INV:
547 rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
548 break;
549 default:
550 ret = -EINVAL;
551 }
552
553 if (unlikely(ret))
554 return ret;
555
556 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_OPCODE, to_hr_opcode(ib_op));
557
558 return ret;
559}
560
561static inline int set_rc_wqe(struct hns_roce_qp *qp,
562 const struct ib_send_wr *wr,
563 void *wqe, unsigned int *sge_idx,
564 unsigned int owner_bit)
565{
	struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
567 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
568 unsigned int curr_idx = *sge_idx;
569 unsigned int valid_num_sge;
570 u32 msg_len = 0;
571 int ret;
572
	valid_num_sge = calc_wr_sge_num(wr, &msg_len);
574
575 rc_sq_wqe->msg_len = cpu_to_le32(msg_len);
576
577 ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr);
578 if (WARN_ON_ONCE(ret))
579 return ret;
580
581 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SO,
582 (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
583
584 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SE,
585 (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
586
587 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE,
588 (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
589
590 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
591 curr_idx & (qp->sge.sge_cnt - 1));
592
593 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
594 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
595 if (msg_len != ATOMIC_WR_LEN)
596 return -EINVAL;
597 set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
598 } else if (wr->opcode != IB_WR_REG_MR) {
		ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
					&curr_idx, valid_num_sge);
601 if (ret)
602 return ret;
603 }
604
605 /*
606 * The pipeline can sequentially post all valid WQEs into WQ buffer,
607 * including new WQEs waiting for the doorbell to update the PI again.
608 * Therefore, the owner bit of WQE MUST be updated after all fields
609 * and extSGEs have been written into DDR instead of cache.
610 */
611 if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
612 dma_wmb();
613
614 *sge_idx = curr_idx;
615 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_OWNER, owner_bit);
616
617 return ret;
618}
619
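/*
 * Ring the SQ doorbell with the new producer index, or trigger a flush CQE
 * when the QP has entered the error state.
 */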
620static inline void update_sq_db(struct hns_roce_dev *hr_dev,
621 struct hns_roce_qp *qp)
622{
623 if (unlikely(qp->state == IB_QPS_ERR)) {
		flush_cqe(hr_dev, qp);
625 } else {
626 struct hns_roce_v2_db sq_db = {};
627
628 hr_reg_write(&sq_db, DB_TAG, qp->qpn);
629 hr_reg_write(&sq_db, DB_CMD, HNS_ROCE_V2_SQ_DB);
630 hr_reg_write(&sq_db, DB_PI, qp->sq.head);
631 hr_reg_write(&sq_db, DB_SL, qp->sl);
632
		hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg);
634 }
635}
636
637static inline void update_rq_db(struct hns_roce_dev *hr_dev,
638 struct hns_roce_qp *qp)
639{
640 if (unlikely(qp->state == IB_QPS_ERR)) {
		flush_cqe(hr_dev, qp);
642 } else {
643 if (likely(qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)) {
644 *qp->rdb.db_record =
645 qp->rq.head & V2_DB_PRODUCER_IDX_M;
646 } else {
647 struct hns_roce_v2_db rq_db = {};
648
649 hr_reg_write(&rq_db, DB_TAG, qp->qpn);
650 hr_reg_write(&rq_db, DB_CMD, HNS_ROCE_V2_RQ_DB);
651 hr_reg_write(&rq_db, DB_PI, qp->rq.head);
652
			hns_roce_write64(hr_dev, (__le32 *)&rq_db,
					 qp->rq.db_reg);
655 }
656 }
657}
658
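/*
 * Write a 64-byte WQE to the DirectWQE register area as eight 64-bit writes,
 * skipped while the doorbell is disabled or the hardware is resetting.
 */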
659static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val,
660 u64 __iomem *dest)
661{
662#define HNS_ROCE_WRITE_TIMES 8
663 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
664 struct hnae3_handle *handle = priv->handle;
665 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
666 int i;
667
668 if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle))
669 for (i = 0; i < HNS_ROCE_WRITE_TIMES; i++)
670 writeq_relaxed(*(val + i), dest + i);
671}
672
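/*
 * Post a single WQE through DirectWQE instead of ringing the SQ doorbell;
 * if the QP is already in the error state, only a flush CQE is generated.
 */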
673static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
674 void *wqe)
675{
676#define HNS_ROCE_SL_SHIFT 2
677 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
678
679 if (unlikely(qp->state == IB_QPS_ERR)) {
		flush_cqe(hr_dev, qp);
681 return;
682 }
683 /* All kinds of DirectWQE have the same header field layout */
684 hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_FLAG);
685 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl);
686 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_H,
687 qp->sl >> HNS_ROCE_SL_SHIFT);
688 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_WQE_INDEX, qp->sq.head);
689
	hns_roce_write512(hr_dev, wqe, qp->sq.db_reg);
691}
692
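/*
 * Post a chain of send WRs to the SQ, then either ring the SQ doorbell or,
 * for a single WQE on QPs that support it, use DirectWQE.
 */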
693static int hns_roce_v2_post_send(struct ib_qp *ibqp,
694 const struct ib_send_wr *wr,
695 const struct ib_send_wr **bad_wr)
696{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
698 struct ib_device *ibdev = &hr_dev->ib_dev;
699 struct hns_roce_qp *qp = to_hr_qp(ibqp);
700 unsigned long flags = 0;
701 unsigned int owner_bit;
702 unsigned int sge_idx;
703 unsigned int wqe_idx;
704 void *wqe = NULL;
705 u32 nreq;
706 int ret;
707
708 spin_lock_irqsave(&qp->sq.lock, flags);
709
	ret = check_send_valid(hr_dev, qp);
711 if (unlikely(ret)) {
712 *bad_wr = wr;
713 nreq = 0;
714 goto out;
715 }
716
717 sge_idx = qp->next_sge;
718
719 for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
721 ret = -ENOMEM;
722 *bad_wr = wr;
723 goto out;
724 }
725
726 wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
727
728 if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			ibdev_err(ibdev, "num_sge = %d > qp->sq.max_gs = %u.\n",
730 wr->num_sge, qp->sq.max_gs);
731 ret = -EINVAL;
732 *bad_wr = wr;
733 goto out;
734 }
735
		wqe = hns_roce_get_send_wqe(qp, wqe_idx);
737 qp->sq.wrid[wqe_idx] = wr->wr_id;
738 owner_bit =
739 ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
740
741 /* RC and UD share the same DirectWQE field layout */
742 ((struct hns_roce_v2_rc_send_wqe *)wqe)->byte_4 = 0;
743
		/* Build the WQE according to the QP type */
		if (ibqp->qp_type == IB_QPT_RC)
			ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
		else
			ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);

		trace_hns_sq_wqe(qp->qpn, wqe_idx, wqe, 1 << qp->sq.wqe_shift,
				 wr->wr_id, TRACE_SQ);
752 if (unlikely(ret)) {
753 *bad_wr = wr;
754 goto out;
755 }
756 }
757
758out:
759 if (likely(nreq)) {
760 qp->sq.head += nreq;
761 qp->next_sge = sge_idx;
762
763 if (nreq == 1 && !ret &&
764 (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
765 write_dwqe(hr_dev, qp, wqe);
766 else
767 update_sq_db(hr_dev, qp);
768 }
769
	spin_unlock_irqrestore(&qp->sq.lock, flags);
771
772 return ret;
773}
774
775static int check_recv_valid(struct hns_roce_dev *hr_dev,
776 struct hns_roce_qp *hr_qp)
777{
778 if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
779 return -EIO;
780
781 if (hr_qp->state == IB_QPS_RESET)
782 return -EINVAL;
783
784 return 0;
785}
786
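/*
 * Fill receive SGEs into a RQ/SRQ WQE and terminate the list, either with a
 * reserved invalid SGE or by zeroing the unused segments.
 */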
787static void fill_recv_sge_to_wqe(const struct ib_recv_wr *wr, void *wqe,
788 u32 max_sge, bool rsv)
789{
790 struct hns_roce_v2_wqe_data_seg *dseg = wqe;
791 u32 i, cnt;
792
793 for (i = 0, cnt = 0; i < wr->num_sge; i++) {
794 /* Skip zero-length sge */
795 if (!wr->sg_list[i].length)
796 continue;
		set_data_seg_v2(dseg + cnt, wr->sg_list + i);
798 cnt++;
799 }
800
801 /* Fill a reserved sge to make hw stop reading remaining segments */
802 if (rsv) {
803 dseg[cnt].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
804 dseg[cnt].addr = 0;
805 dseg[cnt].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
806 } else {
807 /* Clear remaining segments to make ROCEE ignore sges */
808 if (cnt < max_sge)
809 memset(dseg + cnt, 0,
810 (max_sge - cnt) * HNS_ROCE_SGE_SIZE);
811 }
812}
813
814static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
815 u32 wqe_idx, u32 max_sge)
816{
817 void *wqe = NULL;
818
	wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
	fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);

	trace_hns_rq_wqe(hr_qp->qpn, wqe_idx, wqe, 1 << hr_qp->rq.wqe_shift,
			 wr->wr_id, TRACE_RQ);
824}
825
826static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
827 const struct ib_recv_wr *wr,
828 const struct ib_recv_wr **bad_wr)
829{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
831 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
832 struct ib_device *ibdev = &hr_dev->ib_dev;
833 u32 wqe_idx, nreq, max_sge;
834 unsigned long flags;
835 int ret;
836
837 spin_lock_irqsave(&hr_qp->rq.lock, flags);
838
839 ret = check_recv_valid(hr_dev, hr_qp);
840 if (unlikely(ret)) {
841 *bad_wr = wr;
842 nreq = 0;
843 goto out;
844 }
845
846 max_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
847 for (nreq = 0; wr; ++nreq, wr = wr->next) {
848 if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq,
849 hr_qp->ibqp.recv_cq))) {
850 ret = -ENOMEM;
851 *bad_wr = wr;
852 goto out;
853 }
854
855 if (unlikely(wr->num_sge > max_sge)) {
			ibdev_err(ibdev, "num_sge = %d >= max_sge = %u.\n",
				  wr->num_sge, max_sge);
858 ret = -EINVAL;
859 *bad_wr = wr;
860 goto out;
861 }
862
863 wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
864 fill_rq_wqe(hr_qp, wr, wqe_idx, max_sge);
865 hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
866 }
867
868out:
869 if (likely(nreq)) {
870 hr_qp->rq.head += nreq;
871
		update_rq_db(hr_dev, hr_qp);
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
875
876 return ret;
877}
878
879static void *get_srq_wqe_buf(struct hns_roce_srq *srq, u32 n)
880{
	return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
882}
883
884static void *get_idx_buf(struct hns_roce_idx_que *idx_que, u32 n)
885{
	return hns_roce_buf_offset(idx_que->mtr.kmem,
				   n << idx_que->entry_shift);
888}
889
890static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, u32 wqe_index)
891{
892 /* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
	srq->idx_que.tail++;

	spin_unlock(&srq->lock);
899}
900
901static int hns_roce_srqwq_overflow(struct hns_roce_srq *srq)
902{
903 struct hns_roce_idx_que *idx_que = &srq->idx_que;
904
905 return idx_que->head - idx_que->tail >= srq->wqe_cnt;
906}
907
908static int check_post_srq_valid(struct hns_roce_srq *srq, u32 max_sge,
909 const struct ib_recv_wr *wr)
910{
911 struct ib_device *ib_dev = srq->ibsrq.device;
912
	if (unlikely(wr->num_sge > max_sge)) {
		ibdev_err(ib_dev,
			  "failed to check sge, wr->num_sge = %d, max_sge = %u.\n",
			  wr->num_sge, max_sge);
		return -EINVAL;
	}

	if (unlikely(hns_roce_srqwq_overflow(srq))) {
		ibdev_err(ib_dev,
			  "failed to check srqwq status, srqwq is full.\n");
923 return -ENOMEM;
924 }
925
926 return 0;
927}
928
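/* Allocate a free SRQ WQE index from the index-queue bitmap. */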
929static int get_srq_wqe_idx(struct hns_roce_srq *srq, u32 *wqe_idx)
930{
931 struct hns_roce_idx_que *idx_que = &srq->idx_que;
932 u32 pos;
933
	pos = find_first_zero_bit(idx_que->bitmap, srq->wqe_cnt);
	if (unlikely(pos == srq->wqe_cnt))
		return -ENOSPC;

	bitmap_set(idx_que->bitmap, pos, 1);
939 *wqe_idx = pos;
940 return 0;
941}
942
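/* Record the allocated WQE index in the SRQ index queue and advance head. */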
943static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)
944{
945 struct hns_roce_idx_que *idx_que = &srq->idx_que;
946 unsigned int head;
947 __le32 *buf;
948
949 head = idx_que->head & (srq->wqe_cnt - 1);
950
	buf = get_idx_buf(idx_que, head);
952 *buf = cpu_to_le32(wqe_idx);
953
954 idx_que->head++;
955}
956
957static void update_srq_db(struct hns_roce_srq *srq)
958{
	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
960 struct hns_roce_v2_db db = {};
961
962 hr_reg_write(&db, DB_TAG, srq->srqn);
963 hr_reg_write(&db, DB_CMD, HNS_ROCE_V2_SRQ_DB);
964 hr_reg_write(&db, DB_PI, srq->idx_que.head);
965
	hns_roce_write64(hr_dev, (__le32 *)&db, srq->db_reg);
967}
968
969static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
970 const struct ib_recv_wr *wr,
971 const struct ib_recv_wr **bad_wr)
972{
973 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
974 unsigned long flags;
975 int ret = 0;
976 u32 max_sge;
977 u32 wqe_idx;
978 void *wqe;
979 u32 nreq;
980
981 spin_lock_irqsave(&srq->lock, flags);
982
983 max_sge = srq->max_gs - srq->rsv_sge;
984 for (nreq = 0; wr; ++nreq, wr = wr->next) {
985 ret = check_post_srq_valid(srq, max_sge, wr);
986 if (ret) {
987 *bad_wr = wr;
988 break;
989 }
990
		ret = get_srq_wqe_idx(srq, &wqe_idx);
992 if (unlikely(ret)) {
993 *bad_wr = wr;
994 break;
995 }
996
		wqe = get_srq_wqe_buf(srq, wqe_idx);
		fill_recv_sge_to_wqe(wr, wqe, max_sge, srq->rsv_sge);
		fill_wqe_idx(srq, wqe_idx);
		srq->wrid[wqe_idx] = wr->wr_id;

		trace_hns_srq_wqe(srq->srqn, wqe_idx, wqe, 1 << srq->wqe_shift,
				  wr->wr_id, TRACE_SRQ);
1004 }
1005
1006 if (likely(nreq)) {
1007 if (srq->cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB)
1008 *srq->rdb.db_record = srq->idx_que.head &
1009 V2_DB_PRODUCER_IDX_M;
1010 else
1011 update_srq_db(srq);
1012 }
1013
	spin_unlock_irqrestore(&srq->lock, flags);
1015
1016 return ret;
1017}
1018
1019static u32 hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
1020 unsigned long instance_stage,
1021 unsigned long reset_stage)
1022{
	/* When a hardware reset has completed once or more, we should stop
	 * sending mailbox, cmq and doorbell operations to the hardware. If we
	 * are now in the .init_instance() function, we should exit with an
	 * error. If we are at the HNAE3_INIT_CLIENT stage of the soft reset
	 * process, we should also exit with an error, so that the
	 * HNAE3_INIT_CLIENT related process can roll back operations such as
	 * notifying the hardware to free resources; that process will then
	 * exit with an error to make the NIC driver reschedule the soft reset
	 * once again.
	 */
1032 hr_dev->is_reset = true;
1033 hr_dev->dis_db = true;
1034
1035 if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
1036 instance_stage == HNS_ROCE_STATE_INIT)
1037 return CMD_RST_PRC_EBUSY;
1038
1039 return CMD_RST_PRC_SUCCESS;
1040}
1041
1042static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
1043 unsigned long instance_stage,
1044 unsigned long reset_stage)
1045{
1046#define HW_RESET_TIMEOUT_US 1000000
1047#define HW_RESET_SLEEP_US 1000
1048
1049 struct hns_roce_v2_priv *priv = hr_dev->priv;
1050 struct hnae3_handle *handle = priv->handle;
1051 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1052 unsigned long val;
1053 int ret;
1054
	/* When a hardware reset is detected, we should stop sending mailbox,
	 * cmq and doorbell operations to the hardware. If we are now in the
	 * .init_instance() function, we should exit with an error. If we are
	 * at the HNAE3_INIT_CLIENT stage of the soft reset process, we should
	 * also exit with an error, so that the HNAE3_INIT_CLIENT related
	 * process can roll back operations such as notifying the hardware to
	 * free resources; that process will then exit with an error to make
	 * the NIC driver reschedule the soft reset once again.
	 */
1064 hr_dev->dis_db = true;
1065
1066 ret = read_poll_timeout(ops->ae_dev_reset_cnt, val,
1067 val > hr_dev->reset_cnt, HW_RESET_SLEEP_US,
1068 HW_RESET_TIMEOUT_US, false, handle);
1069 if (!ret)
1070 hr_dev->is_reset = true;
1071
1072 if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
1073 instance_stage == HNS_ROCE_STATE_INIT)
1074 return CMD_RST_PRC_EBUSY;
1075
1076 return CMD_RST_PRC_SUCCESS;
1077}
1078
1079static u32 hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
1080{
1081 struct hns_roce_v2_priv *priv = hr_dev->priv;
1082 struct hnae3_handle *handle = priv->handle;
1083 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1084
1085 /* When software reset is detected at .init_instance() function, we
1086 * should stop sending mailbox&cmq&doorbell to hardware, and exit
1087 * with error.
1088 */
1089 hr_dev->dis_db = true;
1090 if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
1091 hr_dev->is_reset = true;
1092
1093 return CMD_RST_PRC_EBUSY;
1094}
1095
1096static u32 check_aedev_reset_status(struct hns_roce_dev *hr_dev,
1097 struct hnae3_handle *handle)
1098{
1099 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1100 unsigned long instance_stage; /* the current instance stage */
1101 unsigned long reset_stage; /* the current reset stage */
1102 unsigned long reset_cnt;
1103 bool sw_resetting;
1104 bool hw_resetting;
1105
1106 /* Get information about reset from NIC driver or RoCE driver itself,
1107 * the meaning of the following variables from NIC driver are described
1108 * as below:
1109 * reset_cnt -- The count value of completed hardware reset.
1110 * hw_resetting -- Whether hardware device is resetting now.
1111 * sw_resetting -- Whether NIC's software reset process is running now.
1112 */
1113 instance_stage = handle->rinfo.instance_state;
1114 reset_stage = handle->rinfo.reset_state;
1115 reset_cnt = ops->ae_dev_reset_cnt(handle);
1116 if (reset_cnt != hr_dev->reset_cnt)
1117 return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
1118 reset_stage);
1119
1120 hw_resetting = ops->get_cmdq_stat(handle);
1121 if (hw_resetting)
1122 return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
1123 reset_stage);
1124
1125 sw_resetting = ops->ae_dev_resetting(handle);
1126 if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
1127 return hns_roce_v2_cmd_sw_resetting(hr_dev);
1128
1129 return CMD_RST_PRC_OTHERS;
1130}
1131
1132static bool check_device_is_in_reset(struct hns_roce_dev *hr_dev)
1133{
1134 struct hns_roce_v2_priv *priv = hr_dev->priv;
1135 struct hnae3_handle *handle = priv->handle;
1136 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1137
1138 if (hr_dev->reset_cnt != ops->ae_dev_reset_cnt(handle))
1139 return true;
1140
1141 if (ops->get_hw_reset_stat(handle))
1142 return true;
1143
1144 if (ops->ae_dev_resetting(handle))
1145 return true;
1146
1147 return false;
1148}
1149
1150static bool v2_chk_mbox_is_avail(struct hns_roce_dev *hr_dev, bool *busy)
1151{
1152 struct hns_roce_v2_priv *priv = hr_dev->priv;
1153 u32 status;
1154
1155 if (hr_dev->is_reset)
1156 status = CMD_RST_PRC_SUCCESS;
1157 else
		status = check_aedev_reset_status(hr_dev, priv->handle);
1159
1160 *busy = (status == CMD_RST_PRC_EBUSY);
1161
1162 return status == CMD_RST_PRC_OTHERS;
1163}
1164
1165static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
1166 struct hns_roce_v2_cmq_ring *ring)
1167{
1168 int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);
1169
	ring->desc = dma_alloc_coherent(hr_dev->dev, size,
					&ring->desc_dma_addr, GFP_KERNEL);
1172 if (!ring->desc)
1173 return -ENOMEM;
1174
1175 return 0;
1176}
1177
1178static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
1179 struct hns_roce_v2_cmq_ring *ring)
1180{
	dma_free_coherent(hr_dev->dev,
			  ring->desc_num * sizeof(struct hns_roce_cmq_desc),
			  ring->desc, ring->desc_dma_addr);
1184
1185 ring->desc_dma_addr = 0;
1186}
1187
1188static int init_csq(struct hns_roce_dev *hr_dev,
1189 struct hns_roce_v2_cmq_ring *csq)
1190{
1191 dma_addr_t dma;
1192 int ret;
1193
1194 csq->desc_num = CMD_CSQ_DESC_NUM;
1195 spin_lock_init(&csq->lock);
1196 csq->flag = TYPE_CSQ;
1197 csq->head = 0;
1198
	ret = hns_roce_alloc_cmq_desc(hr_dev, csq);
1200 if (ret)
1201 return ret;
1202
1203 dma = csq->desc_dma_addr;
1204 roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, lower_32_bits(dma));
1205 roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG, upper_32_bits(dma));
1206 roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
1207 (u32)csq->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
1208
1209 /* Make sure to write CI first and then PI */
1210 roce_write(hr_dev, ROCEE_TX_CMQ_CI_REG, 0);
1211 roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, 0);
1212
1213 return 0;
1214}
1215
1216static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
1217{
1218 struct hns_roce_v2_priv *priv = hr_dev->priv;
1219 int ret;
1220
1221 priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;
1222
	ret = init_csq(hr_dev, &priv->cmq.csq);
1224 if (ret)
1225 dev_err(hr_dev->dev, "failed to init CSQ, ret = %d.\n", ret);
1226
1227 return ret;
1228}
1229
1230static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
1231{
1232 struct hns_roce_v2_priv *priv = hr_dev->priv;
1233
	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
1235}
1236
1237static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
1238 enum hns_roce_opcode_type opcode,
1239 bool is_read)
1240{
1241 memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
1242 desc->opcode = cpu_to_le16(opcode);
1243 desc->flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN);
1244 if (is_read)
1245 desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
1246 else
1247 desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
1248}
1249
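/* The CSQ is done when the hardware consumer index catches up with head. */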
1250static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
1251{
1252 u32 tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
1253 struct hns_roce_v2_priv *priv = hr_dev->priv;
1254
1255 return tail == priv->cmq.csq.head;
1256}
1257
1258static void update_cmdq_status(struct hns_roce_dev *hr_dev)
1259{
1260 struct hns_roce_v2_priv *priv = hr_dev->priv;
1261 struct hnae3_handle *handle = priv->handle;
1262
1263 if (handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT ||
1264 handle->rinfo.instance_state == HNS_ROCE_STATE_INIT)
1265 hr_dev->cmd.state = HNS_ROCE_CMDQ_STATE_FATAL_ERR;
1266}
1267
1268static int hns_roce_cmd_err_convert_errno(u16 desc_ret)
1269{
1270 struct hns_roce_cmd_errcode errcode_table[] = {
		{CMD_EXEC_SUCCESS, 0},
1272 {CMD_NO_AUTH, -EPERM},
1273 {CMD_NOT_EXIST, -EOPNOTSUPP},
1274 {CMD_CRQ_FULL, -EXFULL},
1275 {CMD_NEXT_ERR, -ENOSR},
1276 {CMD_NOT_EXEC, -ENOTBLK},
1277 {CMD_PARA_ERR, -EINVAL},
1278 {CMD_RESULT_ERR, -ERANGE},
1279 {CMD_TIMEOUT, -ETIME},
1280 {CMD_HILINK_ERR, -ENOLINK},
1281 {CMD_INFO_ILLEGAL, -ENXIO},
1282 {CMD_INVALID, -EBADR},
1283 };
1284 u16 i;
1285
1286 for (i = 0; i < ARRAY_SIZE(errcode_table); i++)
1287 if (desc_ret == errcode_table[i].return_status)
1288 return errcode_table[i].errno;
1289 return -EIO;
1290}
1291
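/*
 * Some opcodes need a longer CMDQ timeout; look up the per-opcode value and
 * fall back to the default tx_timeout otherwise.
 */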
1292static u32 hns_roce_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
1293{
1294 static const struct hns_roce_cmdq_tx_timeout_map cmdq_tx_timeout[] = {
1295 {HNS_ROCE_OPC_POST_MB, HNS_ROCE_OPC_POST_MB_TIMEOUT},
1296 };
1297 int i;
1298
1299 for (i = 0; i < ARRAY_SIZE(cmdq_tx_timeout); i++)
1300 if (cmdq_tx_timeout[i].opcode == opcode)
1301 return cmdq_tx_timeout[i].tx_timeout;
1302
1303 return tx_timeout;
1304}
1305
1306static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u32 tx_timeout)
1307{
1308 u32 timeout = 0;
1309
1310 do {
1311 if (hns_roce_cmq_csq_done(hr_dev))
1312 break;
		udelay(1);
1314 } while (++timeout < tx_timeout);
1315}
1316
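/*
 * Copy the descriptors into the CSQ ring, ring the PI register and wait for
 * the hardware to consume them; the caller must hold csq->lock.
 */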
1317static int __hns_roce_cmq_send_one(struct hns_roce_dev *hr_dev,
1318 struct hns_roce_cmq_desc *desc,
1319 int num, u32 tx_timeout)
1320{
1321 struct hns_roce_v2_priv *priv = hr_dev->priv;
1322 struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
1323 u16 desc_ret;
1324 u32 tail;
1325 int ret;
1326 int i;
1327
1328 tail = csq->head;
1329
1330 for (i = 0; i < num; i++) {
		trace_hns_cmdq_req(hr_dev, &desc[i]);

		csq->desc[csq->head++] = desc[i];
1334 if (csq->head == csq->desc_num)
1335 csq->head = 0;
1336 }
1337
1338 /* Write to hardware */
1339 roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, csq->head);
1340
	atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_CNT]);
1342
1343 hns_roce_wait_csq_done(hr_dev, tx_timeout);
1344 if (hns_roce_cmq_csq_done(hr_dev)) {
1345 ret = 0;
1346 for (i = 0; i < num; i++) {
			trace_hns_cmdq_resp(hr_dev, &csq->desc[tail]);
1348
1349 /* check the result of hardware write back */
1350 desc_ret = le16_to_cpu(csq->desc[tail++].retval);
1351 if (tail == csq->desc_num)
1352 tail = 0;
1353 if (likely(desc_ret == CMD_EXEC_SUCCESS))
1354 continue;
1355
1356 ret = hns_roce_cmd_err_convert_errno(desc_ret);
1357 }
1358 } else {
1359 /* FW/HW reset or incorrect number of desc */
1360 tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
1361 dev_warn(hr_dev->dev, "CMDQ move tail from %u to %u.\n",
1362 csq->head, tail);
1363 csq->head = tail;
1364
1365 update_cmdq_status(hr_dev);
1366
1367 ret = -EAGAIN;
1368 }
1369
1370 if (ret)
		atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_ERR_CNT]);
1372
1373 return ret;
1374}
1375
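/*
 * Send a CMQ request, retrying timed-out mailbox posts, and copy the
 * hardware's write-back descriptors back to the caller.
 */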
1376static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
1377 struct hns_roce_cmq_desc *desc, int num)
1378{
1379 struct hns_roce_v2_priv *priv = hr_dev->priv;
1380 struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
1381 u16 opcode = le16_to_cpu(desc->opcode);
	u32 tx_timeout = hns_roce_cmdq_tx_timeout(opcode, priv->cmq.tx_timeout);
1383 u8 try_cnt = HNS_ROCE_OPC_POST_MB_TRY_CNT;
1384 u32 rsv_tail;
1385 int ret;
1386 int i;
1387
1388 while (try_cnt) {
1389 try_cnt--;
1390
		spin_lock_bh(&csq->lock);
		rsv_tail = csq->head;
		ret = __hns_roce_cmq_send_one(hr_dev, desc, num, tx_timeout);
		if (opcode == HNS_ROCE_OPC_POST_MB && ret == -ETIME &&
		    try_cnt) {
			spin_unlock_bh(&csq->lock);
			mdelay(HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC);
1398 continue;
1399 }
1400
1401 for (i = 0; i < num; i++) {
1402 desc[i] = csq->desc[rsv_tail++];
1403 if (rsv_tail == csq->desc_num)
1404 rsv_tail = 0;
1405 }
		spin_unlock_bh(&csq->lock);
1407 break;
1408 }
1409
1410 if (ret)
1411 dev_err_ratelimited(hr_dev->dev,
1412 "Cmdq IO error, opcode = 0x%x, return = %d.\n",
1413 opcode, ret);
1414
1415 return ret;
1416}
1417
1418static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
1419 struct hns_roce_cmq_desc *desc, int num)
1420{
1421 bool busy;
1422 int ret;
1423
1424 if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR)
1425 return -EIO;
1426
	if (!v2_chk_mbox_is_avail(hr_dev, &busy))
		return busy ? -EBUSY : 0;

	ret = __hns_roce_cmq_send(hr_dev, desc, num);
	if (ret) {
		if (!v2_chk_mbox_is_avail(hr_dev, &busy))
			return busy ? -EBUSY : 0;
1434 }
1435
1436 return ret;
1437}
1438
1439static enum hns_roce_opcode_type
1440 get_bond_opcode(enum hns_roce_bond_cmd_type bond_type)
1441{
1442 switch (bond_type) {
1443 case HNS_ROCE_SET_BOND:
1444 return HNS_ROCE_OPC_SET_BOND_INFO;
1445 case HNS_ROCE_CHANGE_BOND:
1446 return HNS_ROCE_OPC_CHANGE_ACTIVE_PORT;
1447 case HNS_ROCE_CLEAR_BOND:
1448 return HNS_ROCE_OPC_CLEAR_BOND_INFO;
1449 default:
1450 WARN(true, "Invalid bond type %d!\n", bond_type);
1451 return HNS_ROCE_OPC_SET_BOND_INFO;
1452 }
1453}
1454
1455static enum hns_roce_bond_hashtype
1456 get_bond_hashtype(enum netdev_lag_hash netdev_hashtype)
1457{
1458 switch (netdev_hashtype) {
1459 case NETDEV_LAG_HASH_L2:
1460 return BOND_HASH_L2;
1461 case NETDEV_LAG_HASH_L34:
1462 return BOND_HASH_L34;
1463 case NETDEV_LAG_HASH_L23:
1464 return BOND_HASH_L23;
1465 default:
1466 WARN(true, "Invalid hash type %d!\n", netdev_hashtype);
1467 return BOND_HASH_L2;
1468 }
1469}
1470
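/* Deliver RoCE bonding configuration (set/change/clear) to HW via the CMQ. */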
1471int hns_roce_cmd_bond(struct hns_roce_bond_group *bond_grp,
1472 enum hns_roce_bond_cmd_type bond_type)
1473{
1474 enum hns_roce_opcode_type opcode = get_bond_opcode(bond_type);
1475 struct hns_roce_bond_info *slave_info;
1476 struct hns_roce_cmq_desc desc = {};
1477 int ret;
1478
1479 slave_info = (struct hns_roce_bond_info *)desc.data;
	hns_roce_cmq_setup_basic_desc(&desc, opcode, false);
1481
1482 slave_info->bond_id = cpu_to_le32(bond_grp->bond_id);
1483 if (bond_type == HNS_ROCE_CLEAR_BOND)
1484 goto out;
1485
1486 if (bond_grp->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
1487 slave_info->bond_mode = cpu_to_le32(BOND_MODE_1);
1488 if (bond_grp->active_slave_num != 1)
			ibdev_warn(&bond_grp->main_hr_dev->ib_dev,
				   "active slave cnt(%u) in Mode 1 is invalid.\n",
				   bond_grp->active_slave_num);
1492 } else {
1493 slave_info->bond_mode = cpu_to_le32(BOND_MODE_2_4);
1494 slave_info->hash_policy =
1495 cpu_to_le32(get_bond_hashtype(bond_grp->hash_type));
1496 }
1497
1498 slave_info->active_slave_cnt = cpu_to_le32(bond_grp->active_slave_num);
1499 slave_info->active_slave_mask = cpu_to_le32(bond_grp->active_slave_map);
1500 slave_info->slave_mask = cpu_to_le32(bond_grp->slave_map);
1501
1502out:
	ret = hns_roce_cmq_send(bond_grp->main_hr_dev, &desc, 1);
	if (ret)
		ibdev_err(&bond_grp->main_hr_dev->ib_dev,
			  "cmq bond type(%d) failed, ret = %d.\n",
			  bond_type, ret);
1508
1509 return ret;
1510}
1511
1512static int config_hem_ba_to_hw(struct hns_roce_dev *hr_dev,
1513 dma_addr_t base_addr, u8 cmd, unsigned long tag)
1514{
1515 struct hns_roce_cmd_mailbox *mbox;
1516 int ret;
1517
	mbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mbox))
		return PTR_ERR(mbox);

	ret = hns_roce_cmd_mbox(hr_dev, base_addr, mbox->dma, cmd, tag);
	hns_roce_free_cmd_mailbox(hr_dev, mbox);
1524 return ret;
1525}
1526
1527static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
1528{
1529 struct hns_roce_query_version *resp;
1530 struct hns_roce_cmq_desc desc;
1531 int ret;
1532
	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1535 if (ret)
1536 return ret;
1537
1538 resp = (struct hns_roce_query_version *)desc.data;
1539 hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version);
1540 hr_dev->vendor_id = hr_dev->pci_dev->vendor;
1541
1542 return 0;
1543}
1544
1545static void func_clr_hw_resetting_state(struct hns_roce_dev *hr_dev,
1546 struct hnae3_handle *handle)
1547{
1548 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1549 unsigned long end;
1550
1551 hr_dev->dis_db = true;
1552
1553 dev_warn(hr_dev->dev,
1554 "func clear is pending, device in resetting state.\n");
1555 end = HNS_ROCE_V2_HW_RST_TIMEOUT;
1556 while (end) {
1557 if (!ops->get_hw_reset_stat(handle)) {
1558 hr_dev->is_reset = true;
1559 dev_info(hr_dev->dev,
1560 "func clear success after reset.\n");
1561 return;
1562 }
1563 msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
1564 end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
1565 }
1566
1567 dev_warn(hr_dev->dev, "func clear failed.\n");
1568}
1569
1570static void func_clr_sw_resetting_state(struct hns_roce_dev *hr_dev,
1571 struct hnae3_handle *handle)
1572{
1573 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1574 unsigned long end;
1575
1576 hr_dev->dis_db = true;
1577
1578 dev_warn(hr_dev->dev,
1579 "func clear is pending, device in resetting state.\n");
1580 end = HNS_ROCE_V2_HW_RST_TIMEOUT;
1581 while (end) {
1582 if (ops->ae_dev_reset_cnt(handle) !=
1583 hr_dev->reset_cnt) {
1584 hr_dev->is_reset = true;
1585 dev_info(hr_dev->dev,
1586 "func clear success after sw reset\n");
1587 return;
1588 }
1589 msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
1590 end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
1591 }
1592
1593 dev_warn(hr_dev->dev, "func clear failed because of unfinished sw reset\n");
1594}
1595
1596static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
1597 int flag)
1598{
1599 struct hns_roce_v2_priv *priv = hr_dev->priv;
1600 struct hnae3_handle *handle = priv->handle;
1601 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1602
1603 if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt) {
1604 hr_dev->dis_db = true;
1605 hr_dev->is_reset = true;
1606 dev_info(hr_dev->dev, "func clear success after reset.\n");
1607 return;
1608 }
1609
1610 if (ops->get_hw_reset_stat(handle)) {
1611 func_clr_hw_resetting_state(hr_dev, handle);
1612 return;
1613 }
1614
1615 if (ops->ae_dev_resetting(handle) &&
1616 handle->rinfo.instance_state == HNS_ROCE_STATE_INIT) {
1617 func_clr_sw_resetting_state(hr_dev, handle);
1618 return;
1619 }
1620
1621 if (retval && !flag)
1622 dev_warn(hr_dev->dev,
1623 "func clear read failed, ret = %d.\n", retval);
1624
1625 dev_warn(hr_dev->dev, "func clear failed.\n");
1626}
1627
1628static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id)
1629{
1630 bool fclr_write_fail_flag = false;
1631 struct hns_roce_func_clear *resp;
1632 struct hns_roce_cmq_desc desc;
1633 unsigned long end;
1634 int ret = 0;
1635
1636 if (check_device_is_in_reset(hr_dev))
1637 goto out;
1638
	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
	resp = (struct hns_roce_func_clear *)desc.data;
	resp->rst_funcid_en = cpu_to_le32(vf_id);

	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1644 if (ret) {
1645 fclr_write_fail_flag = true;
1646 dev_err(hr_dev->dev, "func clear write failed, ret = %d.\n",
1647 ret);
1648 goto out;
1649 }
1650
1651 msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
1652 end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
1653 while (end) {
1654 if (check_device_is_in_reset(hr_dev))
1655 goto out;
1656 msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
1657 end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;
1658
		hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
					      true);

		resp->rst_funcid_en = cpu_to_le32(vf_id);
		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1664 if (ret)
1665 continue;
1666
1667 if (hr_reg_read(resp, FUNC_CLEAR_RST_FUN_DONE)) {
1668 if (vf_id == 0)
1669 hr_dev->is_reset = true;
1670 return;
1671 }
1672 }
1673
1674out:
	hns_roce_func_clr_rst_proc(hr_dev, ret, fclr_write_fail_flag);
1676}
1677
1678static int hns_roce_free_vf_resource(struct hns_roce_dev *hr_dev, int vf_id)
1679{
1680 enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
1681 struct hns_roce_cmq_desc desc[2];
1682 struct hns_roce_cmq_req *req_a;
1683
	req_a = (struct hns_roce_cmq_req *)desc[0].data;
	hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
	desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
1688 hr_reg_write(req_a, FUNC_RES_A_VF_ID, vf_id);
1689
	return hns_roce_cmq_send(hr_dev, desc, 2);
1691}
1692
1693static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
1694{
1695 int ret;
1696 int i;
1697
1698 if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR)
1699 return;
1700
1701 for (i = hr_dev->func_num - 1; i >= 0; i--) {
		__hns_roce_function_clear(hr_dev, i);
1703
1704 if (i == 0)
1705 continue;
1706
		ret = hns_roce_free_vf_resource(hr_dev, i);
		if (ret)
			ibdev_err(&hr_dev->ib_dev,
				  "failed to free vf resource, vf_id = %d, ret = %d.\n",
				  i, ret);
1712 }
1713}
1714
1715static int hns_roce_clear_extdb_list_info(struct hns_roce_dev *hr_dev)
1716{
1717 struct hns_roce_cmq_desc desc;
1718 int ret;
1719
	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLEAR_EXTDB_LIST_INFO,
				      false);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		ibdev_err(&hr_dev->ib_dev,
			  "failed to clear extended doorbell info, ret = %d.\n",
			  ret);
1727
1728 return ret;
1729}
1730
1731static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
1732{
1733 struct hns_roce_query_fw_info *resp;
1734 struct hns_roce_cmq_desc desc;
1735 int ret;
1736
	hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1739 if (ret)
1740 return ret;
1741
1742 resp = (struct hns_roce_query_fw_info *)desc.data;
1743 hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));
1744
1745 return 0;
1746}
1747
1748static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev)
1749{
1750 struct hns_roce_cmq_desc desc;
1751 int ret;
1752
1753 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
1754 hr_dev->func_num = 1;
1755 return 0;
1756 }
1757
	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_FUNC_INFO,
				      true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1761 if (ret) {
1762 hr_dev->func_num = 1;
1763 return ret;
1764 }
1765
1766 hr_dev->func_num = le32_to_cpu(desc.func_info.own_func_num);
1767 hr_dev->cong_algo_tmpl_id = le32_to_cpu(desc.func_info.own_mac_id);
1768
1769 return 0;
1770}
1771
1772static int hns_roce_hw_v2_query_counter(struct hns_roce_dev *hr_dev,
1773 u64 *stats, u32 port, int *num_counters)
1774{
1775#define CNT_PER_DESC 3
1776 struct hns_roce_cmq_desc *desc;
1777 int bd_idx, cnt_idx;
1778 __le64 *cnt_data;
1779 int desc_num;
1780 int ret;
1781 int i;
1782
1783 if (port > hr_dev->caps.num_ports)
1784 return -EINVAL;
1785
1786 desc_num = DIV_ROUND_UP(HNS_ROCE_HW_CNT_TOTAL, CNT_PER_DESC);
1787 desc = kcalloc(desc_num, sizeof(*desc), GFP_KERNEL);
1788 if (!desc)
1789 return -ENOMEM;
1790
1791 for (i = 0; i < desc_num; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_QUERY_COUNTER, true);
1794 if (i != desc_num - 1)
1795 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1796 }
1797
	ret = hns_roce_cmq_send(hr_dev, desc, desc_num);
	if (ret) {
		ibdev_err(&hr_dev->ib_dev,
			  "failed to get counter, ret = %d.\n", ret);
1802 goto err_out;
1803 }
1804
1805 for (i = 0; i < HNS_ROCE_HW_CNT_TOTAL && i < *num_counters; i++) {
1806 bd_idx = i / CNT_PER_DESC;
1807 if (bd_idx != HNS_ROCE_HW_CNT_TOTAL / CNT_PER_DESC &&
1808 !(desc[bd_idx].flag & cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT)))
1809 break;
1810
1811 cnt_data = (__le64 *)&desc[bd_idx].data[0];
1812 cnt_idx = i % CNT_PER_DESC;
1813 stats[i] = le64_to_cpu(cnt_data[cnt_idx]);
1814 }
1815 *num_counters = i;
1816
1817err_out:
	kfree(desc);
1819 return ret;
1820}
1821
1822static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
1823{
1824 struct hns_roce_cmq_desc desc;
1825 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
1826 u32 clock_cycles_of_1us;
1827
	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
				      false);
1830
1831 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
1832 clock_cycles_of_1us = HNS_ROCE_1NS_CFG;
1833 else
1834 clock_cycles_of_1us = HNS_ROCE_1US_CFG;
1835
1836 hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, clock_cycles_of_1us);
1837 hr_reg_write(req, CFG_GLOBAL_PARAM_UDP_PORT, ROCE_V2_UDP_DPORT);
1838
	return hns_roce_cmq_send(hr_dev, &desc, 1);
1840}
1841
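/*
 * Query the per-function resource counts (BT numbers, QID and GMV entries)
 * from firmware and divide the reported totals by the number of functions to
 * get this function's share.
 */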
1842static int load_func_res_caps(struct hns_roce_dev *hr_dev, bool is_vf)
1843{
1844 struct hns_roce_cmq_desc desc[2];
1845 struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
1846 struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
1847 struct hns_roce_caps *caps = &hr_dev->caps;
1848 enum hns_roce_opcode_type opcode;
1849 u32 func_num;
1850 int ret;
1851
1852 if (is_vf) {
1853 opcode = HNS_ROCE_OPC_QUERY_VF_RES;
1854 func_num = 1;
1855 } else {
1856 opcode = HNS_ROCE_OPC_QUERY_PF_RES;
1857 func_num = hr_dev->func_num;
1858 }
1859
	hns_roce_cmq_setup_basic_desc(&desc[0], opcode, true);
	desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	hns_roce_cmq_setup_basic_desc(&desc[1], opcode, true);

	ret = hns_roce_cmq_send(hr_dev, desc, 2);
1865 if (ret)
1866 return ret;
1867
1868 caps->qpc_bt_num = hr_reg_read(r_a, FUNC_RES_A_QPC_BT_NUM) / func_num;
1869 caps->srqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_SRQC_BT_NUM) / func_num;
1870 caps->cqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_CQC_BT_NUM) / func_num;
1871 caps->mpt_bt_num = hr_reg_read(r_a, FUNC_RES_A_MPT_BT_NUM) / func_num;
1872 caps->eqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_EQC_BT_NUM) / func_num;
1873 caps->smac_bt_num = hr_reg_read(r_b, FUNC_RES_B_SMAC_NUM) / func_num;
1874 caps->sgid_bt_num = hr_reg_read(r_b, FUNC_RES_B_SGID_NUM) / func_num;
1875 caps->sccc_bt_num = hr_reg_read(r_b, FUNC_RES_B_SCCC_BT_NUM) / func_num;
1876
1877 if (is_vf) {
1878 caps->sl_num = hr_reg_read(r_b, FUNC_RES_V_QID_NUM) / func_num;
1879 caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_V_GMV_BT_NUM) /
1880 func_num;
1881 } else {
1882 caps->sl_num = hr_reg_read(r_b, FUNC_RES_B_QID_NUM) / func_num;
1883 caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_B_GMV_BT_NUM) /
1884 func_num;
1885 }
1886
1887 return 0;
1888}
1889
1890static int load_pf_timer_res_caps(struct hns_roce_dev *hr_dev)
1891{
1892 struct hns_roce_cmq_desc desc;
1893 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
1894 struct hns_roce_caps *caps = &hr_dev->caps;
1895 int ret;
1896
	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
				      true);

	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1901 if (ret)
1902 return ret;
1903
1904 caps->qpc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_QPC_ITEM_NUM);
1905 caps->cqc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_CQC_ITEM_NUM);
1906
1907 return 0;
1908}
1909
1910static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
1911{
1912 struct device *dev = hr_dev->dev;
1913 int ret;
1914
	ret = load_func_res_caps(hr_dev, false);
1916 if (ret) {
1917 dev_err(dev, "failed to load pf res caps, ret = %d.\n", ret);
1918 return ret;
1919 }
1920
1921 ret = load_pf_timer_res_caps(hr_dev);
1922 if (ret)
1923 dev_err(dev, "failed to load pf timer resource, ret = %d.\n",
1924 ret);
1925
1926 return ret;
1927}
1928
1929static int hns_roce_query_vf_resource(struct hns_roce_dev *hr_dev)
1930{
1931 struct device *dev = hr_dev->dev;
1932 int ret;
1933
	ret = load_func_res_caps(hr_dev, true);
1935 if (ret)
1936 dev_err(dev, "failed to load vf res caps, ret = %d.\n", ret);
1937
1938 return ret;
1939}
1940
1941static int __hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
1942 u32 vf_id)
1943{
1944 struct hns_roce_vf_switch *swt;
1945 struct hns_roce_cmq_desc desc;
1946 int ret;
1947
1948 swt = (struct hns_roce_vf_switch *)desc.data;
	hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
	swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
	hr_reg_write(swt, VF_SWITCH_VF_ID, vf_id);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1953 if (ret)
1954 return ret;
1955
1956 desc.flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN);
1957 desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
1958 hr_reg_enable(swt, VF_SWITCH_ALW_LPBK);
1959 hr_reg_clear(swt, VF_SWITCH_ALW_LCL_LPBK);
1960 hr_reg_enable(swt, VF_SWITCH_ALW_DST_OVRD);
1961
	return hns_roce_cmq_send(hr_dev, &desc, 1);
1963}
1964
1965static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev)
1966{
1967 u32 vf_id;
1968 int ret;
1969
1970 for (vf_id = 0; vf_id < hr_dev->func_num; vf_id++) {
1971 ret = __hns_roce_set_vf_switch_param(hr_dev, vf_id);
1972 if (ret)
1973 return ret;
1974 }
1975 return 0;
1976}
1977
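/*
 * Grant one VF its slice of the context BT resources: every VF gets the
 * same per-function counts loaded above, placed at an index offset of
 * vf_id * count so the slices do not overlap.
 */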
1978static int config_vf_hem_resource(struct hns_roce_dev *hr_dev, int vf_id)
1979{
1980 struct hns_roce_cmq_desc desc[2];
1981 struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
1982 struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
1983 enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
1984 struct hns_roce_caps *caps = &hr_dev->caps;
1985
	hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
	desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
1989
1990 hr_reg_write(r_a, FUNC_RES_A_VF_ID, vf_id);
1991
1992 hr_reg_write(r_a, FUNC_RES_A_QPC_BT_NUM, caps->qpc_bt_num);
1993 hr_reg_write(r_a, FUNC_RES_A_QPC_BT_IDX, vf_id * caps->qpc_bt_num);
1994 hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_NUM, caps->srqc_bt_num);
1995 hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_IDX, vf_id * caps->srqc_bt_num);
1996 hr_reg_write(r_a, FUNC_RES_A_CQC_BT_NUM, caps->cqc_bt_num);
1997 hr_reg_write(r_a, FUNC_RES_A_CQC_BT_IDX, vf_id * caps->cqc_bt_num);
1998 hr_reg_write(r_a, FUNC_RES_A_MPT_BT_NUM, caps->mpt_bt_num);
1999 hr_reg_write(r_a, FUNC_RES_A_MPT_BT_IDX, vf_id * caps->mpt_bt_num);
2000 hr_reg_write(r_a, FUNC_RES_A_EQC_BT_NUM, caps->eqc_bt_num);
2001 hr_reg_write(r_a, FUNC_RES_A_EQC_BT_IDX, vf_id * caps->eqc_bt_num);
2002 hr_reg_write(r_b, FUNC_RES_V_QID_NUM, caps->sl_num);
2003 hr_reg_write(r_b, FUNC_RES_B_QID_IDX, vf_id * caps->sl_num);
2004 hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_NUM, caps->sccc_bt_num);
2005 hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_IDX, vf_id * caps->sccc_bt_num);
2006
2007 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
2008 hr_reg_write(r_b, FUNC_RES_V_GMV_BT_NUM, caps->gmv_bt_num);
2009 hr_reg_write(r_b, FUNC_RES_B_GMV_BT_IDX,
2010 vf_id * caps->gmv_bt_num);
2011 } else {
2012 hr_reg_write(r_b, FUNC_RES_B_SGID_NUM, caps->sgid_bt_num);
2013 hr_reg_write(r_b, FUNC_RES_B_SGID_IDX,
2014 vf_id * caps->sgid_bt_num);
2015 hr_reg_write(r_b, FUNC_RES_B_SMAC_NUM, caps->smac_bt_num);
2016 hr_reg_write(r_b, FUNC_RES_B_SMAC_IDX,
2017 vf_id * caps->smac_bt_num);
2018 }
2019
	return hns_roce_cmq_send(hr_dev, desc, 2);
2021}
2022
2023static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
2024{
2025 u32 func_num = max_t(u32, 1, hr_dev->func_num);
2026 u32 vf_id;
2027 int ret;
2028
2029 for (vf_id = 0; vf_id < func_num; vf_id++) {
2030 ret = config_vf_hem_resource(hr_dev, vf_id);
2031 if (ret) {
2032 dev_err(hr_dev->dev,
2033 "failed to config vf-%u hem res, ret = %d.\n",
2034 vf_id, ret);
2035 return ret;
2036 }
2037 }
2038
2039 return 0;
2040}
2041
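/*
 * Program the base-address table attributes for QPC, SRQC, CQC, MPT and
 * SCCC: the BA/buffer page-size shifts (adjusted by PG_SHIFT_OFFSET) and
 * the hop numbers derived via to_hr_hem_hopnum().
 */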
2042static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
2043{
2044 struct hns_roce_cmq_desc desc;
2045 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
2046 struct hns_roce_caps *caps = &hr_dev->caps;
2047
	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
2049
2050 hr_reg_write(req, CFG_BT_ATTR_QPC_BA_PGSZ,
2051 caps->qpc_ba_pg_sz + PG_SHIFT_OFFSET);
2052 hr_reg_write(req, CFG_BT_ATTR_QPC_BUF_PGSZ,
2053 caps->qpc_buf_pg_sz + PG_SHIFT_OFFSET);
2054 hr_reg_write(req, CFG_BT_ATTR_QPC_HOPNUM,
2055 to_hr_hem_hopnum(caps->qpc_hop_num, caps->num_qps));
2056
2057 hr_reg_write(req, CFG_BT_ATTR_SRQC_BA_PGSZ,
2058 caps->srqc_ba_pg_sz + PG_SHIFT_OFFSET);
2059 hr_reg_write(req, CFG_BT_ATTR_SRQC_BUF_PGSZ,
2060 caps->srqc_buf_pg_sz + PG_SHIFT_OFFSET);
2061 hr_reg_write(req, CFG_BT_ATTR_SRQC_HOPNUM,
2062 to_hr_hem_hopnum(caps->srqc_hop_num, caps->num_srqs));
2063
2064 hr_reg_write(req, CFG_BT_ATTR_CQC_BA_PGSZ,
2065 caps->cqc_ba_pg_sz + PG_SHIFT_OFFSET);
2066 hr_reg_write(req, CFG_BT_ATTR_CQC_BUF_PGSZ,
2067 caps->cqc_buf_pg_sz + PG_SHIFT_OFFSET);
2068 hr_reg_write(req, CFG_BT_ATTR_CQC_HOPNUM,
2069 to_hr_hem_hopnum(caps->cqc_hop_num, caps->num_cqs));
2070
2071 hr_reg_write(req, CFG_BT_ATTR_MPT_BA_PGSZ,
2072 caps->mpt_ba_pg_sz + PG_SHIFT_OFFSET);
2073 hr_reg_write(req, CFG_BT_ATTR_MPT_BUF_PGSZ,
2074 caps->mpt_buf_pg_sz + PG_SHIFT_OFFSET);
2075 hr_reg_write(req, CFG_BT_ATTR_MPT_HOPNUM,
2076 to_hr_hem_hopnum(caps->mpt_hop_num, caps->num_mtpts));
2077
2078 hr_reg_write(req, CFG_BT_ATTR_SCCC_BA_PGSZ,
2079 caps->sccc_ba_pg_sz + PG_SHIFT_OFFSET);
2080 hr_reg_write(req, CFG_BT_ATTR_SCCC_BUF_PGSZ,
2081 caps->sccc_buf_pg_sz + PG_SHIFT_OFFSET);
2082 hr_reg_write(req, CFG_BT_ATTR_SCCC_HOPNUM,
2083 to_hr_hem_hopnum(caps->sccc_hop_num, caps->num_qps));
2084
	return hns_roce_cmq_send(hr_dev, &desc, 1);
2086}
2087
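/*
 * Work out the extra page-size shift needed for a table to hold obj_num
 * entries: obj_per_chunk is the number of objects one PAGE_SIZE chunk can
 * address for the given hop number and BT count, and the result is ilog2
 * of the number of such chunks, written to bt_page_size for MTT-like
 * tables and to buf_page_size for context tables.
 */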
2088static void calc_pg_sz(u32 obj_num, u32 obj_size, u32 hop_num, u32 ctx_bt_num,
2089 u32 *buf_page_size, u32 *bt_page_size, u32 hem_type)
2090{
2091 u64 obj_per_chunk;
2092 u64 bt_chunk_size = PAGE_SIZE;
2093 u64 buf_chunk_size = PAGE_SIZE;
2094 u64 obj_per_chunk_default = buf_chunk_size / obj_size;
2095
2096 *buf_page_size = 0;
2097 *bt_page_size = 0;
2098
2099 switch (hop_num) {
2100 case 3:
2101 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
2102 (bt_chunk_size / BA_BYTE_LEN) *
2103 (bt_chunk_size / BA_BYTE_LEN) *
2104 obj_per_chunk_default;
2105 break;
2106 case 2:
2107 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
2108 (bt_chunk_size / BA_BYTE_LEN) *
2109 obj_per_chunk_default;
2110 break;
2111 case 1:
2112 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
2113 obj_per_chunk_default;
2114 break;
2115 case HNS_ROCE_HOP_NUM_0:
2116 obj_per_chunk = ctx_bt_num * obj_per_chunk_default;
2117 break;
2118 default:
		pr_err("table %u does not support hop_num = %u!\n", hem_type,
		       hop_num);
2121 return;
2122 }
2123
2124 if (hem_type >= HEM_TYPE_MTT)
2125 *bt_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
2126 else
2127 *buf_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
2128}
2129
2130static void set_hem_page_size(struct hns_roce_dev *hr_dev)
2131{
2132 struct hns_roce_caps *caps = &hr_dev->caps;
2133
2134 /* EQ */
2135 caps->eqe_ba_pg_sz = 0;
2136 caps->eqe_buf_pg_sz = 0;
2137
2138 /* Link Table */
2139 caps->llm_buf_pg_sz = 0;
2140
2141 /* MR */
2142 caps->mpt_ba_pg_sz = 0;
2143 caps->mpt_buf_pg_sz = 0;
2144 caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
2145 caps->pbl_buf_pg_sz = 0;
	calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
		   caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
		   HEM_TYPE_MTPT);
2149
2150 /* QP */
2151 caps->qpc_ba_pg_sz = 0;
2152 caps->qpc_buf_pg_sz = 0;
2153 caps->qpc_timer_ba_pg_sz = 0;
2154 caps->qpc_timer_buf_pg_sz = 0;
2155 caps->sccc_ba_pg_sz = 0;
2156 caps->sccc_buf_pg_sz = 0;
2157 caps->mtt_ba_pg_sz = 0;
2158 caps->mtt_buf_pg_sz = 0;
	calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
		   caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
		   HEM_TYPE_QPC);

	if (caps->flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
		calc_pg_sz(caps->num_qps, caps->sccc_sz, caps->sccc_hop_num,
			   caps->sccc_bt_num, &caps->sccc_buf_pg_sz,
			   &caps->sccc_ba_pg_sz, HEM_TYPE_SCCC);
2167
2168 /* CQ */
2169 caps->cqc_ba_pg_sz = 0;
2170 caps->cqc_buf_pg_sz = 0;
2171 caps->cqc_timer_ba_pg_sz = 0;
2172 caps->cqc_timer_buf_pg_sz = 0;
2173 caps->cqe_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_256K;
2174 caps->cqe_buf_pg_sz = 0;
	calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
		   caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
		   HEM_TYPE_CQC);
	calc_pg_sz(caps->max_cqes, caps->cqe_sz, caps->cqe_hop_num,
		   1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
2180
2181 /* SRQ */
2182 if (caps->flags & HNS_ROCE_CAP_FLAG_SRQ) {
2183 caps->srqc_ba_pg_sz = 0;
2184 caps->srqc_buf_pg_sz = 0;
2185 caps->srqwqe_ba_pg_sz = 0;
2186 caps->srqwqe_buf_pg_sz = 0;
2187 caps->idx_ba_pg_sz = 0;
2188 caps->idx_buf_pg_sz = 0;
		calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz,
			   caps->srqc_hop_num, caps->srqc_bt_num,
			   &caps->srqc_buf_pg_sz, &caps->srqc_ba_pg_sz,
			   HEM_TYPE_SRQC);
		calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
			   caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
			   &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
		calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz,
			   caps->idx_hop_num, 1, &caps->idx_buf_pg_sz,
			   &caps->idx_ba_pg_sz, HEM_TYPE_IDX);
2199 }
2200
2201 /* GMV */
2202 caps->gmv_ba_pg_sz = 0;
2203 caps->gmv_buf_pg_sz = 0;
2204}
2205
2206/* Apply all loaded caps before setting to hardware */
2207static void apply_func_caps(struct hns_roce_dev *hr_dev)
2208{
2209#define MAX_GID_TBL_LEN 256
2210 struct hns_roce_caps *caps = &hr_dev->caps;
2211 struct hns_roce_v2_priv *priv = hr_dev->priv;
2212
	/* The following configurations don't need to be obtained from firmware. */
2214 caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
2215 caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
2216 caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
2217
2218 caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
2219 caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
2220 caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
2221
2222 caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
2223 caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
2224
2225 if (!caps->num_comp_vectors)
2226 caps->num_comp_vectors =
2227 min_t(u32, caps->eqc_bt_num - HNS_ROCE_V2_AEQE_VEC_NUM,
2228 (u32)priv->handle->rinfo.num_vectors -
2229 (HNS_ROCE_V2_AEQE_VEC_NUM + HNS_ROCE_V2_ABNORMAL_VEC_NUM));
2230
2231 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
2232 caps->eqe_hop_num = HNS_ROCE_V3_EQE_HOP_NUM;
2233 caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
2234 caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
2235
2236 /* The following configurations will be overwritten */
2237 caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
2238 caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
2239 caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ;
2240
		/* The following configurations are not obtained from firmware */
2242 caps->gmv_entry_sz = HNS_ROCE_V3_GMV_ENTRY_SZ;
2243
2244 caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
2245
		/* It's meaningless to support an excessively large gid_table_len,
		 * as the type of sgid_index in the kernel struct ib_global_route
		 * and the userspace struct ibv_global_route is u8/uint8_t (0-255).
		 */
2250 caps->gid_table_len[0] = min_t(u32, MAX_GID_TBL_LEN,
2251 caps->gmv_bt_num *
2252 (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz));
2253
2254 caps->gmv_entry_num = caps->gmv_bt_num * (HNS_HW_PAGE_SIZE /
2255 caps->gmv_entry_sz);
2256 } else {
2257 u32 func_num = max_t(u32, 1, hr_dev->func_num);
2258
2259 caps->eqe_hop_num = HNS_ROCE_V2_EQE_HOP_NUM;
2260 caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
2261 caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
2262 caps->gid_table_len[0] /= func_num;
2263 }
2264
2265 if (hr_dev->is_vf) {
2266 caps->default_aeq_arm_st = 0x3;
2267 caps->default_ceq_arm_st = 0x3;
2268 caps->default_ceq_max_cnt = 0x1;
2269 caps->default_ceq_period = 0x10;
2270 caps->default_aeq_max_cnt = 0x1;
2271 caps->default_aeq_period = 0x10;
2272 }
2273
2274 set_hem_page_size(hr_dev);
2275}
2276
2277static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
2278{
2279 struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM] = {};
2280 struct hns_roce_caps *caps = &hr_dev->caps;
2281 struct hns_roce_query_pf_caps_a *resp_a;
2282 struct hns_roce_query_pf_caps_b *resp_b;
2283 struct hns_roce_query_pf_caps_c *resp_c;
2284 struct hns_roce_query_pf_caps_d *resp_d;
2285 struct hns_roce_query_pf_caps_e *resp_e;
2286 struct hns_roce_query_pf_caps_f *resp_f;
2287 enum hns_roce_opcode_type cmd;
2288 int ctx_hop_num;
2289 int pbl_hop_num;
2290 int cmd_num;
2291 int ret;
2292 int i;
2293
2294 cmd = hr_dev->is_vf ? HNS_ROCE_OPC_QUERY_VF_CAPS_NUM :
2295 HNS_ROCE_OPC_QUERY_PF_CAPS_NUM;
2296 cmd_num = hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ?
2297 HNS_ROCE_QUERY_PF_CAPS_CMD_NUM_HIP08 :
2298 HNS_ROCE_QUERY_PF_CAPS_CMD_NUM;
2299
2300 for (i = 0; i < cmd_num - 1; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i], cmd, true);
		desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	}

	hns_roce_cmq_setup_basic_desc(&desc[cmd_num - 1], cmd, true);
	desc[cmd_num - 1].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);

	ret = hns_roce_cmq_send(hr_dev, desc, cmd_num);
2309 if (ret)
2310 return ret;
2311
2312 resp_a = (struct hns_roce_query_pf_caps_a *)desc[0].data;
2313 resp_b = (struct hns_roce_query_pf_caps_b *)desc[1].data;
2314 resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data;
2315 resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
2316 resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;
2317 resp_f = (struct hns_roce_query_pf_caps_f *)desc[5].data;
2318
2319 caps->local_ca_ack_delay = resp_a->local_ca_ack_delay;
2320 caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg);
2321 caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline);
2322 caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
2323 caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
2324 caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
2325 caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
2326 caps->num_aeq_vectors = resp_a->num_aeq_vectors;
2327 caps->num_other_vectors = resp_a->num_other_vectors;
2328 caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
2329 caps->max_rq_desc_sz = resp_a->max_rq_desc_sz;
2330
2331 caps->mtpt_entry_sz = resp_b->mtpt_entry_sz;
2332 caps->irrl_entry_sz = resp_b->irrl_entry_sz;
2333 caps->trrl_entry_sz = resp_b->trrl_entry_sz;
2334 caps->cqc_entry_sz = resp_b->cqc_entry_sz;
2335 caps->srqc_entry_sz = resp_b->srqc_entry_sz;
2336 caps->idx_entry_sz = resp_b->idx_entry_sz;
2337 caps->sccc_sz = resp_b->sccc_sz;
2338 caps->max_mtu = resp_b->max_mtu;
2339 caps->min_cqes = resp_b->min_cqes;
2340 caps->min_wqes = resp_b->min_wqes;
2341 caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap);
2342 caps->pkey_table_len[0] = resp_b->pkey_table_len;
2343 caps->phy_num_uars = resp_b->phy_num_uars;
2344 ctx_hop_num = resp_b->ctx_hop_num;
2345 pbl_hop_num = resp_b->pbl_hop_num;
2346
2347 caps->num_pds = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_PDS);
2348
2349 caps->flags = hr_reg_read(resp_c, PF_CAPS_C_CAP_FLAGS);
2350 caps->flags |= le16_to_cpu(resp_d->cap_flags_ex) <<
2351 HNS_ROCE_CAP_FLAGS_EX_SHIFT;
2352
2353 if (hr_dev->is_vf)
2354 caps->flags &= ~HNS_ROCE_CAP_FLAG_BOND;
2355
2356 caps->num_cqs = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_CQS);
2357 caps->gid_table_len[0] = hr_reg_read(resp_c, PF_CAPS_C_MAX_GID);
2358 caps->max_cqes = 1 << hr_reg_read(resp_c, PF_CAPS_C_CQ_DEPTH);
2359 caps->num_xrcds = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_XRCDS);
2360 caps->num_mtpts = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_MRWS);
2361 caps->num_qps = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_QPS);
2362 caps->max_qp_init_rdma = hr_reg_read(resp_c, PF_CAPS_C_MAX_ORD);
2363 caps->max_qp_dest_rdma = caps->max_qp_init_rdma;
2364 caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth);
2365
2366 caps->num_srqs = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_SRQS);
2367 caps->cong_cap = hr_reg_read(resp_d, PF_CAPS_D_CONG_CAP);
2368 caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);
2369 caps->ceqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_CEQ_DEPTH);
2370 caps->num_comp_vectors = hr_reg_read(resp_d, PF_CAPS_D_NUM_CEQS);
2371 caps->aeqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_AEQ_DEPTH);
2372 caps->default_cong_type = hr_reg_read(resp_d, PF_CAPS_D_DEFAULT_ALG);
2373 caps->reserved_pds = hr_reg_read(resp_d, PF_CAPS_D_RSV_PDS);
2374 caps->num_uars = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_UARS);
2375 caps->reserved_qps = hr_reg_read(resp_d, PF_CAPS_D_RSV_QPS);
2376 caps->reserved_uars = hr_reg_read(resp_d, PF_CAPS_D_RSV_UARS);
2377
2378 caps->reserved_mrws = hr_reg_read(resp_e, PF_CAPS_E_RSV_MRWS);
2379 caps->chunk_sz = 1 << hr_reg_read(resp_e, PF_CAPS_E_CHUNK_SIZE_SHIFT);
2380 caps->reserved_cqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_CQS);
2381 caps->reserved_xrcds = hr_reg_read(resp_e, PF_CAPS_E_RSV_XRCDS);
2382 caps->reserved_srqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_SRQS);
2383 caps->reserved_lkey = hr_reg_read(resp_e, PF_CAPS_E_RSV_LKEYS);
2384
2385 caps->max_ack_req_msg_len = le32_to_cpu(resp_f->max_ack_req_msg_len);
2386
2387 caps->qpc_hop_num = ctx_hop_num;
2388 caps->sccc_hop_num = ctx_hop_num;
2389 caps->srqc_hop_num = ctx_hop_num;
2390 caps->cqc_hop_num = ctx_hop_num;
2391 caps->mpt_hop_num = ctx_hop_num;
2392 caps->mtt_hop_num = pbl_hop_num;
2393 caps->cqe_hop_num = pbl_hop_num;
2394 caps->srqwqe_hop_num = pbl_hop_num;
2395 caps->idx_hop_num = pbl_hop_num;
2396 caps->wqe_sq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_SQWQE_HOP_NUM);
2397 caps->wqe_sge_hop_num = hr_reg_read(resp_d, PF_CAPS_D_EX_SGE_HOP_NUM);
2398 caps->wqe_rq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_RQWQE_HOP_NUM);
2399
2400 if (!(caps->page_size_cap & PAGE_SIZE))
2401 caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
2402
2403 if (!hr_dev->is_vf) {
2404 caps->cqe_sz = resp_a->cqe_sz;
2405 caps->qpc_sz = le16_to_cpu(resp_b->qpc_sz);
2406 caps->default_aeq_arm_st =
2407 hr_reg_read(resp_d, PF_CAPS_D_AEQ_ARM_ST);
2408 caps->default_ceq_arm_st =
2409 hr_reg_read(resp_d, PF_CAPS_D_CEQ_ARM_ST);
2410 caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt);
2411 caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period);
2412 caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt);
2413 caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period);
2414 }
2415
2416 return 0;
2417}
2418
2419static int config_hem_entry_size(struct hns_roce_dev *hr_dev, u32 type, u32 val)
2420{
2421 struct hns_roce_cmq_desc desc;
2422 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
2423
	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
				      false);
2426
2427 hr_reg_write(req, CFG_HEM_ENTRY_SIZE_TYPE, type);
2428 hr_reg_write(req, CFG_HEM_ENTRY_SIZE_VALUE, val);
2429
	return hns_roce_cmq_send(hr_dev, &desc, 1);
2431}
2432
2433static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
2434{
2435 struct hns_roce_caps *caps = &hr_dev->caps;
2436 int ret;
2437
2438 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
2439 return 0;
2440
	ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_QPC_SIZE,
				    caps->qpc_sz);
2443 if (ret) {
2444 dev_err(hr_dev->dev, "failed to cfg qpc sz, ret = %d.\n", ret);
2445 return ret;
2446 }
2447
	ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_SCCC_SIZE,
				    caps->sccc_sz);
2450 if (ret)
2451 dev_err(hr_dev->dev, "failed to cfg sccc sz, ret = %d.\n", ret);
2452
2453 return ret;
2454}
2455
2456static int hns_roce_v2_vf_profile(struct hns_roce_dev *hr_dev)
2457{
2458 struct device *dev = hr_dev->dev;
2459 int ret;
2460
2461 hr_dev->func_num = 1;
2462
2463 ret = hns_roce_query_caps(hr_dev);
2464 if (ret) {
2465 dev_err(dev, "failed to query VF caps, ret = %d.\n", ret);
2466 return ret;
2467 }
2468
2469 ret = hns_roce_query_vf_resource(hr_dev);
2470 if (ret) {
2471 dev_err(dev, "failed to query VF resource, ret = %d.\n", ret);
2472 return ret;
2473 }
2474
2475 apply_func_caps(hr_dev);
2476
2477 ret = hns_roce_v2_set_bt(hr_dev);
2478 if (ret)
2479 dev_err(dev, "failed to config VF BA table, ret = %d.\n", ret);
2480
2481 return ret;
2482}
2483
2484static int hns_roce_v2_pf_profile(struct hns_roce_dev *hr_dev)
2485{
2486 struct device *dev = hr_dev->dev;
2487 int ret;
2488
2489 ret = hns_roce_query_func_info(hr_dev);
2490 if (ret) {
2491 dev_err(dev, "failed to query func info, ret = %d.\n", ret);
2492 return ret;
2493 }
2494
2495 ret = hns_roce_config_global_param(hr_dev);
2496 if (ret) {
2497 dev_err(dev, "failed to config global param, ret = %d.\n", ret);
2498 return ret;
2499 }
2500
2501 ret = hns_roce_set_vf_switch_param(hr_dev);
2502 if (ret) {
2503 dev_err(dev, "failed to set switch param, ret = %d.\n", ret);
2504 return ret;
2505 }
2506
2507 ret = hns_roce_query_caps(hr_dev);
2508 if (ret) {
2509 dev_err(dev, "failed to query PF caps, ret = %d.\n", ret);
2510 return ret;
2511 }
2512
2513 ret = hns_roce_query_pf_resource(hr_dev);
2514 if (ret) {
2515 dev_err(dev, "failed to query pf resource, ret = %d.\n", ret);
2516 return ret;
2517 }
2518
2519 apply_func_caps(hr_dev);
2520
2521 ret = hns_roce_alloc_vf_resource(hr_dev);
2522 if (ret) {
2523 dev_err(dev, "failed to alloc vf resource, ret = %d.\n", ret);
2524 return ret;
2525 }
2526
2527 ret = hns_roce_v2_set_bt(hr_dev);
2528 if (ret) {
2529 dev_err(dev, "failed to config BA table, ret = %d.\n", ret);
2530 return ret;
2531 }
2532
2533 /* Configure the size of QPC, SCCC, etc. */
2534 return hns_roce_config_entry_size(hr_dev);
2535}
2536
2537static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
2538{
2539 struct device *dev = hr_dev->dev;
2540 int ret;
2541
2542 ret = hns_roce_cmq_query_hw_info(hr_dev);
2543 if (ret) {
2544 dev_err(dev, "failed to query hardware info, ret = %d.\n", ret);
2545 return ret;
2546 }
2547
2548 ret = hns_roce_query_fw_ver(hr_dev);
2549 if (ret) {
2550 dev_err(dev, "failed to query firmware info, ret = %d.\n", ret);
2551 return ret;
2552 }
2553
2554 hr_dev->vendor_part_id = hr_dev->pci_dev->device;
2555 hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
2556
2557 if (hr_dev->is_vf)
2558 return hns_roce_v2_vf_profile(hr_dev);
2559 else
2560 return hns_roce_v2_pf_profile(hr_dev);
2561}
2562
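/*
 * Fill the link-list-mode config table: each entry encodes the DMA address
 * of one data page together with the index of the next page, and the last
 * entry points back to 0 to terminate the list.
 */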
2563static void config_llm_table(struct hns_roce_buf *data_buf, void *cfg_buf)
2564{
2565 u32 i, next_ptr, page_num;
2566 __le64 *entry = cfg_buf;
2567 dma_addr_t addr;
2568 u64 val;
2569
2570 page_num = data_buf->npages;
2571 for (i = 0; i < page_num; i++) {
		addr = hns_roce_buf_page(data_buf, i);
2573 if (i == (page_num - 1))
2574 next_ptr = 0;
2575 else
2576 next_ptr = i + 1;
2577
2578 val = HNS_ROCE_EXT_LLM_ENTRY(addr, (u64)next_ptr);
2579 entry[i] = cpu_to_le64(val);
2580 }
2581}
2582
2583static int set_llm_cfg_to_hw(struct hns_roce_dev *hr_dev,
2584 struct hns_roce_link_table *table)
2585{
2586 struct hns_roce_cmq_desc desc[2];
2587 struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
2588 struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
2589 struct hns_roce_buf *buf = table->buf;
2590 enum hns_roce_opcode_type opcode;
2591 dma_addr_t addr;
2592
2593 opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
2594 hns_roce_cmq_setup_basic_desc(desc: &desc[0], opcode, is_read: false);
2595 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2596 hns_roce_cmq_setup_basic_desc(desc: &desc[1], opcode, is_read: false);
2597
2598 hr_reg_write(r_a, CFG_LLM_A_BA_L, lower_32_bits(table->table.map));
2599 hr_reg_write(r_a, CFG_LLM_A_BA_H, upper_32_bits(table->table.map));
2600 hr_reg_write(r_a, CFG_LLM_A_DEPTH, buf->npages);
2601 hr_reg_write(r_a, CFG_LLM_A_PGSZ, to_hr_hw_page_shift(buf->page_shift));
2602 hr_reg_enable(r_a, CFG_LLM_A_INIT_EN);
2603
2604 addr = to_hr_hw_page_addr(addr: hns_roce_buf_page(buf, idx: 0));
2605 hr_reg_write(r_a, CFG_LLM_A_HEAD_BA_L, lower_32_bits(addr));
2606 hr_reg_write(r_a, CFG_LLM_A_HEAD_BA_H, upper_32_bits(addr));
2607 hr_reg_write(r_a, CFG_LLM_A_HEAD_NXTPTR, 1);
2608 hr_reg_write(r_a, CFG_LLM_A_HEAD_PTR, 0);
2609
2610 addr = to_hr_hw_page_addr(addr: hns_roce_buf_page(buf, idx: buf->npages - 1));
2611 hr_reg_write(r_b, CFG_LLM_B_TAIL_BA_L, lower_32_bits(addr));
2612 hr_reg_write(r_b, CFG_LLM_B_TAIL_BA_H, upper_32_bits(addr));
2613 hr_reg_write(r_b, CFG_LLM_B_TAIL_PTR, buf->npages - 1);
2614
2615 return hns_roce_cmq_send(hr_dev, desc, num: 2);
2616}
2617
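/*
 * Allocate the extended link-list-mode buffers: a data buffer sized for
 * num_qps * func_num entries (but no smaller than the minimum pages needed
 * for all SLs), plus a coherent config table holding one 64-bit entry per
 * data page.
 */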
2618static struct hns_roce_link_table *
2619alloc_link_table_buf(struct hns_roce_dev *hr_dev)
2620{
2621 u16 total_sl = hr_dev->caps.sl_num * hr_dev->func_num;
2622 struct hns_roce_v2_priv *priv = hr_dev->priv;
2623 struct hns_roce_link_table *link_tbl;
2624 u32 pg_shift, size, min_size;
2625
2626 link_tbl = &priv->ext_llm;
2627 pg_shift = hr_dev->caps.llm_buf_pg_sz + PAGE_SHIFT;
2628 size = hr_dev->caps.num_qps * hr_dev->func_num *
2629 HNS_ROCE_V2_EXT_LLM_ENTRY_SZ;
2630 min_size = HNS_ROCE_EXT_LLM_MIN_PAGES(total_sl) << pg_shift;
2631
2632 /* Alloc data table */
2633 size = max(size, min_size);
	link_tbl->buf = hns_roce_buf_alloc(hr_dev, size, pg_shift, 0);
	if (IS_ERR(link_tbl->buf))
		return ERR_PTR(-ENOMEM);

	/* Alloc config table */
	size = link_tbl->buf->npages * sizeof(u64);
	link_tbl->table.buf = dma_alloc_coherent(hr_dev->dev, size,
						 &link_tbl->table.map,
						 GFP_KERNEL);
	if (!link_tbl->table.buf) {
		hns_roce_buf_free(hr_dev, link_tbl->buf);
		return ERR_PTR(-ENOMEM);
2646 }
2647
2648 return link_tbl;
2649}
2650
2651static void free_link_table_buf(struct hns_roce_dev *hr_dev,
2652 struct hns_roce_link_table *tbl)
2653{
2654 if (tbl->buf) {
2655 u32 size = tbl->buf->npages * sizeof(u64);
2656
2657 dma_free_coherent(dev: hr_dev->dev, size, cpu_addr: tbl->table.buf,
2658 dma_handle: tbl->table.map);
2659 }
2660
2661 hns_roce_buf_free(hr_dev, buf: tbl->buf);
2662}
2663
2664static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev)
2665{
2666 struct hns_roce_link_table *link_tbl;
2667 int ret;
2668
2669 link_tbl = alloc_link_table_buf(hr_dev);
2670 if (IS_ERR(ptr: link_tbl))
2671 return -ENOMEM;
2672
2673 if (WARN_ON(link_tbl->buf->npages > HNS_ROCE_V2_EXT_LLM_MAX_DEPTH)) {
2674 ret = -EINVAL;
2675 goto err_alloc;
2676 }
2677
2678 config_llm_table(data_buf: link_tbl->buf, cfg_buf: link_tbl->table.buf);
2679 ret = set_llm_cfg_to_hw(hr_dev, table: link_tbl);
2680 if (ret)
2681 goto err_alloc;
2682
2683 return 0;
2684
2685err_alloc:
2686 free_link_table_buf(hr_dev, tbl: link_tbl);
2687 return ret;
2688}
2689
2690static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev)
2691{
2692 struct hns_roce_v2_priv *priv = hr_dev->priv;
2693
2694 free_link_table_buf(hr_dev, tbl: &priv->ext_llm);
2695}
2696
2697static void free_dip_entry(struct hns_roce_dev *hr_dev)
2698{
2699 struct hns_roce_dip *hr_dip;
2700 unsigned long idx;
2701
2702 xa_lock(&hr_dev->qp_table.dip_xa);
2703
2704 xa_for_each(&hr_dev->qp_table.dip_xa, idx, hr_dip) {
2705 __xa_erase(&hr_dev->qp_table.dip_xa, index: hr_dip->dip_idx);
2706 kfree(objp: hr_dip);
2707 }
2708
2709 xa_unlock(&hr_dev->qp_table.dip_xa);
2710}
2711
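/*
 * HIP08 keeps a set of reserved verbs resources (a PD, a CQ and one
 * loopback RC QP per service level) that are used to post flushing work
 * when an MR is deregistered, see free_mr_send_cmd_to_hw().
 */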
2712static struct ib_pd *free_mr_init_pd(struct hns_roce_dev *hr_dev)
2713{
2714 struct hns_roce_v2_priv *priv = hr_dev->priv;
2715 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2716 struct ib_device *ibdev = &hr_dev->ib_dev;
2717 struct hns_roce_pd *hr_pd;
2718 struct ib_pd *pd;
2719
2720 hr_pd = kzalloc(sizeof(*hr_pd), GFP_KERNEL);
2721 if (!hr_pd)
2722 return NULL;
2723 pd = &hr_pd->ibpd;
2724 pd->device = ibdev;
2725
	if (hns_roce_alloc_pd(pd, NULL)) {
		ibdev_err(ibdev, "failed to create pd for free mr.\n");
		kfree(hr_pd);
		return NULL;
	}
	free_mr->rsv_pd = to_hr_pd(pd);
	free_mr->rsv_pd->ibpd.device = &hr_dev->ib_dev;
	free_mr->rsv_pd->ibpd.uobject = NULL;
	free_mr->rsv_pd->ibpd.__internal_mr = NULL;
	atomic_set(&free_mr->rsv_pd->ibpd.usecnt, 0);
2736
2737 return pd;
2738}
2739
2740static struct ib_cq *free_mr_init_cq(struct hns_roce_dev *hr_dev)
2741{
2742 struct hns_roce_v2_priv *priv = hr_dev->priv;
2743 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2744 struct ib_device *ibdev = &hr_dev->ib_dev;
2745 struct ib_cq_init_attr cq_init_attr = {};
2746 struct hns_roce_cq *hr_cq;
2747 struct ib_cq *cq;
2748
2749 cq_init_attr.cqe = HNS_ROCE_FREE_MR_USED_CQE_NUM;
2750
2751 hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);
2752 if (!hr_cq)
2753 return NULL;
2754
2755 cq = &hr_cq->ib_cq;
2756 cq->device = ibdev;
2757
2758 if (hns_roce_create_cq(ib_cq: cq, attr: &cq_init_attr, NULL)) {
2759 ibdev_err(ibdev, format: "failed to create cq for free mr.\n");
2760 kfree(objp: hr_cq);
2761 return NULL;
2762 }
2763 free_mr->rsv_cq = to_hr_cq(ib_cq: cq);
2764 free_mr->rsv_cq->ib_cq.device = &hr_dev->ib_dev;
2765 free_mr->rsv_cq->ib_cq.uobject = NULL;
2766 free_mr->rsv_cq->ib_cq.comp_handler = NULL;
2767 free_mr->rsv_cq->ib_cq.event_handler = NULL;
2768 free_mr->rsv_cq->ib_cq.cq_context = NULL;
2769 atomic_set(v: &free_mr->rsv_cq->ib_cq.usecnt, i: 0);
2770
2771 return cq;
2772}
2773
2774static int free_mr_init_qp(struct hns_roce_dev *hr_dev, struct ib_cq *cq,
2775 struct ib_qp_init_attr *init_attr, int i)
2776{
2777 struct hns_roce_v2_priv *priv = hr_dev->priv;
2778 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2779 struct ib_device *ibdev = &hr_dev->ib_dev;
2780 struct hns_roce_qp *hr_qp;
2781 struct ib_qp *qp;
2782 int ret;
2783
2784 hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
2785 if (!hr_qp)
2786 return -ENOMEM;
2787
2788 qp = &hr_qp->ibqp;
2789 qp->device = ibdev;
2790
2791 ret = hns_roce_create_qp(ib_qp: qp, init_attr, NULL);
2792 if (ret) {
2793 ibdev_err(ibdev, format: "failed to create qp for free mr.\n");
2794 kfree(objp: hr_qp);
2795 return ret;
2796 }
2797
2798 free_mr->rsv_qp[i] = hr_qp;
2799 free_mr->rsv_qp[i]->ibqp.recv_cq = cq;
2800 free_mr->rsv_qp[i]->ibqp.send_cq = cq;
2801
2802 return 0;
2803}
2804
2805static void free_mr_exit(struct hns_roce_dev *hr_dev)
2806{
2807 struct hns_roce_v2_priv *priv = hr_dev->priv;
2808 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2809 struct ib_qp *qp;
2810 int i;
2811
2812 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
2813 if (free_mr->rsv_qp[i]) {
2814 qp = &free_mr->rsv_qp[i]->ibqp;
2815 hns_roce_v2_destroy_qp(ibqp: qp, NULL);
2816 kfree(objp: free_mr->rsv_qp[i]);
2817 free_mr->rsv_qp[i] = NULL;
2818 }
2819 }
2820
2821 if (free_mr->rsv_cq) {
2822 hns_roce_destroy_cq(ib_cq: &free_mr->rsv_cq->ib_cq, NULL);
2823 kfree(objp: free_mr->rsv_cq);
2824 free_mr->rsv_cq = NULL;
2825 }
2826
2827 if (free_mr->rsv_pd) {
2828 hns_roce_dealloc_pd(pd: &free_mr->rsv_pd->ibpd, NULL);
2829 kfree(objp: free_mr->rsv_pd);
2830 free_mr->rsv_pd = NULL;
2831 }
2832
2833 mutex_destroy(lock: &free_mr->mutex);
2834}
2835
2836static int free_mr_alloc_res(struct hns_roce_dev *hr_dev)
2837{
2838 struct hns_roce_v2_priv *priv = hr_dev->priv;
2839 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2840 struct ib_qp_init_attr qp_init_attr = {};
2841 struct ib_pd *pd;
2842 struct ib_cq *cq;
2843 int ret;
2844 int i;
2845
2846 pd = free_mr_init_pd(hr_dev);
2847 if (!pd)
2848 return -ENOMEM;
2849
2850 cq = free_mr_init_cq(hr_dev);
2851 if (!cq) {
2852 ret = -ENOMEM;
2853 goto create_failed_cq;
2854 }
2855
2856 qp_init_attr.qp_type = IB_QPT_RC;
2857 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
2858 qp_init_attr.send_cq = cq;
2859 qp_init_attr.recv_cq = cq;
2860 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
2861 qp_init_attr.cap.max_send_wr = HNS_ROCE_FREE_MR_USED_SQWQE_NUM;
2862 qp_init_attr.cap.max_send_sge = HNS_ROCE_FREE_MR_USED_SQSGE_NUM;
2863 qp_init_attr.cap.max_recv_wr = HNS_ROCE_FREE_MR_USED_RQWQE_NUM;
2864 qp_init_attr.cap.max_recv_sge = HNS_ROCE_FREE_MR_USED_RQSGE_NUM;
2865
2866 ret = free_mr_init_qp(hr_dev, cq, init_attr: &qp_init_attr, i);
2867 if (ret)
2868 goto create_failed_qp;
2869 }
2870
2871 return 0;
2872
2873create_failed_qp:
2874 for (i--; i >= 0; i--) {
2875 hns_roce_v2_destroy_qp(ibqp: &free_mr->rsv_qp[i]->ibqp, NULL);
2876 kfree(objp: free_mr->rsv_qp[i]);
2877 }
2878 hns_roce_destroy_cq(ib_cq: cq, NULL);
2879 kfree(objp: cq);
2880
2881create_failed_cq:
2882 hns_roce_dealloc_pd(pd, NULL);
2883 kfree(objp: pd);
2884
2885 return ret;
2886}
2887
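/*
 * Bring one reserved loopback QP up to RTS: INIT -> RTR -> RTS, with the
 * loopback indicator forced on for the RTR transition and the SL taken
 * from the QP's index in the reserved array.
 */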
2888static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
2889 struct ib_qp_attr *attr, int sl_num)
2890{
2891 struct hns_roce_v2_priv *priv = hr_dev->priv;
2892 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2893 struct ib_device *ibdev = &hr_dev->ib_dev;
2894 struct hns_roce_qp *hr_qp;
2895 int loopback;
2896 int mask;
2897 int ret;
2898
	hr_qp = to_hr_qp(&free_mr->rsv_qp[sl_num]->ibqp);
2900 hr_qp->free_mr_en = 1;
2901 hr_qp->ibqp.device = ibdev;
2902 hr_qp->ibqp.qp_type = IB_QPT_RC;
2903
2904 mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS;
2905 attr->qp_state = IB_QPS_INIT;
2906 attr->port_num = 1;
2907 attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
2908 ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT,
2909 IB_QPS_INIT, NULL);
2910 if (ret) {
2911 ibdev_err_ratelimited(ibdev, "failed to modify qp to init, ret = %d.\n",
2912 ret);
2913 return ret;
2914 }
2915
2916 loopback = hr_dev->loop_idc;
	/* Set qpc lbi = 1 to indicate loopback IO */
2918 hr_dev->loop_idc = 1;
2919
2920 mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
2921 IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
2922 attr->qp_state = IB_QPS_RTR;
2923 attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2924 attr->path_mtu = IB_MTU_256;
2925 attr->dest_qp_num = hr_qp->qpn;
2926 attr->rq_psn = HNS_ROCE_FREE_MR_USED_PSN;
2927
	rdma_ah_set_sl(&attr->ah_attr, (u8)sl_num);
2929
2930 ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT,
2931 IB_QPS_RTR, NULL);
2932 hr_dev->loop_idc = loopback;
2933 if (ret) {
2934 ibdev_err(ibdev, format: "failed to modify qp to rtr, ret = %d.\n",
2935 ret);
2936 return ret;
2937 }
2938
2939 mask = IB_QP_STATE | IB_QP_SQ_PSN | IB_QP_RETRY_CNT | IB_QP_TIMEOUT |
2940 IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC;
2941 attr->qp_state = IB_QPS_RTS;
2942 attr->sq_psn = HNS_ROCE_FREE_MR_USED_PSN;
2943 attr->retry_cnt = HNS_ROCE_FREE_MR_USED_QP_RETRY_CNT;
2944 attr->timeout = HNS_ROCE_FREE_MR_USED_QP_TIMEOUT;
2945 ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_RTR,
2946 IB_QPS_RTS, NULL);
2947 if (ret)
2948 ibdev_err(ibdev, format: "failed to modify qp to rts, ret = %d.\n",
2949 ret);
2950
2951 return ret;
2952}
2953
2954static int free_mr_modify_qp(struct hns_roce_dev *hr_dev)
2955{
2956 struct hns_roce_v2_priv *priv = hr_dev->priv;
2957 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2958 struct ib_qp_attr attr = {};
2959 int ret;
2960 int i;
2961
2962 rdma_ah_set_grh(attr: &attr.ah_attr, NULL, flow_label: 0, sgid_index: 0, hop_limit: 1, traffic_class: 0);
2963 rdma_ah_set_static_rate(attr: &attr.ah_attr, static_rate: 3);
2964 rdma_ah_set_port_num(attr: &attr.ah_attr, port_num: 1);
2965
2966 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
2967 ret = free_mr_modify_rsv_qp(hr_dev, attr: &attr, sl_num: i);
2968 if (ret)
2969 return ret;
2970 }
2971
2972 return 0;
2973}
2974
2975static int free_mr_init(struct hns_roce_dev *hr_dev)
2976{
2977 struct hns_roce_v2_priv *priv = hr_dev->priv;
2978 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2979 int ret;
2980
2981 mutex_init(&free_mr->mutex);
2982
2983 ret = free_mr_alloc_res(hr_dev);
2984 if (ret) {
2985 mutex_destroy(lock: &free_mr->mutex);
2986 return ret;
2987 }
2988
2989 ret = free_mr_modify_qp(hr_dev);
2990 if (ret)
2991 goto err_modify_qp;
2992
2993 return 0;
2994
2995err_modify_qp:
2996 free_mr_exit(hr_dev);
2997
2998 return ret;
2999}
3000
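/*
 * Pre-allocate the HEM backing memory that must exist before the device is
 * used: the whole GMV table, and for the PF also every QPC timer and CQC
 * timer BT entry.
 */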
3001static int get_hem_table(struct hns_roce_dev *hr_dev)
3002{
3003 unsigned int qpc_count;
3004 unsigned int cqc_count;
3005 unsigned int gmv_count;
3006 int ret;
3007 int i;
3008
3009 /* Alloc memory for source address table buffer space chunk */
3010 for (gmv_count = 0; gmv_count < hr_dev->caps.gmv_entry_num;
3011 gmv_count++) {
		ret = hns_roce_table_get(hr_dev, &hr_dev->gmv_table, gmv_count);
3013 if (ret)
3014 goto err_gmv_failed;
3015 }
3016
3017 if (hr_dev->is_vf)
3018 return 0;
3019
3020 /* Alloc memory for QPC Timer buffer space chunk */
3021 for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
3022 qpc_count++) {
		ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
					 qpc_count);
3025 if (ret) {
3026 dev_err(hr_dev->dev, "QPC Timer get failed\n");
3027 goto err_qpc_timer_failed;
3028 }
3029 }
3030
3031 /* Alloc memory for CQC Timer buffer space chunk */
3032 for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
3033 cqc_count++) {
		ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
					 cqc_count);
3036 if (ret) {
3037 dev_err(hr_dev->dev, "CQC Timer get failed\n");
3038 goto err_cqc_timer_failed;
3039 }
3040 }
3041
3042 return 0;
3043
3044err_cqc_timer_failed:
	for (i = 0; i < cqc_count; i++)
		hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);

err_qpc_timer_failed:
	for (i = 0; i < qpc_count; i++)
		hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);

err_gmv_failed:
	for (i = 0; i < gmv_count; i++)
		hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
3055
3056 return ret;
3057}
3058
3059static void put_hem_table(struct hns_roce_dev *hr_dev)
3060{
3061 int i;
3062
3063 for (i = 0; i < hr_dev->caps.gmv_entry_num; i++)
3064 hns_roce_table_put(hr_dev, table: &hr_dev->gmv_table, obj: i);
3065
3066 if (hr_dev->is_vf)
3067 return;
3068
3069 for (i = 0; i < hr_dev->caps.qpc_timer_bt_num; i++)
3070 hns_roce_table_put(hr_dev, table: &hr_dev->qpc_timer_table, obj: i);
3071
3072 for (i = 0; i < hr_dev->caps.cqc_timer_bt_num; i++)
3073 hns_roce_table_put(hr_dev, table: &hr_dev->cqc_timer_table, obj: i);
3074}
3075
3076static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
3077{
3078 int ret;
3079
3080 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
3081 ret = free_mr_init(hr_dev);
3082 if (ret) {
3083 dev_err(hr_dev->dev, "failed to init free mr!\n");
3084 return ret;
3085 }
3086 }
3087
	/* The hns ROCEE requires the extdb info to be cleared before use */
3089 ret = hns_roce_clear_extdb_list_info(hr_dev);
3090 if (ret)
3091 goto err_clear_extdb_failed;
3092
3093 ret = get_hem_table(hr_dev);
3094 if (ret)
3095 goto err_get_hem_table_failed;
3096
3097 if (hr_dev->is_vf)
3098 return 0;
3099
3100 ret = hns_roce_init_link_table(hr_dev);
3101 if (ret) {
3102 dev_err(hr_dev->dev, "failed to init llm, ret = %d.\n", ret);
3103 goto err_llm_init_failed;
3104 }
3105
3106 return 0;
3107
3108err_llm_init_failed:
3109 put_hem_table(hr_dev);
3110err_get_hem_table_failed:
3111 hns_roce_function_clear(hr_dev);
3112err_clear_extdb_failed:
3113 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
3114 free_mr_exit(hr_dev);
3115
3116 return ret;
3117}
3118
3119static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
3120{
3121 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
3122 free_mr_exit(hr_dev);
3123
3124 hns_roce_function_clear(hr_dev);
3125
3126 if (!hr_dev->is_vf)
3127 hns_roce_free_link_table(hr_dev);
3128
3129 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
3130 free_dip_entry(hr_dev);
3131}
3132
3133static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev,
3134 struct hns_roce_mbox_msg *mbox_msg)
3135{
3136 struct hns_roce_cmq_desc desc;
3137 struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
3138
3139 hns_roce_cmq_setup_basic_desc(desc: &desc, opcode: HNS_ROCE_OPC_POST_MB, is_read: false);
3140
3141 mb->in_param_l = cpu_to_le32(mbox_msg->in_param);
3142 mb->in_param_h = cpu_to_le32(mbox_msg->in_param >> 32);
3143 mb->out_param_l = cpu_to_le32(mbox_msg->out_param);
3144 mb->out_param_h = cpu_to_le32(mbox_msg->out_param >> 32);
3145 mb->cmd_tag = cpu_to_le32(mbox_msg->tag << 8 | mbox_msg->cmd);
3146 mb->token_event_en = cpu_to_le32(mbox_msg->event_en << 16 |
3147 mbox_msg->token);
3148
3149 return hns_roce_cmq_send(hr_dev, desc: &desc, num: 1);
3150}
3151
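/*
 * Poll the mailbox status via HNS_ROCE_OPC_QUERY_MB_ST until the
 * hardware-run bit clears, the command queue reports a fatal error, or the
 * timeout expires. On success the completion status is returned to the
 * caller through complete_status.
 */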
3152static int v2_wait_mbox_complete(struct hns_roce_dev *hr_dev, u32 timeout,
3153 u8 *complete_status)
3154{
3155 struct hns_roce_mbox_status *mb_st;
3156 struct hns_roce_cmq_desc desc;
3157 unsigned long end;
3158 int ret = -EBUSY;
3159 u32 status;
3160 bool busy;
3161
3162 mb_st = (struct hns_roce_mbox_status *)desc.data;
	end = msecs_to_jiffies(timeout) + jiffies;
	while (v2_chk_mbox_is_avail(hr_dev, &busy)) {
		if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR)
			return -EIO;

		status = 0;
		hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST,
					      true);
		ret = __hns_roce_cmq_send(hr_dev, &desc, 1);
3172 if (!ret) {
3173 status = le32_to_cpu(mb_st->mb_status_hw_run);
3174 /* No pending message exists in ROCEE mbox. */
3175 if (!(status & MB_ST_HW_RUN_M))
3176 break;
		} else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) {
3178 break;
3179 }
3180
3181 if (time_after(jiffies, end)) {
3182 dev_err_ratelimited(hr_dev->dev,
3183 "failed to wait mbox status 0x%x\n",
3184 status);
3185 return -ETIMEDOUT;
3186 }
3187
3188 cond_resched();
3189 ret = -EBUSY;
3190 }
3191
3192 if (!ret) {
3193 *complete_status = (u8)(status & MB_ST_COMPLETE_M);
	} else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) {
3195 /* Ignore all errors if the mbox is unavailable. */
3196 ret = 0;
3197 *complete_status = MB_ST_COMPLETE_M;
3198 }
3199
3200 return ret;
3201}
3202
3203static int v2_post_mbox(struct hns_roce_dev *hr_dev,
3204 struct hns_roce_mbox_msg *mbox_msg)
3205{
3206 u8 status = 0;
3207 int ret;
3208
	/* Wait for the mbox to be idle */
3210 ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS,
3211 complete_status: &status);
3212 if (unlikely(ret)) {
3213 dev_err_ratelimited(hr_dev->dev,
3214 "failed to check post mbox status = 0x%x, ret = %d.\n",
3215 status, ret);
3216 return ret;
3217 }
3218
3219 /* Post new message to mbox */
3220 ret = hns_roce_mbox_post(hr_dev, mbox_msg);
3221 if (ret)
3222 dev_err_ratelimited(hr_dev->dev,
3223 "failed to post mailbox, ret = %d.\n", ret);
3224
3225 return ret;
3226}
3227
3228static int v2_poll_mbox_done(struct hns_roce_dev *hr_dev)
3229{
3230 u8 status = 0;
3231 int ret;
3232
3233 ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_CMD_TIMEOUT_MSECS,
3234 complete_status: &status);
3235 if (!ret) {
3236 if (status != MB_ST_COMPLETE_SUCC)
3237 return -EBUSY;
3238 } else {
3239 dev_err_ratelimited(hr_dev->dev,
3240 "failed to check mbox status = 0x%x, ret = %d.\n",
3241 status, ret);
3242 }
3243
3244 return ret;
3245}
3246
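/* Copy a GID into the table as four little-endian 32-bit words; a NULL gid
 * clears the entry by writing zgid instead.
 */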
3247static void copy_gid(void *dest, const union ib_gid *gid)
3248{
3249#define GID_SIZE 4
3250 const union ib_gid *src = gid;
3251 __le32 (*p)[GID_SIZE] = dest;
3252 int i;
3253
3254 if (!gid)
3255 src = &zgid;
3256
3257 for (i = 0; i < GID_SIZE; i++)
3258 (*p)[i] = cpu_to_le32(*(u32 *)&src->raw[i * sizeof(u32)]);
3259}
3260
3261static int config_sgid_table(struct hns_roce_dev *hr_dev,
3262 int gid_index, const union ib_gid *gid,
3263 enum hns_roce_sgid_type sgid_type)
3264{
3265 struct hns_roce_cmq_desc desc;
3266 struct hns_roce_cfg_sgid_tb *sgid_tb =
3267 (struct hns_roce_cfg_sgid_tb *)desc.data;
3268
3269 hns_roce_cmq_setup_basic_desc(desc: &desc, opcode: HNS_ROCE_OPC_CFG_SGID_TB, is_read: false);
3270
3271 hr_reg_write(sgid_tb, CFG_SGID_TB_TABLE_IDX, gid_index);
3272 hr_reg_write(sgid_tb, CFG_SGID_TB_VF_SGID_TYPE, sgid_type);
3273
3274 copy_gid(dest: &sgid_tb->vf_sgid_l, gid);
3275
3276 return hns_roce_cmq_send(hr_dev, desc: &desc, num: 1);
3277}
3278
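/*
 * HIP09 and later keep GID, VLAN and SMAC together in the GMV table, so
 * the entry is written with a two-descriptor chained command: descriptor A
 * carries the SGID/VLAN fields and descriptor B the SMAC and table index.
 */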
3279static int config_gmv_table(struct hns_roce_dev *hr_dev,
3280 int gid_index, const union ib_gid *gid,
3281 enum hns_roce_sgid_type sgid_type,
3282 const struct ib_gid_attr *attr)
3283{
3284 struct hns_roce_cmq_desc desc[2];
3285 struct hns_roce_cfg_gmv_tb_a *tb_a =
3286 (struct hns_roce_cfg_gmv_tb_a *)desc[0].data;
3287 struct hns_roce_cfg_gmv_tb_b *tb_b =
3288 (struct hns_roce_cfg_gmv_tb_b *)desc[1].data;
3289
3290 u16 vlan_id = VLAN_CFI_MASK;
3291 u8 mac[ETH_ALEN] = {};
3292 int ret;
3293
3294 if (gid) {
		ret = rdma_read_gid_l2_fields(attr, &vlan_id, mac);
		if (ret)
			return ret;
	}

	hns_roce_cmq_setup_basic_desc(&desc[0], HNS_ROCE_OPC_CFG_GMV_TBL, false);
	desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);

	hns_roce_cmq_setup_basic_desc(&desc[1], HNS_ROCE_OPC_CFG_GMV_TBL, false);

	copy_gid(&tb_a->vf_sgid_l, gid);
3306
3307 hr_reg_write(tb_a, GMV_TB_A_VF_SGID_TYPE, sgid_type);
3308 hr_reg_write(tb_a, GMV_TB_A_VF_VLAN_EN, vlan_id < VLAN_CFI_MASK);
3309 hr_reg_write(tb_a, GMV_TB_A_VF_VLAN_ID, vlan_id);
3310
3311 tb_b->vf_smac_l = cpu_to_le32(*(u32 *)mac);
3312
3313 hr_reg_write(tb_b, GMV_TB_B_SMAC_H, *(u16 *)&mac[4]);
3314 hr_reg_write(tb_b, GMV_TB_B_SGID_IDX, gid_index);
3315
	return hns_roce_cmq_send(hr_dev, desc, 2);
3317}
3318
3319static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, int gid_index,
3320 const union ib_gid *gid,
3321 const struct ib_gid_attr *attr)
3322{
3323 enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
3324 int ret;
3325
3326 if (gid) {
3327 if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
3328 if (ipv6_addr_v4mapped(a: (void *)gid))
3329 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
3330 else
3331 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
3332 } else if (attr->gid_type == IB_GID_TYPE_ROCE) {
3333 sgid_type = GID_TYPE_FLAG_ROCE_V1;
3334 }
3335 }
3336
3337 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
3338 ret = config_gmv_table(hr_dev, gid_index, gid, sgid_type, attr);
3339 else
3340 ret = config_sgid_table(hr_dev, gid_index, gid, sgid_type);
3341
3342 if (ret)
3343 ibdev_err(ibdev: &hr_dev->ib_dev, format: "failed to set gid, ret = %d!\n",
3344 ret);
3345
3346 return ret;
3347}
3348
3349static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
3350 const u8 *addr)
3351{
3352 struct hns_roce_cmq_desc desc;
3353 struct hns_roce_cfg_smac_tb *smac_tb =
3354 (struct hns_roce_cfg_smac_tb *)desc.data;
3355 u16 reg_smac_h;
3356 u32 reg_smac_l;
3357
3358 hns_roce_cmq_setup_basic_desc(desc: &desc, opcode: HNS_ROCE_OPC_CFG_SMAC_TB, is_read: false);
3359
3360 reg_smac_l = *(u32 *)(&addr[0]);
3361 reg_smac_h = *(u16 *)(&addr[4]);
3362
3363 hr_reg_write(smac_tb, CFG_SMAC_TB_IDX, phy_port);
3364 hr_reg_write(smac_tb, CFG_SMAC_TB_VF_SMAC_H, reg_smac_h);
3365 smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);
3366
3367 return hns_roce_cmq_send(hr_dev, desc: &desc, num: 1);
3368}
3369
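/*
 * Write the PBL information into an MPT entry: the PBL base address plus
 * the first two page addresses, all shifted down to the hardware address
 * unit before being split into low/high fields.
 */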
3370static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
3371 struct hns_roce_v2_mpt_entry *mpt_entry,
3372 struct hns_roce_mr *mr)
3373{
3374 u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
3375 struct ib_device *ibdev = &hr_dev->ib_dev;
3376 dma_addr_t pbl_ba;
3377 int ret;
3378 int i;
3379
	ret = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
				min_t(int, ARRAY_SIZE(pages), mr->npages));
	if (ret) {
		ibdev_err(ibdev, "failed to find PBL mtr, ret = %d.\n", ret);
3384 return ret;
3385 }
3386
3387 /* Aligned to the hardware address access unit */
3388 for (i = 0; i < ARRAY_SIZE(pages); i++)
3389 pages[i] >>= MPT_PBL_BUF_ADDR_S;
3390
	pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr);
3392
3393 mpt_entry->pbl_size = cpu_to_le32(mr->npages);
3394 mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> MPT_PBL_BA_ADDR_S);
3395 hr_reg_write(mpt_entry, MPT_PBL_BA_H,
3396 upper_32_bits(pbl_ba >> MPT_PBL_BA_ADDR_S));
3397
3398 mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
3399 hr_reg_write(mpt_entry, MPT_PA0_H, upper_32_bits(pages[0]));
3400
3401 mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
3402 hr_reg_write(mpt_entry, MPT_PA1_H, upper_32_bits(pages[1]));
3403 hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
3404 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
3405
3406 return 0;
3407}
3408
3409static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
3410 void *mb_buf, struct hns_roce_mr *mr)
3411{
3412 struct hns_roce_v2_mpt_entry *mpt_entry;
3413
3414 mpt_entry = mb_buf;
3415 memset(mpt_entry, 0, sizeof(*mpt_entry));
3416
3417 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID);
3418 hr_reg_write(mpt_entry, MPT_PD, mr->pd);
3419
3420 hr_reg_write_bool(mpt_entry, MPT_ATOMIC_EN,
3421 mr->access & IB_ACCESS_REMOTE_ATOMIC);
3422 hr_reg_write_bool(mpt_entry, MPT_RR_EN,
3423 mr->access & IB_ACCESS_REMOTE_READ);
3424 hr_reg_write_bool(mpt_entry, MPT_RW_EN,
3425 mr->access & IB_ACCESS_REMOTE_WRITE);
3426 hr_reg_write_bool(mpt_entry, MPT_LW_EN,
3427 mr->access & IB_ACCESS_LOCAL_WRITE);
3428
3429 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
3430 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
3431 mpt_entry->lkey = cpu_to_le32(mr->key);
3432 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
3433 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
3434
3435 if (mr->type != MR_TYPE_MR)
3436 hr_reg_enable(mpt_entry, MPT_PA);
3437
3438 if (mr->type == MR_TYPE_DMA)
3439 return 0;
3440
3441 if (mr->pbl_hop_num != HNS_ROCE_HOP_NUM_0)
3442 hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, mr->pbl_hop_num);
3443
3444 hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
3445 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
3446 hr_reg_enable(mpt_entry, MPT_INNER_PA_VLD);
3447
3448 return set_mtpt_pbl(hr_dev, mpt_entry, mr);
3449}
3450
3451static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
3452 struct hns_roce_mr *mr, int flags,
3453 void *mb_buf)
3454{
3455 struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
3456 u32 mr_access_flags = mr->access;
3457 int ret = 0;
3458
3459 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID);
3460 hr_reg_write(mpt_entry, MPT_PD, mr->pd);
3461
3462 if (flags & IB_MR_REREG_ACCESS) {
3463 hr_reg_write(mpt_entry, MPT_ATOMIC_EN,
3464 mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
3465 hr_reg_write(mpt_entry, MPT_RR_EN,
3466 mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
3467 hr_reg_write(mpt_entry, MPT_RW_EN,
3468 mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
3469 hr_reg_write(mpt_entry, MPT_LW_EN,
3470 mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
3471 }
3472
3473 if (flags & IB_MR_REREG_TRANS) {
3474 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
3475 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
3476 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
3477 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
3478
3479 ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
3480 }
3481
3482 return ret;
3483}
3484
3485static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
3486{
3487 dma_addr_t pbl_ba = hns_roce_get_mtr_ba(mtr: &mr->pbl_mtr);
3488 struct hns_roce_v2_mpt_entry *mpt_entry;
3489
3490 mpt_entry = mb_buf;
3491 memset(mpt_entry, 0, sizeof(*mpt_entry));
3492
3493 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE);
3494 hr_reg_write(mpt_entry, MPT_PD, mr->pd);
3495
3496 hr_reg_enable(mpt_entry, MPT_RA_EN);
3497 hr_reg_enable(mpt_entry, MPT_R_INV_EN);
3498
3499 hr_reg_enable(mpt_entry, MPT_FRE);
3500 hr_reg_enable(mpt_entry, MPT_BPD);
3501 hr_reg_clear(mpt_entry, MPT_PA);
3502
3503 hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, 1);
3504 hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
3505 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
3506 hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
3507 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
3508
3509 mpt_entry->pbl_size = cpu_to_le32(mr->npages);
3510
3511 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >>
3512 MPT_PBL_BA_ADDR_S));
3513 hr_reg_write(mpt_entry, MPT_PBL_BA_H,
3514 upper_32_bits(pbl_ba >> MPT_PBL_BA_ADDR_S));
3515
3516 return 0;
3517}
3518
3519static int free_mr_post_send_lp_wqe(struct hns_roce_qp *hr_qp)
3520{
3521 struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev: hr_qp->ibqp.device);
3522 struct ib_device *ibdev = &hr_dev->ib_dev;
3523 const struct ib_send_wr *bad_wr;
3524 struct ib_rdma_wr rdma_wr = {};
3525 struct ib_send_wr *send_wr;
3526 int ret;
3527
3528 send_wr = &rdma_wr.wr;
3529 send_wr->opcode = IB_WR_RDMA_WRITE;
3530
3531 ret = hns_roce_v2_post_send(ibqp: &hr_qp->ibqp, wr: send_wr, bad_wr: &bad_wr);
3532 if (ret) {
3533 ibdev_err_ratelimited(ibdev, "failed to post wqe for free mr, ret = %d.\n",
3534 ret);
3535 return ret;
3536 }
3537
3538 return 0;
3539}
3540
3541static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
3542 struct ib_wc *wc);
3543
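/*
 * Post one loopback RDMA_WRITE WQE on each reserved QP and poll the
 * reserved CQ until all of them complete or a timeout hits; called from
 * hns_roce_v2_dereg_mr() on HIP08.
 */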
3544static void free_mr_send_cmd_to_hw(struct hns_roce_dev *hr_dev)
3545{
3546 struct hns_roce_v2_priv *priv = hr_dev->priv;
3547 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
3548 struct ib_wc wc[ARRAY_SIZE(free_mr->rsv_qp)];
3549 struct ib_device *ibdev = &hr_dev->ib_dev;
3550 struct hns_roce_qp *hr_qp;
3551 unsigned long end;
3552 int cqe_cnt = 0;
3553 int npolled;
3554 int ret;
3555 int i;
3556
3557 /*
3558 * If the device initialization is not complete or in the uninstall
3559 * process, then there is no need to execute free mr.
3560 */
3561 if (priv->handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT ||
3562 priv->handle->rinfo.instance_state == HNS_ROCE_STATE_INIT ||
3563 hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT)
3564 return;
3565
3566 mutex_lock(&free_mr->mutex);
3567
3568 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
3569 hr_qp = free_mr->rsv_qp[i];
3570
3571 ret = free_mr_post_send_lp_wqe(hr_qp);
3572 if (ret) {
3573 ibdev_err_ratelimited(ibdev,
3574 "failed to send wqe (qp:0x%lx) for free mr, ret = %d.\n",
3575 hr_qp->qpn, ret);
3576 break;
3577 }
3578
3579 cqe_cnt++;
3580 }
3581
3582 end = msecs_to_jiffies(HNS_ROCE_V2_FREE_MR_TIMEOUT) + jiffies;
3583 while (cqe_cnt) {
		npolled = hns_roce_v2_poll_cq(&free_mr->rsv_cq->ib_cq, cqe_cnt, wc);
3585 if (npolled < 0) {
3586 ibdev_err_ratelimited(ibdev,
3587 "failed to poll cqe for free mr, remain %d cqe.\n",
3588 cqe_cnt);
3589 goto out;
3590 }
3591
3592 if (time_after(jiffies, end)) {
3593 ibdev_err_ratelimited(ibdev,
3594 "failed to poll cqe for free mr and timeout, remain %d cqe.\n",
3595 cqe_cnt);
3596 goto out;
3597 }
3598 cqe_cnt -= npolled;
3599 }
3600
3601out:
3602 mutex_unlock(lock: &free_mr->mutex);
3603}
3604
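/* Only HIP08 requires the extra loopback flow when an MR is deregistered. */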
static void hns_roce_v2_dereg_mr(struct hns_roce_dev *hr_dev)
{
	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
		free_mr_send_cmd_to_hw(hr_dev);
}

static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
	return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);
}

static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, unsigned int n)
{
	struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);

	/* A CQE is ready only when its owner bit is the inverse of the
	 * phase bit derived from the consumer index (n & cq_depth).
	 */
	return (hr_reg_read(cqe, CQE_OWNER) ^ !!(n & hr_cq->cq_depth)) ? cqe :
		NULL;
}

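/*
 * Update the CQ consumer index: through the record doorbell in memory when
 * HNS_ROCE_CQ_FLAG_RECORD_DB is set, otherwise by ringing the hardware
 * doorbell register.
 */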
static inline void update_cq_db(struct hns_roce_dev *hr_dev,
				struct hns_roce_cq *hr_cq)
{
	if (likely(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB)) {
		*hr_cq->set_ci_db = hr_cq->cons_index & V2_CQ_DB_CONS_IDX_M;
	} else {
		struct hns_roce_v2_db cq_db = {};

		hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn);
		hr_reg_write(&cq_db, DB_CMD, HNS_ROCE_V2_CQ_DB);
		hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index);
		hr_reg_write(&cq_db, DB_CQ_CMD_SN, 1);

		hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
	}
}

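/*
 * Remove all CQEs belonging to @qpn from the CQ, returning matching SRQ WQEs
 * where needed. The caller must hold the CQ lock; hns_roce_v2_cq_clean()
 * below is the locked wrapper.
 */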
static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				   struct hns_roce_srq *srq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
	struct hns_roce_v2_cqe *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	int wqe_index;
	u8 owner_bit;

	for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
	     ++prod_index) {
		if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe)
			break;
	}

	/*
	 * Now sweep backwards through the CQ, removing CQ entries that match
	 * our QP by overwriting them with the entries that follow.
	 */
	while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
		cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
		if (hr_reg_read(cqe, CQE_LCL_QPN) == qpn) {
			if (srq && hr_reg_read(cqe, CQE_S_R)) {
				wqe_index = hr_reg_read(cqe, CQE_WQE_IDX);
				hns_roce_free_srq_wqe(srq, wqe_index);
			}
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
					  hr_cq->ib_cq.cqe);
			owner_bit = hr_reg_read(dest, CQE_OWNER);
			memcpy(dest, cqe, hr_cq->cqe_size);
			hr_reg_write(dest, CQE_OWNER, owner_bit);
		}
	}

	if (nfreed) {
		hr_cq->cons_index += nfreed;
		update_cq_db(hr_dev, hr_cq);
	}
}

static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				 struct hns_roce_srq *srq)
{
	spin_lock_irq(&hr_cq->lock);
	__hns_roce_v2_cq_clean(hr_cq, qpn, srq);
	spin_unlock_irq(&hr_cq->lock);
}

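/*
 * Fill the CQ context (CQC) in the mailbox buffer: state, depth, CEQ vector,
 * CQE size and the addresses of the CQE buffer described by @mtts.
 */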
static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
				  struct hns_roce_cq *hr_cq, void *mb_buf,
				  u64 *mtts, dma_addr_t dma_handle)
{
	struct hns_roce_v2_cq_context *cq_context;

	cq_context = mb_buf;
	memset(cq_context, 0, sizeof(*cq_context));

	hr_reg_write(cq_context, CQC_CQ_ST, V2_CQ_STATE_VALID);
	hr_reg_write(cq_context, CQC_ARM_ST, NO_ARMED);
	hr_reg_write(cq_context, CQC_SHIFT, ilog2(hr_cq->cq_depth));
	hr_reg_write(cq_context, CQC_CEQN, hr_cq->vector);
	hr_reg_write(cq_context, CQC_CQN, hr_cq->cqn);

	if (hr_cq->cqe_size == HNS_ROCE_V3_CQE_SIZE)
		hr_reg_write(cq_context, CQC_CQE_SIZE, CQE_SIZE_64B);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH)
		hr_reg_enable(cq_context, CQC_STASH);

	hr_reg_write(cq_context, CQC_CQE_CUR_BLK_ADDR_L,
		     to_hr_hw_page_addr(mtts[0]));
	hr_reg_write(cq_context, CQC_CQE_CUR_BLK_ADDR_H,
		     upper_32_bits(to_hr_hw_page_addr(mtts[0])));
	hr_reg_write(cq_context, CQC_CQE_HOP_NUM, hr_dev->caps.cqe_hop_num ==
		     HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
