// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause

/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/* Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */

#include "erdma_verbs.h"

#define MAX_POLL_CHUNK_SIZE 16
10
11void notify_eq(struct erdma_eq *eq)
12{
13 u64 db_data = FIELD_PREP(ERDMA_EQDB_CI_MASK, eq->ci) |
14 FIELD_PREP(ERDMA_EQDB_ARM_MASK, 1);
15
16 *eq->dbrec = db_data;
17 writeq(val: db_data, addr: eq->db);
18
19 atomic64_inc(v: &eq->notify_num);
20}
21
22void *get_next_valid_eqe(struct erdma_eq *eq)
23{
24 u64 *eqe = get_queue_entry(qbuf: eq->qbuf, idx: eq->ci, depth: eq->depth, EQE_SHIFT);
25 u32 owner = FIELD_GET(ERDMA_CEQE_HDR_O_MASK, READ_ONCE(*eqe));
26
27 return owner ^ !!(eq->ci & eq->depth) ? eqe : NULL;
28}
29
30void erdma_aeq_event_handler(struct erdma_dev *dev)
31{
32 struct erdma_aeqe *aeqe;
33 u32 cqn, qpn;
34 struct erdma_qp *qp;
35 struct erdma_cq *cq;
36 struct ib_event event;
37 u32 poll_cnt = 0;
38
39 memset(&event, 0, sizeof(event));
40
41 while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
42 aeqe = get_next_valid_eqe(eq: &dev->aeq);
43 if (!aeqe)
44 break;
45
46 dma_rmb();
47
48 dev->aeq.ci++;
49 atomic64_inc(v: &dev->aeq.event_num);
50 poll_cnt++;
51
52 if (FIELD_GET(ERDMA_AEQE_HDR_TYPE_MASK,
53 le32_to_cpu(aeqe->hdr)) == ERDMA_AE_TYPE_CQ_ERR) {
54 cqn = le32_to_cpu(aeqe->event_data0);
55 cq = find_cq_by_cqn(dev, id: cqn);
56 if (!cq)
57 continue;
58
59 event.device = cq->ibcq.device;
60 event.element.cq = &cq->ibcq;
61 event.event = IB_EVENT_CQ_ERR;
62 if (cq->ibcq.event_handler)
63 cq->ibcq.event_handler(&event,
64 cq->ibcq.cq_context);
65 } else {
66 qpn = le32_to_cpu(aeqe->event_data0);
67 qp = find_qp_by_qpn(dev, id: qpn);
68 if (!qp)
69 continue;
70
71 event.device = qp->ibqp.device;
72 event.element.qp = &qp->ibqp;
73 event.event = IB_EVENT_QP_FATAL;
74 if (qp->ibqp.event_handler)
75 qp->ibqp.event_handler(&event,
76 qp->ibqp.qp_context);
77 }
78 }
79
80 notify_eq(eq: &dev->aeq);
81}
82
83int erdma_eq_common_init(struct erdma_dev *dev, struct erdma_eq *eq, u32 depth)
84{
85 u32 buf_size = depth << EQE_SHIFT;
86
87 eq->qbuf = dma_alloc_coherent(dev: &dev->pdev->dev, size: buf_size,
88 dma_handle: &eq->qbuf_dma_addr, GFP_KERNEL);
89 if (!eq->qbuf)
90 return -ENOMEM;
91
92 eq->dbrec = dma_pool_zalloc(pool: dev->db_pool, GFP_KERNEL, handle: &eq->dbrec_dma);
93 if (!eq->dbrec)
94 goto err_free_qbuf;
95
96 spin_lock_init(&eq->lock);
97 atomic64_set(v: &eq->event_num, i: 0);
98 atomic64_set(v: &eq->notify_num, i: 0);
99 eq->ci = 0;
100 eq->depth = depth;
101
102 return 0;
103
104err_free_qbuf:
105 dma_free_coherent(dev: &dev->pdev->dev, size: buf_size, cpu_addr: eq->qbuf,
106 dma_handle: eq->qbuf_dma_addr);
107
108 return -ENOMEM;
109}
110
111void erdma_eq_destroy(struct erdma_dev *dev, struct erdma_eq *eq)
112{
113 dma_pool_free(pool: dev->db_pool, vaddr: eq->dbrec, addr: eq->dbrec_dma);
114 dma_free_coherent(dev: &dev->pdev->dev, size: eq->depth << EQE_SHIFT, cpu_addr: eq->qbuf,
115 dma_handle: eq->qbuf_dma_addr);
116}
117
118int erdma_aeq_init(struct erdma_dev *dev)
119{
120 struct erdma_eq *eq = &dev->aeq;
121 int ret;
122
123 ret = erdma_eq_common_init(dev, eq: &dev->aeq, ERDMA_DEFAULT_EQ_DEPTH);
124 if (ret)
125 return ret;
126
127 eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG;
128
129 erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
130 upper_32_bits(eq->qbuf_dma_addr));
131 erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG,
132 lower_32_bits(eq->qbuf_dma_addr));
133 erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, value: eq->depth);
134 erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG, value: eq->dbrec_dma);
135
136 return 0;
137}
138
139void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb)
140{
141 struct erdma_dev *dev = ceq_cb->dev;
142 struct erdma_cq *cq;
143 u32 poll_cnt = 0;
144 u64 *ceqe;
145 int cqn;
146
147 if (!ceq_cb->ready)
148 return;
149
150 while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
151 ceqe = get_next_valid_eqe(eq: &ceq_cb->eq);
152 if (!ceqe)
153 break;
154
155 dma_rmb();
156 ceq_cb->eq.ci++;
157 poll_cnt++;
158 cqn = FIELD_GET(ERDMA_CEQE_HDR_CQN_MASK, READ_ONCE(*ceqe));
159
160 cq = find_cq_by_cqn(dev, id: cqn);
161 if (!cq)
162 continue;
163
164 if (rdma_is_kernel_res(res: &cq->ibcq.res))
165 cq->kern_cq.cmdsn++;
166
167 if (cq->ibcq.comp_handler)
168 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
169 }
170
171 notify_eq(eq: &ceq_cb->eq);
172}
173
174static irqreturn_t erdma_intr_ceq_handler(int irq, void *data)
175{
176 struct erdma_eq_cb *ceq_cb = data;
177
178 tasklet_schedule(t: &ceq_cb->tasklet);
179
180 return IRQ_HANDLED;
181}
182
/* Tasklet body: run the CEQ poll loop in softirq context. */
static void erdma_intr_ceq_task(unsigned long data)
{
	erdma_ceq_completion_handler((struct erdma_eq_cb *)data);
}
187
188static int erdma_set_ceq_irq(struct erdma_dev *dev, u16 ceqn)
189{
190 struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];
191 int err;
192
193 snprintf(buf: eqc->irq.name, ERDMA_IRQNAME_SIZE, fmt: "erdma-ceq%u@pci:%s", ceqn,
194 pci_name(pdev: dev->pdev));
195 eqc->irq.msix_vector = pci_irq_vector(dev: dev->pdev, nr: ceqn + 1);
196
197 tasklet_init(t: &dev->ceqs[ceqn].tasklet, func: erdma_intr_ceq_task,
198 data: (unsigned long)&dev->ceqs[ceqn]);
199
200 cpumask_set_cpu(cpu: cpumask_local_spread(i: ceqn + 1, node: dev->attrs.numa_node),
201 dstp: &eqc->irq.affinity_hint_mask);
202
203 err = request_irq(irq: eqc->irq.msix_vector, handler: erdma_intr_ceq_handler, flags: 0,
204 name: eqc->irq.name, dev: eqc);
205 if (err) {
206 dev_err(&dev->pdev->dev, "failed to request_irq(%d)\n", err);
207 return err;
208 }
209
210 irq_set_affinity_hint(irq: eqc->irq.msix_vector,
211 m: &eqc->irq.affinity_hint_mask);
212
213 return 0;
214}
215
216static void erdma_free_ceq_irq(struct erdma_dev *dev, u16 ceqn)
217{
218 struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];
219
220 irq_set_affinity_hint(irq: eqc->irq.msix_vector, NULL);
221 free_irq(eqc->irq.msix_vector, eqc);
222}
223
224static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
225{
226 struct erdma_cmdq_create_eq_req req;
227
228 erdma_cmdq_build_reqhdr(hdr: &req.hdr, mod: CMDQ_SUBMOD_COMMON,
229 op: CMDQ_OPCODE_CREATE_EQ);
230 req.eqn = eqn;
231 req.depth = ilog2(eq->depth);
232 req.qbuf_addr = eq->qbuf_dma_addr;
233 req.qtype = ERDMA_EQ_TYPE_CEQ;
234 /* Vector index is the same as EQN. */
235 req.vector_idx = eqn;
236 req.db_dma_addr_l = lower_32_bits(eq->dbrec_dma);
237 req.db_dma_addr_h = upper_32_bits(eq->dbrec_dma);
238
239 return erdma_post_cmd_wait(cmdq: &dev->cmdq, req: &req, req_size: sizeof(req), NULL, NULL,
240 sleepable: false);
241}
242
243static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
244{
245 struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
246 int ret;
247
248 ret = erdma_eq_common_init(dev, eq, ERDMA_DEFAULT_EQ_DEPTH);
249 if (ret)
250 return ret;
251
252 eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
253 (ceqn + 1) * ERDMA_DB_SIZE;
254 dev->ceqs[ceqn].dev = dev;
255 dev->ceqs[ceqn].ready = true;
256
257 /* CEQ indexed from 1, 0 rsvd for CMDQ-EQ. */
258 ret = create_eq_cmd(dev, eqn: ceqn + 1, eq);
259 if (ret) {
260 erdma_eq_destroy(dev, eq);
261 dev->ceqs[ceqn].ready = false;
262 }
263
264 return ret;
265}
266
267static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
268{
269 struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
270 struct erdma_cmdq_destroy_eq_req req;
271 int err;
272
273 dev->ceqs[ceqn].ready = 0;
274
275 erdma_cmdq_build_reqhdr(hdr: &req.hdr, mod: CMDQ_SUBMOD_COMMON,
276 op: CMDQ_OPCODE_DESTROY_EQ);
277 /* CEQ indexed from 1, 0 rsvd for CMDQ-EQ. */
278 req.eqn = ceqn + 1;
279 req.qtype = ERDMA_EQ_TYPE_CEQ;
280 req.vector_idx = ceqn + 1;
281
282 err = erdma_post_cmd_wait(cmdq: &dev->cmdq, req: &req, req_size: sizeof(req), NULL, NULL,
283 sleepable: false);
284 if (err)
285 return;
286
287 erdma_eq_destroy(dev, eq);
288}
289
290int erdma_ceqs_init(struct erdma_dev *dev)
291{
292 u32 i, j;
293 int err;
294
295 for (i = 0; i < dev->attrs.irq_num - 1; i++) {
296 err = erdma_ceq_init_one(dev, ceqn: i);
297 if (err)
298 goto out_err;
299
300 err = erdma_set_ceq_irq(dev, ceqn: i);
301 if (err) {
302 erdma_ceq_uninit_one(dev, ceqn: i);
303 goto out_err;
304 }
305 }
306
307 return 0;
308
309out_err:
310 for (j = 0; j < i; j++) {
311 erdma_free_ceq_irq(dev, ceqn: j);
312 erdma_ceq_uninit_one(dev, ceqn: j);
313 }
314
315 return err;
316}
317
318void erdma_ceqs_uninit(struct erdma_dev *dev)
319{
320 u32 i;
321
322 for (i = 0; i < dev->attrs.irq_num - 1; i++) {
323 erdma_free_ceq_irq(dev, ceqn: i);
324 erdma_ceq_uninit_one(dev, ceqn: i);
325 }
326}
327

source code of linux/drivers/infiniband/hw/erdma/erdma_eq.c