// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <net/mana/gdma.h>
#include <net/mana/hw_channel.h>

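/* Reserve a free message ID for a new request. The semaphore bounds the
 * number of in-flight messages to the bitmap size, so once down() returns,
 * find_first_zero_bit() is guaranteed to find a free slot.
 */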
static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	unsigned long flags;
	u32 index;

	down(&hwc->sema);

	spin_lock_irqsave(&r->lock, flags);

	index = find_first_zero_bit(hwc->inflight_msg_res.map,
				    hwc->inflight_msg_res.size);

	bitmap_set(hwc->inflight_msg_res.map, index, 1);

	spin_unlock_irqrestore(&r->lock, flags);

	*msg_id = index;

	return 0;
}

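/* Release a message ID and wake up one waiter blocked in
 * mana_hwc_get_msg_index().
 */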
static void mana_hwc_put_msg_index(struct hw_channel_context *hwc, u16 msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	unsigned long flags;

	spin_lock_irqsave(&r->lock, flags);
	bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1);
	spin_unlock_irqrestore(&r->lock, flags);

	up(&hwc->sema);
}

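/* Sanity-check a response: it must contain at least a full gdma_resp_hdr
 * and must fit in the caller's output buffer.
 */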
static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
				    const struct gdma_resp_hdr *resp_msg,
				    u32 resp_len)
{
	if (resp_len < sizeof(*resp_msg))
		return -EPROTO;

	if (resp_len > caller_ctx->output_buflen)
		return -EPROTO;

	return 0;
}

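/* Match a received response to the caller context waiting on it (indexed
 * by hwc_msg_id), copy the payload into the caller's buffer, and complete
 * the event so mana_hwc_send_request() can return.
 */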
static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
				 const struct gdma_resp_hdr *resp_msg)
{
	struct hwc_caller_ctx *ctx;
	int err;

	if (!test_bit(resp_msg->response.hwc_msg_id,
		      hwc->inflight_msg_res.map)) {
		dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
			resp_msg->response.hwc_msg_id);
		return;
	}

	ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
	err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
	if (err)
		goto out;

	ctx->status_code = resp_msg->status;

	memcpy(ctx->output_buf, resp_msg, resp_len);
out:
	ctx->error = err;
	complete(&ctx->comp_event);
}

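/* Post one receive WQE so the hardware has a buffer to deliver the next
 * response into. Each work request owns a fixed slot in the RQ's DMA
 * buffer, described by a single SGE.
 */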
static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
				struct hwc_work_request *req)
{
	struct device *dev = hwc_rxq->hwc->dev;
	struct gdma_sge *sge;
	int err;

	sge = &req->sge;
	sge->address = (u64)req->buf_sge_addr;
	sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
	sge->size = req->buf_len;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
	return err;
}

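/* EQ callback used while the channel is being brought up: the device
 * reports the queue IDs, doorbell, depth/size limits and other parameters
 * it has assigned via a series of init EQEs, and signals completion with
 * GDMA_EQE_HWC_INIT_DONE.
 */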
static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
					struct gdma_event *event)
{
	struct hw_channel_context *hwc = ctx;
	struct gdma_dev *gd = hwc->gdma_dev;
	union hwc_init_type_data type_data;
	union hwc_init_eq_id_db eq_db;
	u32 type, val;

	switch (event->type) {
	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
		eq_db.as_uint32 = event->details[0];
		hwc->cq->gdma_eq->id = eq_db.eq_id;
		gd->doorbell = eq_db.doorbell;
		break;

	case GDMA_EQE_HWC_INIT_DATA:
		type_data.as_uint32 = event->details[0];
		type = type_data.type;
		val = type_data.value;

		switch (type) {
		case HWC_INIT_DATA_CQID:
			hwc->cq->gdma_cq->id = val;
			break;

		case HWC_INIT_DATA_RQID:
			hwc->rxq->gdma_wq->id = val;
			break;

		case HWC_INIT_DATA_SQID:
			hwc->txq->gdma_wq->id = val;
			break;

		case HWC_INIT_DATA_QUEUE_DEPTH:
			hwc->hwc_init_q_depth_max = (u16)val;
			break;

		case HWC_INIT_DATA_MAX_REQUEST:
			hwc->hwc_init_max_req_msg_size = val;
			break;

		case HWC_INIT_DATA_MAX_RESPONSE:
			hwc->hwc_init_max_resp_msg_size = val;
			break;

		case HWC_INIT_DATA_MAX_NUM_CQS:
			gd->gdma_context->max_num_cqs = val;
			break;

		case HWC_INIT_DATA_PDID:
			hwc->gdma_dev->pdid = val;
			break;

		case HWC_INIT_DATA_GPA_MKEY:
			hwc->rxq->msg_buf->gpa_mkey = val;
			hwc->txq->msg_buf->gpa_mkey = val;
			break;

		case HWC_INIT_DATA_PF_DEST_RQ_ID:
			hwc->pf_dest_vrq_id = val;
			break;

		case HWC_INIT_DATA_PF_DEST_CQ_ID:
			hwc->pf_dest_vrcq_id = val;
			break;
		}

		break;

	case GDMA_EQE_HWC_INIT_DONE:
		complete(&hwc->hwc_init_eqe_comp);
		break;

	case GDMA_EQE_HWC_SOC_RECONFIG_DATA:
		type_data.as_uint32 = event->details[0];
		type = type_data.type;
		val = type_data.value;

		switch (type) {
		case HWC_DATA_CFG_HWC_TIMEOUT:
			hwc->hwc_timeout = val;
			break;

		default:
			dev_warn(hwc->dev, "Received unknown reconfig type %u\n", type);
			break;
		}

		break;

	default:
		dev_warn(hwc->dev, "Received unknown gdma event %u\n", event->type);
		/* Ignore unknown events, which should never happen. */
		break;
	}
}

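/* CQ callback for the receive path: locate the work request that owns the
 * completed buffer, hand the response to mana_hwc_handle_resp(), then
 * repost the buffer to the hardware.
 */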
static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id,
				      const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *rx_req;
	struct gdma_resp_hdr *resp;
	struct gdma_wqe *dma_oob;
	struct gdma_queue *rq;
	struct gdma_sge *sge;
	u64 rq_base_addr;
	u64 rx_req_idx;
	u8 *wqe;

	if (WARN_ON_ONCE(hwc_rxq->gdma_wq->id != gdma_rxq_id))
		return;

	rq = hwc_rxq->gdma_wq;
	wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE);
	dma_oob = (struct gdma_wqe *)wqe;

	sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4);

	/* Select the RX work request for virtual address and for reposting. */
	rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle;
	rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size;

	rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx];
	resp = (struct gdma_resp_hdr *)rx_req->buf_va;

	if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) {
		dev_err(hwc->dev, "HWC RX: wrong msg_id=%u\n",
			resp->response.hwc_msg_id);
		return;
	}

	mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);

	/* Stop using 'resp' here: the buffer is handed back to the hardware
	 * by mana_hwc_post_rx_wqe() below.
	 */
	resp = NULL;

	mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
}

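/* CQ callback for the send path. Send WQEs need no post-processing, so
 * this only sanity-checks that the completion came from the HWC SQ.
 */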
static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
				      const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_txq = hwc->txq;

	WARN_ON_ONCE(!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id);
}

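/* Thin wrappers around mana_gd_create_hwc_queue(): this one creates an HWC
 * work queue (SQ or RQ); the _cq and _eq variants below fill in the
 * completion-queue and event-queue specific fields of the spec.
 */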
static int mana_hwc_create_gdma_wq(struct hw_channel_context *hwc,
				   enum gdma_queue_type type, u64 queue_size,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	if (type != GDMA_SQ && type != GDMA_RQ)
		return -EINVAL;

	spec.type = type;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int mana_hwc_create_gdma_cq(struct hw_channel_context *hwc,
				   u64 queue_size,
				   void *ctx, gdma_cq_callback *cb,
				   struct gdma_queue *parent_eq,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_CQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.cq.context = ctx;
	spec.cq.callback = cb;
	spec.cq.parent_eq = parent_eq;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
				   u64 queue_size,
				   void *ctx, gdma_eq_callback *cb,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.eq.context = ctx;
	spec.eq.callback = cb;
	spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;
	spec.eq.msix_index = 0;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

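/* Drain the shared CQ and dispatch each completion to the TX or RX handler,
 * then re-arm the CQ so the EQ fires again on new completions.
 */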
static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
{
	struct hwc_rx_oob comp_data = {};
	struct gdma_comp *completions;
	struct hwc_cq *hwc_cq = ctx;
	int comp_read, i;

	WARN_ON_ONCE(hwc_cq->gdma_cq != q_self);

	completions = hwc_cq->comp_buf;
	comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);
	WARN_ON_ONCE(comp_read <= 0 || comp_read > hwc_cq->queue_depth);

	for (i = 0; i < comp_read; ++i) {
		comp_data = *(struct hwc_rx_oob *)completions[i].cqe_data;

		if (completions[i].is_sq)
			hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx,
						 completions[i].wq_num,
						 &comp_data);
		else
			hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx,
						 completions[i].wq_num,
						 &comp_data);
	}

	mana_gd_ring_cq(q_self, SET_ARM_BIT);
}

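/* Free the completion buffer and destroy the CQ and its parent EQ. Safe to
 * call on a partially constructed hwc_cq.
 */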
static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
{
	kfree(hwc_cq->comp_buf);

	if (hwc_cq->gdma_cq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_cq);

	if (hwc_cq->gdma_eq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_eq);

	kfree(hwc_cq);
}

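/* Create the HWC completion machinery: an EQ, a CQ attached to it, and a
 * polling buffer sized for q_depth completions. Queue sizes are rounded up
 * to a power of two and to at least one supported page.
 */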
static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
			      gdma_eq_callback *callback, void *ctx,
			      hwc_rx_event_handler_t *rx_ev_hdlr,
			      void *rx_ev_ctx,
			      hwc_tx_event_handler_t *tx_ev_hdlr,
			      void *tx_ev_ctx, struct hwc_cq **hwc_cq_ptr)
{
	struct gdma_queue *eq, *cq;
	struct gdma_comp *comp_buf;
	struct hwc_cq *hwc_cq;
	u32 eq_size, cq_size;
	int err;

	eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
	if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		eq_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
	if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
	if (!hwc_cq)
		return -ENOMEM;

	err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC EQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_eq = eq;

	err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq, mana_hwc_comp_event,
				      eq, &cq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC CQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_cq = cq;

	comp_buf = kcalloc(q_depth, sizeof(*comp_buf), GFP_KERNEL);
	if (!comp_buf) {
		err = -ENOMEM;
		goto out;
	}

	hwc_cq->hwc = hwc;
	hwc_cq->comp_buf = comp_buf;
	hwc_cq->queue_depth = q_depth;
	hwc_cq->rx_event_handler = rx_ev_hdlr;
	hwc_cq->rx_event_ctx = rx_ev_ctx;
	hwc_cq->tx_event_handler = tx_ev_hdlr;
	hwc_cq->tx_event_ctx = tx_ev_ctx;

	*hwc_cq_ptr = hwc_cq;
	return 0;
out:
	mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
	return err;
}

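/* Allocate one physically contiguous DMA buffer and carve it into q_depth
 * fixed-size message slots. Each hwc_work_request records its slot's
 * kernel virtual address and DMA address.
 */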
static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
				  u32 max_msg_size,
				  struct hwc_dma_buf **dma_buf_ptr)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_work_request *hwc_wr;
	struct hwc_dma_buf *dma_buf;
	struct gdma_mem_info *gmi;
	void *virt_addr;
	u32 buf_size;
	u8 *base_pa;
	int err;
	u16 i;

	dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
	if (!dma_buf)
		return -ENOMEM;

	dma_buf->num_reqs = q_depth;

	buf_size = PAGE_ALIGN(q_depth * max_msg_size);

	gmi = &dma_buf->mem_info;
	err = mana_gd_alloc_memory(gc, buf_size, gmi);
	if (err) {
		dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);
		goto out;
	}

	virt_addr = dma_buf->mem_info.virt_addr;
	base_pa = (u8 *)dma_buf->mem_info.dma_handle;

	for (i = 0; i < q_depth; i++) {
		hwc_wr = &dma_buf->reqs[i];

		hwc_wr->buf_va = virt_addr + i * max_msg_size;
		hwc_wr->buf_sge_addr = base_pa + i * max_msg_size;

		hwc_wr->buf_len = max_msg_size;
	}

	*dma_buf_ptr = dma_buf;
	return 0;
out:
	kfree(dma_buf);
	return err;
}

static void mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
				     struct hwc_dma_buf *dma_buf)
{
	if (!dma_buf)
		return;

	mana_gd_free_memory(&dma_buf->mem_info);

	kfree(dma_buf);
}

static void mana_hwc_destroy_wq(struct hw_channel_context *hwc,
				struct hwc_wq *hwc_wq)
{
	mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);

	if (hwc_wq->gdma_wq)
		mana_gd_destroy_queue(hwc->gdma_dev->gdma_context,
				      hwc_wq->gdma_wq);

	kfree(hwc_wq);
}

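/* Create an HWC work queue (SQ or RQ) together with the DMA buffer that
 * backs its message slots. Both queues complete into the shared hwc_cq.
 */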
static int mana_hwc_create_wq(struct hw_channel_context *hwc,
			      enum gdma_queue_type q_type, u16 q_depth,
			      u32 max_msg_size, struct hwc_cq *hwc_cq,
			      struct hwc_wq **hwc_wq_ptr)
{
	struct gdma_queue *queue;
	struct hwc_wq *hwc_wq;
	u32 queue_size;
	int err;

	WARN_ON(q_type != GDMA_SQ && q_type != GDMA_RQ);

	if (q_type == GDMA_RQ)
		queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth);
	else
		queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);

	if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
	if (!hwc_wq)
		return -ENOMEM;

	err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
	if (err)
		goto out;

	hwc_wq->hwc = hwc;
	hwc_wq->gdma_wq = queue;
	hwc_wq->queue_depth = q_depth;
	hwc_wq->hwc_cq = hwc_cq;

	err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
				     &hwc_wq->msg_buf);
	if (err)
		goto out;

	*hwc_wq_ptr = hwc_wq;
	return 0;
out:
	if (err)
		mana_hwc_destroy_wq(hwc, hwc_wq);
	return err;
}

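/* Build and post a send WQE for one request. The hwc_tx_oob carries the
 * routing information (destination virtual RQ/RCQ, PF flag) inline; a
 * single SGE points at the request payload in the SQ's DMA buffer.
 */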
static int mana_hwc_post_tx_wqe(const struct hwc_wq *hwc_txq,
				struct hwc_work_request *req,
				u32 dest_virt_rq_id, u32 dest_virt_rcq_id,
				bool dest_pf)
{
	struct device *dev = hwc_txq->hwc->dev;
	struct hwc_tx_oob *tx_oob;
	struct gdma_sge *sge;
	int err;

	if (req->msg_size == 0 || req->msg_size > req->buf_len) {
		dev_err(dev, "wrong msg_size: %u, buf_len: %u\n",
			req->msg_size, req->buf_len);
		return -EINVAL;
	}

	tx_oob = &req->tx_oob;

	tx_oob->vrq_id = dest_virt_rq_id;
	tx_oob->dest_vfid = 0;
	tx_oob->vrcq_id = dest_virt_rcq_id;
	tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id;
	tx_oob->loopback = false;
	tx_oob->lso_override = false;
	tx_oob->dest_pf = dest_pf;
	tx_oob->vsq_id = hwc_txq->gdma_wq->id;

	sge = &req->sge;
	sge->address = (u64)req->buf_sge_addr;
	sge->mem_key = hwc_txq->msg_buf->gpa_mkey;
	sge->size = req->msg_size;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob);
	req->wqe_req.inline_oob_data = tx_oob;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		dev_err(dev, "Failed to post WQE on HWC SQ: %d\n", err);
	return err;
}

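/* Set up in-flight message accounting: a counting semaphore plus a bitmap
 * of message IDs, both sized to num_msg.
 */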
static int mana_hwc_init_inflight_msg(struct hw_channel_context *hwc,
				      u16 num_msg)
{
	int err;

	sema_init(&hwc->sema, num_msg);

	err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res);
	if (err)
		dev_err(hwc->dev, "Failed to init inflight_msg_res: %d\n", err);
	return err;
}

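/* Prime the channel for use: post all RX buffers, allocate the per-message
 * caller contexts, and verify that the EQ delivers interrupts via
 * mana_gd_test_eq().
 */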
static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth,
				 u32 max_req_msg_size, u32 max_resp_msg_size)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *req;
	struct hwc_caller_ctx *ctx;
	int err;
	int i;

	/* Post all WQEs on the RQ */
	for (i = 0; i < q_depth; i++) {
		req = &hwc_rxq->msg_buf->reqs[i];
		err = mana_hwc_post_rx_wqe(hwc_rxq, req);
		if (err)
			return err;
	}

	ctx = kcalloc(q_depth, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	for (i = 0; i < q_depth; ++i)
		init_completion(&ctx[i].comp_event);

	hwc->caller_ctx = ctx;

	return mana_gd_test_eq(gc, hwc->cq->gdma_eq);
}

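/* Hand the queue addresses to the device through the shared-memory channel
 * and wait for it to report the negotiated limits back through init EQEs
 * (see mana_hwc_init_event_handler()).
 */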
static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth,
				      u32 *max_req_msg_size,
				      u32 *max_resp_msg_size)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;
	struct gdma_queue *rq = hwc->rxq->gdma_wq;
	struct gdma_queue *sq = hwc->txq->gdma_wq;
	struct gdma_queue *eq = hwc->cq->gdma_eq;
	struct gdma_queue *cq = hwc->cq->gdma_cq;
	int err;

	init_completion(&hwc->hwc_init_eqe_comp);

	err = mana_smc_setup_hwc(&gc->shm_channel, false,
				 eq->mem_info.dma_handle,
				 cq->mem_info.dma_handle,
				 rq->mem_info.dma_handle,
				 sq->mem_info.dma_handle,
				 eq->eq.msix_index);
	if (err)
		return err;

	if (!wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * HZ))
		return -ETIMEDOUT;

	*q_depth = hwc->hwc_init_q_depth_max;
	*max_req_msg_size = hwc->hwc_init_max_req_msg_size;
	*max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;

	/* Both were set in mana_hwc_init_event_handler(). */
	if (WARN_ON(cq->id >= gc->max_num_cqs))
		return -EPROTO;

	gc->cq_table = vcalloc(gc->max_num_cqs, sizeof(struct gdma_queue *));
	if (!gc->cq_table)
		return -ENOMEM;

	gc->cq_table[cq->id] = cq;

	return 0;
}

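/* Allocate the software-side queue structures: the CQ shared by the SQ and
 * RQ, then the RQ and SQ themselves. No HWC device interaction happens
 * here.
 */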
static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth,
				u32 max_req_msg_size, u32 max_resp_msg_size)
{
	int err;

	err = mana_hwc_init_inflight_msg(hwc, q_depth);
	if (err)
		return err;

	/* CQ is shared by SQ and RQ, so CQ's queue depth is the sum of SQ
	 * queue depth and RQ queue depth.
	 */
	err = mana_hwc_create_cq(hwc, q_depth * 2,
				 mana_hwc_init_event_handler, hwc,
				 mana_hwc_rx_event_handler, hwc,
				 mana_hwc_tx_event_handler, hwc, &hwc->cq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err);
		goto out;
	}

	err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
				 hwc->cq, &hwc->rxq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err);
		goto out;
	}

	err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
				 hwc->cq, &hwc->txq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err);
		goto out;
	}

	hwc->num_inflight_msg = q_depth;
	hwc->max_req_msg_size = max_req_msg_size;

	return 0;
out:
	/* mana_hwc_create_channel() will do the cleanup. */
	return err;
}

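/* Bring up the HW channel: allocate the queues, establish the channel with
 * the device, and run a self-test. On any failure the whole channel is
 * torn down again.
 */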
int mana_hwc_create_channel(struct gdma_context *gc)
{
	u32 max_req_msg_size, max_resp_msg_size;
	struct gdma_dev *gd = &gc->hwc;
	struct hw_channel_context *hwc;
	u16 q_depth_max;
	int err;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return -ENOMEM;

	gd->gdma_context = gc;
	gd->driver_data = hwc;
	hwc->gdma_dev = gd;
	hwc->dev = gc->dev;
	hwc->hwc_timeout = HW_CHANNEL_WAIT_RESOURCE_TIMEOUT_MS;

	/* HWC's instance number is always 0. */
	gd->dev_id.as_uint32 = 0;
	gd->dev_id.type = GDMA_DEVICE_HWC;

	gd->pdid = INVALID_PDID;
	gd->doorbell = INVALID_DOORBELL;

	/* mana_hwc_init_queues() only creates the required data structures,
	 * and doesn't touch the HWC device.
	 */
	err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
				   HW_CHANNEL_MAX_REQUEST_SIZE,
				   HW_CHANNEL_MAX_RESPONSE_SIZE);
	if (err) {
		dev_err(hwc->dev, "Failed to initialize HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_establish_channel(gc, &q_depth_max, &max_req_msg_size,
					 &max_resp_msg_size);
	if (err) {
		dev_err(hwc->dev, "Failed to establish HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_test_channel(gc->hwc.driver_data,
				    HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
				    max_req_msg_size, max_resp_msg_size);
	if (err) {
		dev_err(hwc->dev, "Failed to test HWC: %d\n", err);
		goto out;
	}

	return 0;
out:
	mana_hwc_destroy_channel(gc);
	return err;
}

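/* Tear down the HW channel in the reverse order of creation. Safe to call
 * on a partially created channel, as mana_hwc_create_channel() does on its
 * error path.
 */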
void mana_hwc_destroy_channel(struct gdma_context *gc)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;

	if (!hwc)
		return;

	/* gc->max_num_cqs is set in mana_hwc_init_event_handler(). If it's
	 * non-zero, the HWC worked and we should tear down the HWC here.
	 */
	if (gc->max_num_cqs > 0) {
		mana_smc_teardown_hwc(&gc->shm_channel, false);
		gc->max_num_cqs = 0;
	}

	kfree(hwc->caller_ctx);
	hwc->caller_ctx = NULL;

	if (hwc->txq)
		mana_hwc_destroy_wq(hwc, hwc->txq);

	if (hwc->rxq)
		mana_hwc_destroy_wq(hwc, hwc->rxq);

	if (hwc->cq)
		mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);

	mana_gd_free_res_map(&hwc->inflight_msg_res);

	hwc->num_inflight_msg = 0;

	hwc->gdma_dev->doorbell = INVALID_DOORBELL;
	hwc->gdma_dev->pdid = INVALID_PDID;

	hwc->hwc_timeout = 0;

	kfree(hwc);
	gc->hwc.driver_data = NULL;
	gc->hwc.gdma_context = NULL;

	vfree(gc->cq_table);
	gc->cq_table = NULL;
}

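/* Send one request over the HW channel and block until the response
 * arrives or the channel times out. 'resp' receives the full response,
 * including the leading gdma_resp_hdr.
 *
 * A minimal caller sketch; the struct and helper names below come from the
 * wider MANA driver, not from this file:
 *
 *	struct gdma_general_req req = {};
 *	struct gdma_general_resp resp = {};
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION,
 *			     sizeof(req), sizeof(resp));
 *	err = mana_hwc_send_request(hwc, sizeof(req), &req,
 *				    sizeof(resp), &resp);
 */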
int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
			  const void *req, u32 resp_len, void *resp)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_work_request *tx_wr;
	struct hwc_wq *txq = hwc->txq;
	struct gdma_req_hdr *req_msg;
	struct hwc_caller_ctx *ctx;
	u32 dest_vrcq = 0;
	u32 dest_vrq = 0;
	u16 msg_id;
	int err;

	mana_hwc_get_msg_index(hwc, &msg_id);

	tx_wr = &txq->msg_buf->reqs[msg_id];

	if (req_len > tx_wr->buf_len) {
		dev_err(hwc->dev, "HWC: req msg size: %u > %u\n", req_len,
			tx_wr->buf_len);
		err = -EINVAL;
		goto out;
	}

	ctx = hwc->caller_ctx + msg_id;
	ctx->output_buf = resp;
	ctx->output_buflen = resp_len;

	req_msg = (struct gdma_req_hdr *)tx_wr->buf_va;
	if (req)
		memcpy(req_msg, req, req_len);

	req_msg->req.hwc_msg_id = msg_id;

	tx_wr->msg_size = req_len;

	if (gc->is_pf) {
		dest_vrq = hwc->pf_dest_vrq_id;
		dest_vrcq = hwc->pf_dest_vrcq_id;
	}

	err = mana_hwc_post_tx_wqe(txq, tx_wr, dest_vrq, dest_vrcq, false);
	if (err) {
		dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
		goto out;
	}

	/* hwc_timeout is in msec and msecs_to_jiffies() already converts it
	 * to jiffies, so don't multiply by HZ again.
	 */
	if (!wait_for_completion_timeout(&ctx->comp_event,
					 msecs_to_jiffies(hwc->hwc_timeout))) {
		dev_err(hwc->dev, "HWC: Request timed out!\n");
		err = -ETIMEDOUT;
		goto out;
	}

	if (ctx->error) {
		err = ctx->error;
		goto out;
	}

	if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
		dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
			ctx->status_code);
		err = -EPROTO;
		goto out;
	}
out:
	mana_hwc_put_msg_index(hwc, msg_id);
	return err;
}