// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */
#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

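/* check that a requested cqe count is valid: positive, within the device
 * limit, and (when resizing an existing cq) not less than the number of
 * completions currently in the queue
 */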
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
		    int cqe, int comp_vector)
{
	int count;

	if (cqe <= 0) {
		rxe_dbg_dev(rxe, "cqe(%d) <= 0\n", cqe);
		goto err1;
	}

	if (cqe > rxe->attr.max_cqe) {
		rxe_dbg_dev(rxe, "cqe(%d) > max_cqe(%d)\n",
			    cqe, rxe->attr.max_cqe);
		goto err1;
	}

	if (cq) {
		count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);
		if (cqe < count) {
			rxe_dbg_cq(cq, "cqe(%d) < current # elements in queue (%d)\n",
				   cqe, count);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

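/* allocate and initialize the completion queue; if userspace supplied a
 * response buffer, share the queue's mmap info with it via do_mmap_info()
 */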
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
		     int comp_vector, struct ib_udata *udata,
		     struct rxe_create_cq_resp __user *uresp)
{
	int err;
	enum queue_type type;

	type = QUEUE_TYPE_TO_CLIENT;
	cq->queue = rxe_queue_init(rxe, &cqe,
				   sizeof(struct rxe_cqe), type);
	if (!cq->queue) {
		rxe_dbg_dev(rxe, "unable to create cq\n");
		return -ENOMEM;
	}

	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
			   cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
	if (err)
		return err;

	cq->is_user = uresp;

	spin_lock_init(&cq->cq_lock);
	cq->ibcq.cqe = cqe;
	return 0;
}

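/* resize the queue backing an existing cq; on success report the new
 * depth through cq->ibcq.cqe
 */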
int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
			struct rxe_resize_cq_resp __user *uresp,
			struct ib_udata *udata)
{
	int err;

	err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
			       sizeof(struct rxe_cqe), udata,
			       uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock);
	if (!err)
		cq->ibcq.cqe = cqe;

	return err;
}

/* caller holds reference to cq */
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
	struct ib_event ev;
	int full;
	void *addr;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);

	full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
	if (unlikely(full)) {
		/* a full queue is an overrun; report IB_EVENT_CQ_ERR to
		 * the consumer if it registered an event handler
		 */
		rxe_err_cq(cq, "queue full\n");
		spin_unlock_irqrestore(&cq->cq_lock, flags);
		if (cq->ibcq.event_handler) {
			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}

		return -EBUSY;
	}

	/* copy the cqe into the producer slot, then publish it to the
	 * consumer by advancing the producer index
	 */
	addr = queue_producer_addr(cq->queue, QUEUE_TYPE_TO_CLIENT);
	memcpy(addr, cqe, sizeof(*cqe));

	queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT);

	/* run the completion handler if the cq is armed for all
	 * completions, or armed for solicited ones and this completion is
	 * solicited; clearing cq->notify means the consumer must re-arm
	 */
	if ((cq->notify & IB_CQ_NEXT_COMP) ||
	    (cq->notify & IB_CQ_SOLICITED && solicited)) {
		cq->notify = 0;
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
	}

	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return 0;
}

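/* pool element cleanup, invoked when the cq object is destroyed;
 * releases the underlying queue
 */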
void rxe_cq_cleanup(struct rxe_pool_elem *elem)
{
	struct rxe_cq *cq = container_of(elem, typeof(*cq), elem);

	if (cq->queue)
		rxe_queue_cleanup(cq->queue);
}