// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/net.h>
#include <linux/scatterlist.h>
#include <linux/llist.h>
#include <asm/barrier.h>
#include <net/tcp.h>
#include <trace/events/sock.h>

#include "siw.h"
#include "siw_verbs.h"
#include "siw_mem.h"
static char siw_qp_state_to_string[SIW_QP_STATE_COUNT][sizeof "TERMINATE"] = {
	[SIW_QP_STATE_IDLE] = "IDLE",
	[SIW_QP_STATE_RTR] = "RTR",
	[SIW_QP_STATE_RTS] = "RTS",
	[SIW_QP_STATE_CLOSING] = "CLOSING",
	[SIW_QP_STATE_TERMINATE] = "TERMINATE",
	[SIW_QP_STATE_ERROR] = "ERROR"
};

/*
 * iWARP (RDMAP, DDP and MPA) parameters as well as Softiwarp settings on a
 * per-RDMAP message basis. Please keep the order of the initializers. Each
 * MPA length is initialized to the minimum packet size.
 */
struct iwarp_msg_info iwarp_pktinfo[RDMAP_TERMINATE + 1] = {
	{ /* RDMAP_RDMA_WRITE */
	  .hdr_len = sizeof(struct iwarp_rdma_write),
	  .ctrl.mpa_len = htons(sizeof(struct iwarp_rdma_write) - 2),
	  .ctrl.ddp_rdmap_ctrl = DDP_FLAG_TAGGED | DDP_FLAG_LAST |
				 cpu_to_be16(DDP_VERSION << 8) |
				 cpu_to_be16(RDMAP_VERSION << 6) |
				 cpu_to_be16(RDMAP_RDMA_WRITE),
	  .rx_data = siw_proc_write },
	{ /* RDMAP_RDMA_READ_REQ */
	  .hdr_len = sizeof(struct iwarp_rdma_rreq),
	  .ctrl.mpa_len = htons(sizeof(struct iwarp_rdma_rreq) - 2),
	  .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) |
				 cpu_to_be16(RDMAP_VERSION << 6) |
				 cpu_to_be16(RDMAP_RDMA_READ_REQ),
	  .rx_data = siw_proc_rreq },
	{ /* RDMAP_RDMA_READ_RESP */
	  .hdr_len = sizeof(struct iwarp_rdma_rresp),
	  .ctrl.mpa_len = htons(sizeof(struct iwarp_rdma_rresp) - 2),
	  .ctrl.ddp_rdmap_ctrl = DDP_FLAG_TAGGED | DDP_FLAG_LAST |
				 cpu_to_be16(DDP_VERSION << 8) |
				 cpu_to_be16(RDMAP_VERSION << 6) |
				 cpu_to_be16(RDMAP_RDMA_READ_RESP),
	  .rx_data = siw_proc_rresp },
	{ /* RDMAP_SEND */
	  .hdr_len = sizeof(struct iwarp_send),
	  .ctrl.mpa_len = htons(sizeof(struct iwarp_send) - 2),
	  .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) |
				 cpu_to_be16(RDMAP_VERSION << 6) |
				 cpu_to_be16(RDMAP_SEND),
	  .rx_data = siw_proc_send },
	{ /* RDMAP_SEND_INVAL */
	  .hdr_len = sizeof(struct iwarp_send_inv),
	  .ctrl.mpa_len = htons(sizeof(struct iwarp_send_inv) - 2),
	  .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) |
				 cpu_to_be16(RDMAP_VERSION << 6) |
				 cpu_to_be16(RDMAP_SEND_INVAL),
	  .rx_data = siw_proc_send },
	{ /* RDMAP_SEND_SE */
	  .hdr_len = sizeof(struct iwarp_send),
	  .ctrl.mpa_len = htons(sizeof(struct iwarp_send) - 2),
	  .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) |
				 cpu_to_be16(RDMAP_VERSION << 6) |
				 cpu_to_be16(RDMAP_SEND_SE),
	  .rx_data = siw_proc_send },
	{ /* RDMAP_SEND_SE_INVAL */
	  .hdr_len = sizeof(struct iwarp_send_inv),
	  .ctrl.mpa_len = htons(sizeof(struct iwarp_send_inv) - 2),
	  .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) |
				 cpu_to_be16(RDMAP_VERSION << 6) |
				 cpu_to_be16(RDMAP_SEND_SE_INVAL),
	  .rx_data = siw_proc_send },
	{ /* RDMAP_TERMINATE */
	  .hdr_len = sizeof(struct iwarp_terminate),
	  .ctrl.mpa_len = htons(sizeof(struct iwarp_terminate) - 2),
	  .ctrl.ddp_rdmap_ctrl = DDP_FLAG_LAST | cpu_to_be16(DDP_VERSION << 8) |
				 cpu_to_be16(RDMAP_VERSION << 6) |
				 cpu_to_be16(RDMAP_TERMINATE),
	  .rx_data = siw_proc_terminate }
};

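/*
 * Socket callback informing about data ready to read. Feeds the
 * TCP receive stream into siw_tcp_rx_data(), if the QP is in RTS
 * state and RX processing is not suspended.
 */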
void siw_qp_llp_data_ready(struct sock *sk)
{
	struct siw_qp *qp;

	trace_sk_data_ready(sk);

	read_lock(&sk->sk_callback_lock);

	if (unlikely(!sk->sk_user_data || !sk_to_qp(sk)))
		goto done;

	qp = sk_to_qp(sk);

	if (likely(!qp->rx_stream.rx_suspend &&
		   down_read_trylock(&qp->state_lock))) {
		read_descriptor_t rd_desc = { .arg.data = qp, .count = 1 };

		if (likely(qp->attrs.state == SIW_QP_STATE_RTS))
			/*
			 * Implements data receive operation during
			 * socket callback. TCP gracefully catches
			 * the case where there is nothing to receive
			 * (not calling siw_tcp_rx_data() then).
			 */
			tcp_read_sock(sk, &rd_desc, siw_tcp_rx_data);

		up_read(&qp->state_lock);
	} else {
		siw_dbg_qp(qp, "unable to process RX, suspend: %d\n",
			   qp->rx_stream.rx_suspend);
	}
done:
	read_unlock(&sk->sk_callback_lock);
}

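/*
 * LLP close notification. Suspends further TX and RX processing,
 * moves the QP into a final state and flushes all pending work.
 * Also drops the reference on an attached connection endpoint.
 */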
void siw_qp_llp_close(struct siw_qp *qp)
{
	siw_dbg_qp(qp, "enter llp close, state = %s\n",
		   siw_qp_state_to_string[qp->attrs.state]);

	down_write(&qp->state_lock);

	qp->rx_stream.rx_suspend = 1;
	qp->tx_ctx.tx_suspend = 1;
	qp->attrs.sk = NULL;

	switch (qp->attrs.state) {
	case SIW_QP_STATE_RTS:
	case SIW_QP_STATE_RTR:
	case SIW_QP_STATE_IDLE:
	case SIW_QP_STATE_TERMINATE:
		qp->attrs.state = SIW_QP_STATE_ERROR;
		break;
	/*
	 * SIW_QP_STATE_CLOSING:
	 *
	 * This is a forced close. Shall the QP be moved to
	 * ERROR or IDLE?
	 */
	case SIW_QP_STATE_CLOSING:
		if (tx_wqe(qp)->wr_status == SIW_WR_IDLE)
			qp->attrs.state = SIW_QP_STATE_ERROR;
		else
			qp->attrs.state = SIW_QP_STATE_IDLE;
		break;

	default:
		siw_dbg_qp(qp, "llp close: no state transition needed: %s\n",
			   siw_qp_state_to_string[qp->attrs.state]);
		break;
	}
	siw_sq_flush(qp);
	siw_rq_flush(qp);

	/*
	 * Dereference closing CEP
	 */
	if (qp->cep) {
		siw_cep_put(qp->cep);
		qp->cep = NULL;
	}

	up_write(&qp->state_lock);

	siw_dbg_qp(qp, "llp close exit: state %s\n",
		   siw_qp_state_to_string[qp->attrs.state]);
}

/*
 * Socket callback routine informing about newly available send space.
 * Schedules SQ work for processing pending SQ items.
 */
void siw_qp_llp_write_space(struct sock *sk)
{
	struct siw_cep *cep;

	read_lock(&sk->sk_callback_lock);

	cep = sk_to_cep(sk);
	if (cep) {
		cep->sk_write_space(sk);

		if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
			(void)siw_sq_start(cep->qp);
	}

	read_unlock(&sk->sk_callback_lock);
}

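/*
 * Allocate the inbound and outbound READ request queues (IRQ and
 * ORQ), with each requested size rounded up to a power of two.
 * A size of zero leaves the respective queue unallocated.
 */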
static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size)
{
	if (irq_size) {
		irq_size = roundup_pow_of_two(irq_size);
		qp->irq = vcalloc(irq_size, sizeof(struct siw_sqe));
		if (!qp->irq) {
			qp->attrs.irq_size = 0;
			return -ENOMEM;
		}
	}
	if (orq_size) {
		orq_size = roundup_pow_of_two(orq_size);
		qp->orq = vcalloc(orq_size, sizeof(struct siw_sqe));
		if (!qp->orq) {
			qp->attrs.orq_size = 0;
			qp->attrs.irq_size = 0;
			vfree(qp->irq);
			return -ENOMEM;
		}
	}
	qp->attrs.irq_size = irq_size;
	qp->attrs.orq_size = orq_size;
	siw_dbg_qp(qp, "ORD %d, IRD %d\n", orq_size, irq_size);
	return 0;
}

/*
 * Send a non-signalled READ or WRITE to the peer side, as negotiated
 * with the MPAv2 P2P setup protocol. The work request is only created
 * as the current active WR and does not consume Send Queue space.
 *
 * Caller must hold QP state lock.
 */
int siw_qp_mpa_rts(struct siw_qp *qp, enum mpa_v2_ctrl ctrl)
{
	struct siw_wqe *wqe = tx_wqe(qp);
	unsigned long flags;
	int rv = 0;

	spin_lock_irqsave(&qp->sq_lock, flags);

	if (unlikely(wqe->wr_status != SIW_WR_IDLE)) {
		spin_unlock_irqrestore(&qp->sq_lock, flags);
		return -EIO;
	}
	memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);

	wqe->wr_status = SIW_WR_QUEUED;
	wqe->sqe.flags = 0;
	wqe->sqe.num_sge = 1;
	wqe->sqe.sge[0].length = 0;
	wqe->sqe.sge[0].laddr = 0;
	wqe->sqe.sge[0].lkey = 0;
	/*
	 * While it must not be checked for inbound zero length
	 * READ/WRITE, some HW may treat STag 0 specially.
	 */
	wqe->sqe.rkey = 1;
	wqe->sqe.raddr = 0;
	wqe->processed = 0;

	if (ctrl & MPA_V2_RDMA_WRITE_RTR)
		wqe->sqe.opcode = SIW_OP_WRITE;
	else if (ctrl & MPA_V2_RDMA_READ_RTR) {
		struct siw_sqe *rreq = NULL;

		wqe->sqe.opcode = SIW_OP_READ;

		spin_lock(&qp->orq_lock);

		if (qp->attrs.orq_size)
			rreq = orq_get_free(qp);
		if (rreq) {
			siw_read_to_orq(rreq, &wqe->sqe);
			qp->orq_put++;
		} else
			rv = -EIO;

		spin_unlock(&qp->orq_lock);
	} else
		rv = -EINVAL;

	if (rv)
		wqe->wr_status = SIW_WR_IDLE;

	spin_unlock_irqrestore(&qp->sq_lock, flags);

	if (!rv)
		rv = siw_sq_start(qp);

	return rv;
}

/*
 * Map memory access error to DDP tagged error
 */
enum ddp_ecode siw_tagged_error(enum siw_access_state state)
{
	switch (state) {
	case E_STAG_INVALID:
		return DDP_ECODE_T_INVALID_STAG;
	case E_BASE_BOUNDS:
		return DDP_ECODE_T_BASE_BOUNDS;
	case E_PD_MISMATCH:
		return DDP_ECODE_T_STAG_NOT_ASSOC;
	case E_ACCESS_PERM:
		/*
		 * RFC 5041 (DDP) lacks an ecode for insufficient access
		 * permissions. 'Invalid STag' seems to be the closest
		 * match though.
		 */
		return DDP_ECODE_T_INVALID_STAG;
	default:
		WARN_ON(1);
		return DDP_ECODE_T_INVALID_STAG;
	}
}

/*
 * Map memory access error to RDMAP protection error
 */
enum rdmap_ecode siw_rdmap_error(enum siw_access_state state)
{
	switch (state) {
	case E_STAG_INVALID:
		return RDMAP_ECODE_INVALID_STAG;
	case E_BASE_BOUNDS:
		return RDMAP_ECODE_BASE_BOUNDS;
	case E_PD_MISMATCH:
		return RDMAP_ECODE_STAG_NOT_ASSOC;
	case E_ACCESS_PERM:
		return RDMAP_ECODE_ACCESS_RIGHTS;
	default:
		return RDMAP_ECODE_UNSPECIFIED;
	}
}

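/*
 * Record the cause for a TERMINATE message to be sent out later by
 * siw_send_terminate(). Only the first recorded error is kept.
 */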
void siw_init_terminate(struct siw_qp *qp, enum term_elayer layer, u8 etype,
			u8 ecode, int in_tx)
{
	if (!qp->term_info.valid) {
		memset(&qp->term_info, 0, sizeof(qp->term_info));
		qp->term_info.layer = layer;
		qp->term_info.etype = etype;
		qp->term_info.ecode = ecode;
		qp->term_info.in_tx = in_tx;
		qp->term_info.valid = 1;
	}
	siw_dbg_qp(qp, "init TERM: layer %d, type %d, code %d, in tx %s\n",
		   layer, etype, ecode, in_tx ? "yes" : "no");
}

/*
 * Send a TERMINATE message, as defined in RFCs 5040/5041/5044/6581.
 * Sending TERMINATE messages is best effort - such messages
 * can only be sent if the QP is still connected and it does
 * not have another outbound message in progress, i.e. the
 * TERMINATE message must not interfere with an incomplete current
 * transmit operation.
 */
void siw_send_terminate(struct siw_qp *qp)
{
	struct kvec iov[3];
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
	struct iwarp_terminate *term = NULL;
	union iwarp_hdr *err_hdr = NULL;
	struct socket *s = qp->attrs.sk;
	struct siw_rx_stream *srx = &qp->rx_stream;
	union iwarp_hdr *rx_hdr = &srx->hdr;
	u32 crc = 0;
	int num_frags, len_terminate, rv;

	if (!qp->term_info.valid)
		return;

	qp->term_info.valid = 0;

	if (tx_wqe(qp)->wr_status == SIW_WR_INPROGRESS) {
		siw_dbg_qp(qp, "cannot send TERMINATE: op %d in progress\n",
			   tx_type(tx_wqe(qp)));
		return;
	}
	if (!s && qp->cep)
		/* QP not yet in RTS. Take socket from connection end point */
		s = qp->cep->sock;

	if (!s) {
		siw_dbg_qp(qp, "cannot send TERMINATE: not connected\n");
		return;
	}

	term = kzalloc(sizeof(*term), GFP_KERNEL);
	if (!term)
		return;

	term->ddp_qn = cpu_to_be32(RDMAP_UNTAGGED_QN_TERMINATE);
	term->ddp_mo = 0;
	term->ddp_msn = cpu_to_be32(1);

	iov[0].iov_base = term;
	iov[0].iov_len = sizeof(*term);

	if ((qp->term_info.layer == TERM_ERROR_LAYER_DDP) ||
	    ((qp->term_info.layer == TERM_ERROR_LAYER_RDMAP) &&
	     (qp->term_info.etype != RDMAP_ETYPE_CATASTROPHIC))) {
		err_hdr = kzalloc(sizeof(*err_hdr), GFP_KERNEL);
		if (!err_hdr) {
			kfree(term);
			return;
		}
	}
	memcpy(&term->ctrl, &iwarp_pktinfo[RDMAP_TERMINATE].ctrl,
	       sizeof(struct iwarp_ctrl));

	__rdmap_term_set_layer(term, qp->term_info.layer);
	__rdmap_term_set_etype(term, qp->term_info.etype);
	__rdmap_term_set_ecode(term, qp->term_info.ecode);

	switch (qp->term_info.layer) {
	case TERM_ERROR_LAYER_RDMAP:
		if (qp->term_info.etype == RDMAP_ETYPE_CATASTROPHIC)
			/* No additional DDP/RDMAP header to be included */
			break;

		if (qp->term_info.etype == RDMAP_ETYPE_REMOTE_PROTECTION) {
			/*
			 * Complete RDMAP frame will get attached, and
			 * DDP segment length is valid
			 */
			term->flag_m = 1;
			term->flag_d = 1;
			term->flag_r = 1;

			if (qp->term_info.in_tx) {
				struct iwarp_rdma_rreq *rreq;
				struct siw_wqe *wqe = tx_wqe(qp);

				/* Inbound RREQ error, detected during
				 * RRESP creation. Take state from
				 * current TX work queue element to
				 * reconstruct peer's RREQ.
				 */
				rreq = (struct iwarp_rdma_rreq *)err_hdr;

				memcpy(&rreq->ctrl,
				       &iwarp_pktinfo[RDMAP_RDMA_READ_REQ].ctrl,
				       sizeof(struct iwarp_ctrl));

				rreq->rsvd = 0;
				rreq->ddp_qn =
					htonl(RDMAP_UNTAGGED_QN_RDMA_READ);

				/* Provide RREQ's MSN as kept aside */
				rreq->ddp_msn = htonl(wqe->sqe.sge[0].length);

				rreq->ddp_mo = htonl(wqe->processed);
				rreq->sink_stag = htonl(wqe->sqe.rkey);
				rreq->sink_to = cpu_to_be64(wqe->sqe.raddr);
				rreq->read_size = htonl(wqe->sqe.sge[0].length);
				rreq->source_stag = htonl(wqe->sqe.sge[0].lkey);
				rreq->source_to =
					cpu_to_be64(wqe->sqe.sge[0].laddr);

				iov[1].iov_base = rreq;
				iov[1].iov_len = sizeof(*rreq);

				rx_hdr = (union iwarp_hdr *)rreq;
			} else {
				/* Take RDMAP/DDP information from
				 * current (failed) inbound frame.
				 */
				iov[1].iov_base = rx_hdr;

				if (__rdmap_get_opcode(&rx_hdr->ctrl) ==
				    RDMAP_RDMA_READ_REQ)
					iov[1].iov_len =
						sizeof(struct iwarp_rdma_rreq);
				else /* SEND type */
					iov[1].iov_len =
						sizeof(struct iwarp_send);
			}
		} else {
			/* Do not report DDP hdr information if packet
			 * layout is unknown
			 */
			if ((qp->term_info.ecode == RDMAP_ECODE_VERSION) ||
			    (qp->term_info.ecode == RDMAP_ECODE_OPCODE))
				break;

			iov[1].iov_base = rx_hdr;

			/* Only DDP frame will get attached */
			if (rx_hdr->ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED)
				iov[1].iov_len =
					sizeof(struct iwarp_rdma_write);
			else
				iov[1].iov_len = sizeof(struct iwarp_send);

			term->flag_m = 1;
			term->flag_d = 1;
		}
		term->ctrl.mpa_len = cpu_to_be16(iov[1].iov_len);
		break;

	case TERM_ERROR_LAYER_DDP:
		/* Report error encountered while DDP processing.
		 * This can only happen as a result of inbound
		 * DDP processing
		 */

		/* Do not report DDP hdr information if packet
		 * layout is unknown
		 */
		if (((qp->term_info.etype == DDP_ETYPE_TAGGED_BUF) &&
		     (qp->term_info.ecode == DDP_ECODE_T_VERSION)) ||
		    ((qp->term_info.etype == DDP_ETYPE_UNTAGGED_BUF) &&
		     (qp->term_info.ecode == DDP_ECODE_UT_VERSION)))
			break;

		iov[1].iov_base = rx_hdr;

		if (rx_hdr->ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED)
			iov[1].iov_len = sizeof(struct iwarp_ctrl_tagged);
		else
			iov[1].iov_len = sizeof(struct iwarp_ctrl_untagged);

		term->flag_m = 1;
		term->flag_d = 1;
		break;

	default:
		break;
	}
	if (term->flag_m || term->flag_d || term->flag_r) {
		iov[2].iov_base = &crc;
		iov[2].iov_len = sizeof(crc);
		len_terminate = sizeof(*term) + iov[1].iov_len + MPA_CRC_SIZE;
		num_frags = 3;
	} else {
		iov[1].iov_base = &crc;
		iov[1].iov_len = sizeof(crc);
		len_terminate = sizeof(*term) + MPA_CRC_SIZE;
		num_frags = 2;
	}

	/* Adjust DDP Segment Length parameter, if valid */
	if (term->flag_m) {
		u32 real_ddp_len = be16_to_cpu(rx_hdr->ctrl.mpa_len);
		enum rdma_opcode op = __rdmap_get_opcode(&rx_hdr->ctrl);

		real_ddp_len -= iwarp_pktinfo[op].hdr_len - MPA_HDR_SIZE;
		rx_hdr->ctrl.mpa_len = cpu_to_be16(real_ddp_len);
	}

	term->ctrl.mpa_len =
		cpu_to_be16(len_terminate - (MPA_HDR_SIZE + MPA_CRC_SIZE));
	if (qp->tx_ctx.mpa_crc_enabled) {
		siw_crc_init(&qp->tx_ctx.mpa_crc);
		siw_crc_update(&qp->tx_ctx.mpa_crc,
			       iov[0].iov_base, iov[0].iov_len);
		if (num_frags == 3) {
			siw_crc_update(&qp->tx_ctx.mpa_crc,
				       iov[1].iov_base, iov[1].iov_len);
		}
		siw_crc_final(&qp->tx_ctx.mpa_crc, (u8 *)&crc);
	}

	rv = kernel_sendmsg(s, &msg, iov, num_frags, len_terminate);
	siw_dbg_qp(qp, "sent TERM: %s, layer %d, type %d, code %d (%d bytes)\n",
		   rv == len_terminate ? "success" : "failure",
		   __rdmap_term_layer(term), __rdmap_term_etype(term),
		   __rdmap_term_ecode(term), rv);
	kfree(term);
	kfree(err_hdr);
}

/*
 * Handle all attrs other than state
 */
static void siw_qp_modify_nonstate(struct siw_qp *qp,
				   struct siw_qp_attrs *attrs,
				   enum siw_qp_attr_mask mask)
{
	if (mask & SIW_QP_ATTR_ACCESS_FLAGS) {
		if (attrs->flags & SIW_RDMA_BIND_ENABLED)
			qp->attrs.flags |= SIW_RDMA_BIND_ENABLED;
		else
			qp->attrs.flags &= ~SIW_RDMA_BIND_ENABLED;

		if (attrs->flags & SIW_RDMA_WRITE_ENABLED)
			qp->attrs.flags |= SIW_RDMA_WRITE_ENABLED;
		else
			qp->attrs.flags &= ~SIW_RDMA_WRITE_ENABLED;

		if (attrs->flags & SIW_RDMA_READ_ENABLED)
			qp->attrs.flags |= SIW_RDMA_READ_ENABLED;
		else
			qp->attrs.flags &= ~SIW_RDMA_READ_ENABLED;
	}
}

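/*
 * QP state transition out of IDLE or RTR state. Moving to RTS
 * enables MPA CRC if negotiated and initializes iWARP TX/RX
 * state as well as the IRQ and ORQ.
 */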
static int siw_qp_nextstate_from_idle(struct siw_qp *qp,
				      struct siw_qp_attrs *attrs,
				      enum siw_qp_attr_mask mask)
{
	int rv = 0;

	switch (attrs->state) {
	case SIW_QP_STATE_RTS:
		if (attrs->flags & SIW_MPA_CRC) {
			siw_crc_init(&qp->tx_ctx.mpa_crc);
			qp->tx_ctx.mpa_crc_enabled = true;
			siw_crc_init(&qp->rx_stream.mpa_crc);
			qp->rx_stream.mpa_crc_enabled = true;
		}
		if (!(mask & SIW_QP_ATTR_LLP_HANDLE)) {
			siw_dbg_qp(qp, "no socket\n");
			rv = -EINVAL;
			break;
		}
		if (!(mask & SIW_QP_ATTR_MPA)) {
			siw_dbg_qp(qp, "no MPA\n");
			rv = -EINVAL;
			break;
		}
		/*
		 * Initialize iWARP TX state
		 */
		qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_SEND] = 0;
		qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ] = 0;
		qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_TERMINATE] = 0;

		/*
		 * Initialize iWARP RX state
		 */
		qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_SEND] = 1;
		qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ] = 1;
		qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_TERMINATE] = 1;

		/*
		 * init IRD free queue, caller has already checked
		 * limits.
		 */
		rv = siw_qp_readq_init(qp, attrs->irq_size,
				       attrs->orq_size);
		if (rv)
			break;

		qp->attrs.sk = attrs->sk;
		qp->attrs.state = SIW_QP_STATE_RTS;

		siw_dbg_qp(qp, "enter RTS: crc=%s, ord=%u, ird=%u\n",
			   attrs->flags & SIW_MPA_CRC ? "y" : "n",
			   qp->attrs.orq_size, qp->attrs.irq_size);
		break;

	case SIW_QP_STATE_ERROR:
		siw_rq_flush(qp);
		qp->attrs.state = SIW_QP_STATE_ERROR;
		if (qp->cep) {
			siw_cep_put(qp->cep);
			qp->cep = NULL;
		}
		break;

	default:
		break;
	}
	return rv;
}

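/*
 * QP state transition out of RTS state. Returns non-zero if the
 * caller shall drop the connection.
 */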
static int siw_qp_nextstate_from_rts(struct siw_qp *qp,
				     struct siw_qp_attrs *attrs)
{
	int drop_conn = 0;

	switch (attrs->state) {
	case SIW_QP_STATE_CLOSING:
		/*
		 * Verbs: move to IDLE if SQ and ORQ are empty.
		 * Move to ERROR otherwise. But first of all we must
		 * close the connection. So we keep CLOSING or ERROR
		 * as a transient state, schedule connection drop work
		 * and wait for the socket state change upcall to
		 * come back closed.
		 */
		if (tx_wqe(qp)->wr_status == SIW_WR_IDLE) {
			qp->attrs.state = SIW_QP_STATE_CLOSING;
		} else {
			qp->attrs.state = SIW_QP_STATE_ERROR;
			siw_sq_flush(qp);
		}
		siw_rq_flush(qp);

		drop_conn = 1;
		break;

	case SIW_QP_STATE_TERMINATE:
		qp->attrs.state = SIW_QP_STATE_TERMINATE;

		siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP,
				   RDMAP_ETYPE_CATASTROPHIC,
				   RDMAP_ECODE_UNSPECIFIED, 1);
		drop_conn = 1;
		break;

	case SIW_QP_STATE_ERROR:
		/*
		 * This is an emergency close.
		 *
		 * Any in progress transmit operation will get
		 * cancelled.
		 * This will likely result in a protocol failure,
		 * if a TX operation is in transit. The caller
		 * could unconditionally wait to give the current
		 * operation a chance to complete.
		 * Esp., how to handle the non-empty IRQ case?
		 * The peer was asking for data transfer at a valid
		 * point in time.
		 */
		siw_sq_flush(qp);
		siw_rq_flush(qp);
		qp->attrs.state = SIW_QP_STATE_ERROR;
		drop_conn = 1;
		break;

	default:
		break;
	}
	return drop_conn;
}

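/*
 * QP state transition out of TERMINATE state.
 */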
static void siw_qp_nextstate_from_term(struct siw_qp *qp,
				       struct siw_qp_attrs *attrs)
{
	switch (attrs->state) {
	case SIW_QP_STATE_ERROR:
		siw_rq_flush(qp);
		qp->attrs.state = SIW_QP_STATE_ERROR;

		if (tx_wqe(qp)->wr_status != SIW_WR_IDLE)
			siw_sq_flush(qp);
		break;

	default:
		break;
	}
}

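/*
 * QP state transition out of CLOSING state.
 */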
static int siw_qp_nextstate_from_close(struct siw_qp *qp,
				       struct siw_qp_attrs *attrs)
{
	int rv = 0;

	switch (attrs->state) {
	case SIW_QP_STATE_IDLE:
		WARN_ON(tx_wqe(qp)->wr_status != SIW_WR_IDLE);
		qp->attrs.state = SIW_QP_STATE_IDLE;
		break;

	case SIW_QP_STATE_CLOSING:
		/*
		 * The LLP may already have moved the QP to closing
		 * due to graceful peer close init
		 */
		break;

	case SIW_QP_STATE_ERROR:
		/*
		 * QP was moved to CLOSING by LLP event
		 * not yet seen by user.
		 */
		qp->attrs.state = SIW_QP_STATE_ERROR;

		if (tx_wqe(qp)->wr_status != SIW_WR_IDLE)
			siw_sq_flush(qp);

		siw_rq_flush(qp);
		break;

	default:
		siw_dbg_qp(qp, "state transition undefined: %s => %s\n",
			   siw_qp_state_to_string[qp->attrs.state],
			   siw_qp_state_to_string[attrs->state]);

		rv = -ECONNABORTED;
	}
	return rv;
}

/*
 * Caller must hold qp->state_lock
 */
int siw_qp_modify(struct siw_qp *qp, struct siw_qp_attrs *attrs,
		  enum siw_qp_attr_mask mask)
{
	int drop_conn = 0, rv = 0;

	if (!mask)
		return 0;

	siw_dbg_qp(qp, "state: %s => %s\n",
		   siw_qp_state_to_string[qp->attrs.state],
		   siw_qp_state_to_string[attrs->state]);

	if (mask != SIW_QP_ATTR_STATE)
		siw_qp_modify_nonstate(qp, attrs, mask);

	if (!(mask & SIW_QP_ATTR_STATE))
		return 0;

	switch (qp->attrs.state) {
	case SIW_QP_STATE_IDLE:
	case SIW_QP_STATE_RTR:
		rv = siw_qp_nextstate_from_idle(qp, attrs, mask);
		break;

	case SIW_QP_STATE_RTS:
		drop_conn = siw_qp_nextstate_from_rts(qp, attrs);
		break;

	case SIW_QP_STATE_TERMINATE:
		siw_qp_nextstate_from_term(qp, attrs);
		break;

	case SIW_QP_STATE_CLOSING:
		rv = siw_qp_nextstate_from_close(qp, attrs);
		break;
	default:
		break;
	}
	if (drop_conn)
		siw_qp_cm_drop(qp, 0);

	return rv;
}

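/*
 * Keep a copy of an outbound READ request in the ORQ, to be matched
 * against the corresponding inbound READ response later.
 */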
void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe)
{
	rreq->id = sqe->id;
	rreq->opcode = sqe->opcode;
	rreq->sge[0].laddr = sqe->sge[0].laddr;
	rreq->sge[0].length = sqe->sge[0].length;
	rreq->sge[0].lkey = sqe->sge[0].lkey;
	rreq->sge[1].lkey = sqe->sge[1].lkey;
	rreq->flags = sqe->flags | SIW_WQE_VALID;
	rreq->num_sge = 1;
}

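/*
 * Fetch one work queue element from the Send Queue and activate it
 * as the current TX WQE. Returns 1 if a new WQE got activated, 0 if
 * the SQ was empty or an ORQ fence got applied, and a negative error
 * code on failure.
 */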
static int siw_activate_tx_from_sq(struct siw_qp *qp)
{
	struct siw_sqe *sqe;
	struct siw_wqe *wqe = tx_wqe(qp);
	int rv = 1;

	sqe = sq_get_next(qp);
	if (!sqe)
		return 0;

	memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
	wqe->wr_status = SIW_WR_QUEUED;

	/* First copy SQE to kernel private memory */
	memcpy(&wqe->sqe, sqe, sizeof(*sqe));

	if (wqe->sqe.opcode >= SIW_NUM_OPCODES) {
		rv = -EINVAL;
		goto out;
	}
	if (wqe->sqe.flags & SIW_WQE_INLINE) {
		if (wqe->sqe.opcode != SIW_OP_SEND &&
		    wqe->sqe.opcode != SIW_OP_WRITE) {
			rv = -EINVAL;
			goto out;
		}
		if (wqe->sqe.sge[0].length > SIW_MAX_INLINE) {
			rv = -EINVAL;
			goto out;
		}
		wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
		wqe->sqe.sge[0].lkey = 0;
		wqe->sqe.num_sge = 1;
	}
	if (wqe->sqe.flags & SIW_WQE_READ_FENCE) {
		/* A READ cannot be fenced */
		if (unlikely(wqe->sqe.opcode == SIW_OP_READ ||
			     wqe->sqe.opcode ==
				     SIW_OP_READ_LOCAL_INV)) {
			siw_dbg_qp(qp, "cannot fence read\n");
			rv = -EINVAL;
			goto out;
		}
		spin_lock(&qp->orq_lock);

		if (qp->attrs.orq_size && !siw_orq_empty(qp)) {
			qp->tx_ctx.orq_fence = 1;
			rv = 0;
		}
		spin_unlock(&qp->orq_lock);

	} else if (wqe->sqe.opcode == SIW_OP_READ ||
		   wqe->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
		struct siw_sqe *rreq;

		if (unlikely(!qp->attrs.orq_size)) {
			/* We negotiated not to send READ req's */
			rv = -EINVAL;
			goto out;
		}
		wqe->sqe.num_sge = 1;

		spin_lock(&qp->orq_lock);

		rreq = orq_get_free(qp);
		if (rreq) {
			/*
			 * Make an immediate copy in ORQ to be ready
			 * to process loopback READ reply
			 */
			siw_read_to_orq(rreq, &wqe->sqe);
			qp->orq_put++;
		} else {
			qp->tx_ctx.orq_fence = 1;
			rv = 0;
		}
		spin_unlock(&qp->orq_lock);
	}

	/* Clear SQE, can be re-used by application */
	smp_store_mb(sqe->flags, 0);
	qp->sq_get++;
out:
	if (unlikely(rv < 0)) {
		siw_dbg_qp(qp, "error %d\n", rv);
		wqe->wr_status = SIW_WR_IDLE;
	}
	return rv;
}

/*
 * Must be called with SQ locked.
 * To avoid complete SQ starvation by constant inbound READ requests,
 * the active IRQ will not be served once qp->irq_burst reaches
 * SIW_IRQ_MAXBURST_SQ_ACTIVE, if the SQ has pending work.
 */
int siw_activate_tx(struct siw_qp *qp)
{
	struct siw_sqe *irqe;
	struct siw_wqe *wqe = tx_wqe(qp);

	if (!qp->attrs.irq_size)
		return siw_activate_tx_from_sq(qp);

	irqe = &qp->irq[qp->irq_get % qp->attrs.irq_size];

	if (!(irqe->flags & SIW_WQE_VALID))
		return siw_activate_tx_from_sq(qp);

	/*
	 * Avoid local WQE processing starvation in case
	 * of constant inbound READ request stream
	 */
	if (sq_get_next(qp) && ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) {
		qp->irq_burst = 0;
		return siw_activate_tx_from_sq(qp);
	}
	memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
	wqe->wr_status = SIW_WR_QUEUED;

	/* start READ RESPONSE */
	wqe->sqe.opcode = SIW_OP_READ_RESPONSE;
	wqe->sqe.flags = 0;
	if (irqe->num_sge) {
		wqe->sqe.num_sge = 1;
		wqe->sqe.sge[0].length = irqe->sge[0].length;
		wqe->sqe.sge[0].laddr = irqe->sge[0].laddr;
		wqe->sqe.sge[0].lkey = irqe->sge[0].lkey;
	} else {
		wqe->sqe.num_sge = 0;
	}

	/* Retain original RREQ's message sequence number for
	 * potential error reporting cases.
	 */
	wqe->sqe.sge[1].length = irqe->sge[1].length;

	wqe->sqe.rkey = irqe->rkey;
	wqe->sqe.raddr = irqe->raddr;

	wqe->processed = 0;
	qp->irq_get++;

	/* mark current IRQ entry free */
	smp_store_mb(irqe->flags, 0);

	return 1;
}

/*
 * Check if current CQ state qualifies for calling CQ completion
 * handler. Must be called with CQ lock held.
 */
static bool siw_cq_notify_now(struct siw_cq *cq, u32 flags)
{
	u32 cq_notify;

	if (!cq->base_cq.comp_handler)
		return false;

	/* Read application shared notification state */
	cq_notify = READ_ONCE(cq->notify->flags);

	if ((cq_notify & SIW_NOTIFY_NEXT_COMPLETION) ||
	    ((cq_notify & SIW_NOTIFY_SOLICITED) &&
	     (flags & SIW_WQE_SOLICITED))) {
		/*
		 * CQ notification is one-shot: Since the
		 * current CQE causes user notification,
		 * the CQ gets disarmed and must be re-armed
		 * by the user for a new notification.
		 */
		WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT);

		return true;
	}
	return false;
}

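/*
 * Post a work completion for a Send Queue element to the send CQ,
 * if present, and recycle the SQE. May invoke the CQ's completion
 * handler. Returns -ENOMEM if the CQ is full.
 */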
int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
		     enum siw_wc_status status)
{
	struct siw_cq *cq = qp->scq;
	int rv = 0;

	if (cq) {
		u32 sqe_flags = sqe->flags;
		struct siw_cqe *cqe;
		u32 idx;
		unsigned long flags;

		spin_lock_irqsave(&cq->lock, flags);

		idx = cq->cq_put % cq->num_cqe;
		cqe = &cq->queue[idx];

		if (!READ_ONCE(cqe->flags)) {
			bool notify;

			cqe->id = sqe->id;
			cqe->opcode = sqe->opcode;
			cqe->status = status;
			cqe->imm_data = 0;
			cqe->bytes = bytes;

			if (rdma_is_kernel_res(&cq->base_cq.res))
				cqe->base_qp = &qp->base_qp;
			else
				cqe->qp_id = qp_id(qp);

			/* mark CQE valid for application */
			WRITE_ONCE(cqe->flags, SIW_WQE_VALID);
			/* recycle SQE */
			smp_store_mb(sqe->flags, 0);

			cq->cq_put++;
			notify = siw_cq_notify_now(cq, sqe_flags);

			spin_unlock_irqrestore(&cq->lock, flags);

			if (notify) {
				siw_dbg_cq(cq, "Call completion handler\n");
				cq->base_cq.comp_handler(&cq->base_cq,
						cq->base_cq.cq_context);
			}
		} else {
			spin_unlock_irqrestore(&cq->lock, flags);
			rv = -ENOMEM;
			siw_cq_event(cq, IB_EVENT_CQ_ERR);
		}
	} else {
		/* recycle SQE */
		smp_store_mb(sqe->flags, 0);
	}
	return rv;
}

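/*
 * Post a work completion for a Receive Queue element to the receive
 * CQ, if present, and recycle the RQE. May invoke the CQ's completion
 * handler. Returns -ENOMEM if the CQ is full.
 */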
int siw_rqe_complete(struct siw_qp *qp, struct siw_rqe *rqe, u32 bytes,
		     u32 inval_stag, enum siw_wc_status status)
{
	struct siw_cq *cq = qp->rcq;
	int rv = 0;

	if (cq) {
		struct siw_cqe *cqe;
		u32 idx;
		unsigned long flags;

		spin_lock_irqsave(&cq->lock, flags);

		idx = cq->cq_put % cq->num_cqe;
		cqe = &cq->queue[idx];

		if (!READ_ONCE(cqe->flags)) {
			bool notify;
			u8 cqe_flags = SIW_WQE_VALID;

			cqe->id = rqe->id;
			cqe->opcode = SIW_OP_RECEIVE;
			cqe->status = status;
			cqe->imm_data = 0;
			cqe->bytes = bytes;

			if (rdma_is_kernel_res(&cq->base_cq.res)) {
				cqe->base_qp = &qp->base_qp;
				if (inval_stag) {
					cqe_flags |= SIW_WQE_REM_INVAL;
					cqe->inval_stag = inval_stag;
				}
			} else {
				cqe->qp_id = qp_id(qp);
			}
			/* mark CQE valid for application */
			WRITE_ONCE(cqe->flags, cqe_flags);
			/* recycle RQE */
			smp_store_mb(rqe->flags, 0);

			cq->cq_put++;
			notify = siw_cq_notify_now(cq, SIW_WQE_SIGNALLED);

			spin_unlock_irqrestore(&cq->lock, flags);

			if (notify) {
				siw_dbg_cq(cq, "Call completion handler\n");
				cq->base_cq.comp_handler(&cq->base_cq,
						cq->base_cq.cq_context);
			}
		} else {
			spin_unlock_irqrestore(&cq->lock, flags);
			rv = -ENOMEM;
			siw_cq_event(cq, IB_EVENT_CQ_ERR);
		}
	} else {
		/* recycle RQE */
		smp_store_mb(rqe->flags, 0);
	}
	return rv;
}

/*
 * siw_sq_flush()
 *
 * Flush SQ and ORQ entries to CQ.
 *
 * Must be called with QP state write lock held.
 * Therefore, SQ and ORQ lock must not be taken.
 */
void siw_sq_flush(struct siw_qp *qp)
{
	struct siw_sqe *sqe;
	struct siw_wqe *wqe = tx_wqe(qp);
	int async_event = 0;

	/*
	 * Start with completing any work currently on the ORQ
	 */
	while (qp->attrs.orq_size) {
		sqe = &qp->orq[qp->orq_get % qp->attrs.orq_size];
		if (!READ_ONCE(sqe->flags))
			break;

		if (siw_sqe_complete(qp, sqe, 0, SIW_WC_WR_FLUSH_ERR) != 0)
			break;

		WRITE_ONCE(sqe->flags, 0);
		qp->orq_get++;
	}
	/*
	 * Flush an in-progress WQE if present
	 */
	if (wqe->wr_status != SIW_WR_IDLE) {
		siw_dbg_qp(qp, "flush current SQE, type %d, status %d\n",
			   tx_type(wqe), wqe->wr_status);

		siw_wqe_put_mem(wqe, tx_type(wqe));

		if (tx_type(wqe) != SIW_OP_READ_RESPONSE &&
		    ((tx_type(wqe) != SIW_OP_READ &&
		      tx_type(wqe) != SIW_OP_READ_LOCAL_INV) ||
		     wqe->wr_status == SIW_WR_QUEUED))
			/*
			 * An in-progress Read Request is already in
			 * the ORQ
			 */
			siw_sqe_complete(qp, &wqe->sqe, wqe->bytes,
					 SIW_WC_WR_FLUSH_ERR);

		wqe->wr_status = SIW_WR_IDLE;
	}
	/*
	 * Flush the Send Queue
	 */
	while (qp->attrs.sq_size) {
		sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];
		if (!READ_ONCE(sqe->flags))
			break;

		async_event = 1;
		if (siw_sqe_complete(qp, sqe, 0, SIW_WC_WR_FLUSH_ERR) != 0)
			/*
			 * Shall IB_EVENT_SQ_DRAINED be suppressed if work
			 * completion fails?
			 */
			break;

		WRITE_ONCE(sqe->flags, 0);
		qp->sq_get++;
	}
	if (async_event)
		siw_qp_event(qp, IB_EVENT_SQ_DRAINED);
}

/*
 * siw_rq_flush()
 *
 * Flush recv queue entries to CQ. Also
 * takes care of pending active tagged and untagged
 * inbound transfers, which have target memory
 * referenced.
 *
 * Must be called with QP state write lock held.
 * Therefore, RQ lock must not be taken.
 */
void siw_rq_flush(struct siw_qp *qp)
{
	struct siw_wqe *wqe = &qp->rx_untagged.wqe_active;

	/*
	 * Flush an in-progress untagged operation if present
	 */
	if (wqe->wr_status != SIW_WR_IDLE) {
		siw_dbg_qp(qp, "flush current rqe, type %d, status %d\n",
			   rx_type(wqe), wqe->wr_status);

		siw_wqe_put_mem(wqe, rx_type(wqe));

		if (rx_type(wqe) == SIW_OP_RECEIVE) {
			siw_rqe_complete(qp, &wqe->rqe, wqe->bytes,
					 0, SIW_WC_WR_FLUSH_ERR);
		} else if (rx_type(wqe) != SIW_OP_READ &&
			   rx_type(wqe) != SIW_OP_READ_RESPONSE &&
			   rx_type(wqe) != SIW_OP_WRITE) {
			siw_sqe_complete(qp, &wqe->sqe, 0, SIW_WC_WR_FLUSH_ERR);
		}
		wqe->wr_status = SIW_WR_IDLE;
	}
	wqe = &qp->rx_tagged.wqe_active;

	if (wqe->wr_status != SIW_WR_IDLE) {
		siw_wqe_put_mem(wqe, rx_type(wqe));
		wqe->wr_status = SIW_WR_IDLE;
	}
	/*
	 * Flush the Receive Queue
	 */
	while (qp->attrs.rq_size) {
		struct siw_rqe *rqe =
			&qp->recvq[qp->rq_get % qp->attrs.rq_size];

		if (!READ_ONCE(rqe->flags))
			break;

		if (siw_rqe_complete(qp, rqe, 0, 0, SIW_WC_WR_FLUSH_ERR) != 0)
			break;

		WRITE_ONCE(rqe->flags, 0);
		qp->rq_get++;
	}
}

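/*
 * Register a new QP with the device and assign it a unique QP number.
 */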
int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp)
{
	int rv = xa_alloc(&sdev->qp_xa, &qp->base_qp.qp_num, qp, xa_limit_32b,
			  GFP_KERNEL);

	if (!rv) {
		kref_init(&qp->ref);
		qp->sdev = sdev;
		siw_dbg_qp(qp, "new QP\n");
	}
	return rv;
}

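/*
 * Final QP destruction, called when the last QP reference is dropped.
 * Removes the QP from the device and frees all associated queue memory.
 */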
void siw_free_qp(struct kref *ref)
{
	struct siw_qp *found, *qp = container_of(ref, struct siw_qp, ref);
	struct siw_device *sdev = qp->sdev;
	unsigned long flags;

	if (qp->cep)
		siw_cep_put(qp->cep);

	found = xa_erase(&sdev->qp_xa, qp_id(qp));
	WARN_ON(found != qp);
	spin_lock_irqsave(&sdev->lock, flags);
	list_del(&qp->devq);
	spin_unlock_irqrestore(&sdev->lock, flags);

	vfree(qp->sendq);
	vfree(qp->recvq);
	vfree(qp->irq);
	vfree(qp->orq);

	siw_put_tx_cpu(qp->tx_cpu);
	complete(&qp->qp_free);
	atomic_dec(&sdev->num_qp);
}