/*
 * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/ratelimit.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"
#include "ib_mr.h"

/*
 * Convert IB-specific error message to RDS error message and call core
 * completion handler.
 */
static void rds_ib_send_complete(struct rds_message *rm,
				 int wc_status,
				 void (*complete)(struct rds_message *rm, int status))
{
	int notify_status;

	switch (wc_status) {
	case IB_WC_WR_FLUSH_ERR:
		return;

	case IB_WC_SUCCESS:
		notify_status = RDS_RDMA_SUCCESS;
		break;

	case IB_WC_REM_ACCESS_ERR:
		notify_status = RDS_RDMA_REMOTE_ERROR;
		break;

	default:
		notify_status = RDS_RDMA_OTHER_ERROR;
		break;
	}
	complete(rm, notify_status);
}

static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
				   struct rm_data_op *op,
				   int wc_status)
{
	if (op->op_nents)
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->op_sg, op->op_nents,
				DMA_TO_DEVICE);
}

static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
				   struct rm_rdma_op *op,
				   int wc_status)
{
	if (op->op_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->op_sg, op->op_nents,
				op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		op->op_mapped = 0;
	}

	/* If the user asked for a completion notification on this
	 * message, we can implement three different semantics:
	 * 1. Notify when we received the ACK on the RDS message
	 *    that was queued with the RDMA. This provides reliable
	 *    notification of RDMA status at the expense of a one-way
	 *    packet delay.
	 * 2. Notify when the IB stack gives us the completion event for
	 *    the RDMA operation.
	 * 3. Notify when the IB stack gives us the completion event for
	 *    the accompanying RDS messages.
	 * Here, we implement approach #3. To implement approach #2,
	 * we would need to take an event for the rdma WR. To implement #1,
	 * don't call rds_rdma_send_complete at all, and fall back to the notify
	 * handling in the ACK processing code.
	 *
	 * Note: There's no need to explicitly sync any RDMA buffers using
	 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
	 * operation itself unmapped the RDMA buffers, which takes care
	 * of synching.
	 */
	rds_ib_send_complete(container_of(op, struct rds_message, rdma),
			     wc_status, rds_rdma_send_complete);

	if (op->op_write)
		rds_stats_add(s_send_rdma_bytes, op->op_bytes);
	else
		rds_stats_add(s_recv_rdma_bytes, op->op_bytes);
}

static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
				     struct rm_atomic_op *op,
				     int wc_status)
{
	/* unmap atomic recvbuf */
	if (op->op_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
				DMA_FROM_DEVICE);
		op->op_mapped = 0;
	}

	rds_ib_send_complete(container_of(op, struct rds_message, atomic),
			     wc_status, rds_atomic_send_complete);

	if (op->op_type == RDS_ATOMIC_TYPE_CSWP)
		rds_ib_stats_inc(s_ib_atomic_cswp);
	else
		rds_ib_stats_inc(s_ib_atomic_fadd);
}

/*
 * Unmap the resources associated with a struct send_work.
 *
 * Returns the rm for no good reason other than it is unobtainable
 * other than by switching on wr.opcode, currently, and the caller,
 * the event handler, needs it.
 */
static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
						struct rds_ib_send_work *send,
						int wc_status)
{
	struct rds_message *rm = NULL;

	/* In the error case, wc.opcode sometimes contains garbage */
	switch (send->s_wr.opcode) {
	case IB_WR_SEND:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, data);
			rds_ib_send_unmap_data(ic, send->s_op, wc_status);
		}
		break;
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_READ:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, rdma);
			rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
		}
		break;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
	case IB_WR_ATOMIC_CMP_AND_SWP:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, atomic);
			rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
		}
		break;
	default:
		printk_ratelimited(KERN_NOTICE
			"RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
			__func__, send->s_wr.opcode);
		break;
	}

	send->s_wr.opcode = 0xdead;

	return rm;
}

void rds_ib_send_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		struct ib_sge *sge;

		send->s_op = NULL;

		send->s_wr.wr_id = i;
		send->s_wr.sg_list = send->s_sge;
		send->s_wr.ex.imm_data = 0;

		sge = &send->s_sge[0];
		sge->addr = ic->i_send_hdrs_dma[i];

		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_pd->local_dma_lkey;

		send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;
	}
}

void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		if (send->s_op && send->s_wr.opcode != 0xdead)
			rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
	}
}

/*
 * The only fast path caller always has a non-zero nr, so we don't
 * bother testing nr before performing the atomic sub.
 */
static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
{
	if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
	    waitqueue_active(&rds_ib_ring_empty_wait))
		wake_up(&rds_ib_ring_empty_wait);
	BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
}

/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path. As the sender allocs and potentially
 * unallocs the next free entry in the ring it doesn't alter which is
 * the next to be freed, which is what this is concerned with.
 */
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
{
	struct rds_message *rm = NULL;
	struct rds_connection *conn = ic->conn;
	struct rds_ib_send_work *send;
	u32 completed;
	u32 oldest;
	u32 i = 0;
	int nr_sig = 0;

	rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
		 (unsigned long long)wc->wr_id, wc->status,
		 ib_wc_status_msg(wc->status), wc->byte_len,
		 be32_to_cpu(wc->ex.imm_data));
	rds_ib_stats_inc(s_ib_tx_cq_event);

	if (wc->wr_id == RDS_IB_ACK_WR_ID) {
		if (time_after(jiffies, ic->i_ack_queued + HZ / 2))
			rds_ib_stats_inc(s_ib_tx_stalled);
		rds_ib_ack_send_complete(ic);
		return;
	}

	oldest = rds_ib_ring_oldest(&ic->i_send_ring);

	completed = rds_ib_ring_completed(&ic->i_send_ring, wc->wr_id, oldest);

	for (i = 0; i < completed; i++) {
		send = &ic->i_sends[oldest];
		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
			nr_sig++;

		rm = rds_ib_send_unmap_op(ic, send, wc->status);

		if (time_after(jiffies, send->s_queued + HZ / 2))
			rds_ib_stats_inc(s_ib_tx_stalled);

		if (send->s_op) {
			if (send->s_op == rm->m_final_op) {
				/* If anyone waited for this message to get
				 * flushed out, wake them up now
				 */
				rds_message_unmapped(rm);
			}
			rds_message_put(rm);
			send->s_op = NULL;
		}

		oldest = (oldest + 1) % ic->i_send_ring.w_nr;
	}

	rds_ib_ring_free(&ic->i_send_ring, completed);
	rds_ib_sub_signaled(ic, nr_sig);

	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
	    test_bit(0, &conn->c_map_queued))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	/* We expect errors as the qp is drained during shutdown */
	if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) {
		rds_ib_conn_error(conn, "send completion on <%pI6c,%pI6c,%d> had status %u (%s), vendor err 0x%x, disconnecting and reconnecting\n",
				  &conn->c_laddr, &conn->c_faddr,
				  conn->c_tos, wc->status,
				  ib_wc_status_msg(wc->status), wc->vendor_err);
	}
}

/*
 * This is the main function for allocating credits when sending
 * messages.
 *
 * Conceptually, we have two counters:
 *  - send credits: this tells us how many WRs we're allowed
 *    to submit without overrunning the receiver's queue. For
 *    each SEND WR we post, we decrement this by one.
 *
 *  - posted credits: this tells us how many WRs we recently
 *    posted to the receive queue. This value is transferred
 *    to the peer as a "credit update" in a RDS header field.
 *    Every time we transmit credits to the peer, we subtract
 *    the amount of transferred credits from this counter.
 *
 * It is essential that we avoid situations where both sides have
 * exhausted their send credits, and are unable to send new credits
 * to the peer. We achieve this by requiring that we send at least
 * one credit update to the peer before exhausting our credits.
 * When new credits arrive, we subtract one credit that is withheld
 * until we've posted new buffers and are ready to transmit these
 * credits (see rds_ib_send_add_credits below).
 *
 * The RDS send code is essentially single-threaded; rds_send_xmit
 * sets RDS_IN_XMIT to ensure exclusive access to the send ring.
 * However, the ACK sending code is independent and can race with
 * message SENDs.
 *
 * In the send path, we need to update the counters for send credits
 * and the counter of posted buffers atomically - when we use the
 * last available credit, we cannot allow another thread to race us
 * and grab the posted credits counter. Hence, we have to use a
 * spinlock to protect the credit counter, or use atomics.
 *
 * Spinlocks shared between the send and the receive path are bad,
 * because they create unnecessary delays. An early implementation
 * using a spinlock showed a 5% degradation in throughput at some
 * loads.
 *
 * This implementation avoids spinlocks completely, putting both
 * counters into a single atomic, and updating that atomic using
 * atomic_add (in the receive path, when receiving fresh credits),
 * and using atomic_cmpxchg when updating the two counters.
 */
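/*
 * A sketch of the assumed layout of that single atomic word (the
 * IB_SET_*_CREDITS()/IB_GET_*_CREDITS() helpers in ib.h are expected to
 * implement this packing): send credits live in the low 16 bits and
 * posted credits in the high 16 bits. For example, with 3 send credits
 * available and 2 freshly posted receive buffers, ic->i_credits holds
 * (2 << 16) | 3; grabbing one send credit and advertising both posted
 * credits cmpxchg()es that value to (0 << 16) | 2.
 */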
int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
{
	unsigned int avail, posted, got = 0, advertise;
	long oldval, newval;

	*adv_credits = 0;
	if (!ic->i_flowctl)
		return wanted;

try_again:
	advertise = 0;
	oldval = newval = atomic_read(&ic->i_credits);
	posted = IB_GET_POST_CREDITS(oldval);
	avail = IB_GET_SEND_CREDITS(oldval);

	rdsdebug("wanted=%u credits=%u posted=%u\n",
		 wanted, avail, posted);

	/* The last credit must be used to send a credit update. */
	if (avail && !posted)
		avail--;

	if (avail < wanted) {
		struct rds_connection *conn = ic->i_cm_id->context;

		/* Oops, there aren't that many credits left! */
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		got = avail;
	} else {
		/* Sometimes you get what you want, lalala. */
		got = wanted;
	}
	newval -= IB_SET_SEND_CREDITS(got);

	/*
	 * If need_posted is non-zero, then the caller wants
	 * the posted credits advertised regardless of whether any
	 * send credits are available.
	 */
	if (posted && (got || need_posted)) {
		advertise = min_t(unsigned int, posted, max_posted);
		newval -= IB_SET_POST_CREDITS(advertise);
	}

	/* Finally bill everything */
	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
		goto try_again;

	*adv_credits = advertise;
	return got;
}

void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (credits == 0)
		return;

	rdsdebug("credits=%u current=%u%s\n",
		 credits,
		 IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
		 test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");

	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);

	rds_ib_stats_inc(s_ib_rx_credit_updates);
}

void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (posted == 0)
		return;

	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);

	/* Decide whether to send an update to the peer now.
	 * If we would send a credit update for every single buffer we
	 * post, we would end up with an ACK storm (ACK arrives,
	 * consumes buffer, we refill the ring, send ACK to remote
	 * advertising the newly posted buffer... ad inf)
	 *
	 * Performance pretty much depends on how often we send
	 * credit updates - too frequent updates mean lots of ACKs.
	 * Too infrequent updates, and the peer will run out of
	 * credits and has to throttle.
	 * For the time being, 16 seems to be a good compromise.
	 */
	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}

static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
					     struct rds_ib_send_work *send,
					     bool notify)
{
	/*
	 * We want to delay signaling completions just enough to get
	 * the batching benefits but not so much that we create dead time
	 * on the wire.
	 */
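	/* For example (assuming rds_ib_sysctl_max_unsig_wrs were 16): one of
	 * every 16 posted WRs carries IB_SEND_SIGNALED, so completions - and
	 * the ring reaping they drive - are batched, but a send is never more
	 * than 16 unsignaled WRs away from the next completion.
	 */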
	if (ic->i_unsignaled_wrs-- == 0 || notify) {
		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
		send->s_wr.send_flags |= IB_SEND_SIGNALED;
		return 1;
	}
	return 0;
}

/*
 * This can be called multiple times for a given message. The first time
 * we see a message we map its scatterlist into the IB device so that
 * we can provide that mapped address to the IB scatter gather entries
 * in the IB work requests. We translate the scatterlist into a series
 * of work requests that fragment the message. These work requests complete
 * in order so we pass ownership of the message to the completion handler
 * once we send the final fragment.
 *
 * The RDS core uses the c_send_lock to only enter this function once
 * per connection. This makes sure that the tx ring alloc/unalloc pairs
 * don't get out of sync and confuse the ring.
 */
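/*
 * Roughly, the per-fragment work request built in the loop below (a summary
 * of that code, not a separate contract): each fragment is posted as an
 * IB_WR_SEND whose s_sge[0] points at this ring slot's copy of the RDS
 * header, and whose s_sge[1], present only when the message carries data,
 * covers at most RDS_FRAG_SIZE bytes of the DMA-mapped scatterlist; the
 * last fragment hands ownership of the message to the completion handler
 * via prev->s_op.
 */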
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	const struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	u32 pos;
	u32 i;
	u32 work_alloc;
	u32 credit_alloc = 0;
	u32 posted;
	u32 adv_credits = 0;
	int send_flags = 0;
	int bytes_sent = 0;
	int ret;
	int flow_controlled = 0;
	int nr_sig = 0;

	BUG_ON(off % RDS_FRAG_SIZE);
	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));

	/* Do not send cong updates to IB loopback */
	if (conn->c_loopback
	    && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
		scat = &rm->data.op_sg[sg];
		ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
		return sizeof(struct rds_header) + ret;
	}

	/* FIXME we may overallocate here */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
		i = 1;
	else
		i = DIV_ROUND_UP(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc == 0) {
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	if (ic->i_flowctl) {
		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
		adv_credits += posted;
		if (credit_alloc < work_alloc) {
			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
			work_alloc = credit_alloc;
			flow_controlled = 1;
		}
		if (work_alloc == 0) {
			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
			rds_ib_stats_inc(s_ib_tx_throttle);
			ret = -ENOMEM;
			goto out;
		}
	}

	/* map the message the first time we see it */
	if (!ic->i_data_op) {
		if (rm->data.op_nents) {
			rm->data.op_count = ib_dma_map_sg(dev,
							  rm->data.op_sg,
							  rm->data.op_nents,
							  DMA_TO_DEVICE);
			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
			if (rm->data.op_count == 0) {
				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
		} else {
			rm->data.op_count = 0;
		}

		rds_message_addref(rm);
		rm->data.op_dmasg = 0;
		rm->data.op_dmaoff = 0;
		ic->i_data_op = &rm->data;

		/* Finalize the header */
		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;

		/* If it has a RDMA op, tell the peer we did it. This is
		 * used by the peer to release use-once RDMA MRs. */
		if (rm->rdma.op_active) {
			struct rds_ext_header_rdma ext_hdr;

			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
			rds_message_add_extension(&rm->m_inc.i_hdr,
					RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
		}
		if (rm->m_rdma_cookie) {
			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
					rds_rdma_cookie_key(rm->m_rdma_cookie),
					rds_rdma_cookie_offset(rm->m_rdma_cookie));
		}

		/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
		 * we should not do this unless we have a chance of at least
		 * sticking the header into the send ring. Which is why we
		 * should call rds_ib_ring_alloc first. */
		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
		rds_message_make_checksum(&rm->m_inc.i_hdr);

		/*
		 * Update adv_credits since we reset the ACK_REQUIRED bit.
		 */
		if (ic->i_flowctl) {
			rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
			adv_credits += posted;
			BUG_ON(adv_credits > 255);
		}
	}

	/* Sometimes you want to put a fence between an RDMA
	 * READ and the following SEND.
	 * We could either do this all the time
	 * or when requested by the user. Right now, we let
	 * the application choose.
	 */
	if (rm->rdma.op_active && rm->rdma.op_fence)
		send_flags = IB_SEND_FENCE;

	/* Each frag gets a header. Msgs may be 0 bytes */
	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &ic->i_data_op->op_sg[rm->data.op_dmasg];
	i = 0;
	do {
		unsigned int len = 0;

		/* Set up the header */
		send->s_wr.send_flags = send_flags;
		send->s_wr.opcode = IB_WR_SEND;
		send->s_wr.num_sge = 1;
		send->s_wr.next = NULL;
		send->s_queued = jiffies;
		send->s_op = NULL;

		send->s_sge[0].addr = ic->i_send_hdrs_dma[pos];

		send->s_sge[0].length = sizeof(struct rds_header);
		send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;

		ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev,
					   ic->i_send_hdrs_dma[pos],
					   sizeof(struct rds_header),
					   DMA_TO_DEVICE);
		memcpy(ic->i_send_hdrs[pos], &rm->m_inc.i_hdr,
		       sizeof(struct rds_header));


		/* Set up the data, if present */
		if (i < work_alloc
		    && scat != &rm->data.op_sg[rm->data.op_count]) {
			len = min(RDS_FRAG_SIZE,
				  sg_dma_len(scat) - rm->data.op_dmaoff);
			send->s_wr.num_sge = 2;

			send->s_sge[1].addr = sg_dma_address(scat);
			send->s_sge[1].addr += rm->data.op_dmaoff;
			send->s_sge[1].length = len;
			send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;

			bytes_sent += len;
			rm->data.op_dmaoff += len;
			if (rm->data.op_dmaoff == sg_dma_len(scat)) {
				scat++;
				rm->data.op_dmasg++;
				rm->data.op_dmaoff = 0;
			}
		}

		rds_ib_set_wr_signal_state(ic, send, false);

		/*
		 * Always signal the last one if we're stopping due to flow control.
		 */
		if (ic->i_flowctl && flow_controlled && i == (work_alloc - 1)) {
			rds_ib_set_wr_signal_state(ic, send, true);
			send->s_wr.send_flags |= IB_SEND_SOLICITED;
		}

		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
			nr_sig++;

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		if (ic->i_flowctl && adv_credits) {
			struct rds_header *hdr = ic->i_send_hdrs[pos];

			/* add credit and redo the header checksum */
			hdr->h_credit = adv_credits;
			rds_message_make_checksum(hdr);
			adv_credits = 0;
			rds_ib_stats_inc(s_ib_tx_credit_updates);
		}
		ib_dma_sync_single_for_device(ic->rds_ibdev->dev,
					      ic->i_send_hdrs_dma[pos],
					      sizeof(struct rds_header),
					      DMA_TO_DEVICE);

		if (prev)
			prev->s_wr.next = &send->s_wr;
		prev = send;

		pos = (pos + 1) % ic->i_send_ring.w_nr;
		send = &ic->i_sends[pos];
		i++;

	} while (i < work_alloc
		 && scat != &rm->data.op_sg[rm->data.op_count]);

	/* Account the RDS header in the number of bytes we sent, but just once.
	 * The caller has no concept of fragmentation. */
	if (hdr_off == 0)
		bytes_sent += sizeof(struct rds_header);

	/* if we finished the message then send completion owns it */
	if (scat == &rm->data.op_sg[rm->data.op_count]) {
		prev->s_op = ic->i_data_op;
		prev->s_wr.send_flags |= IB_SEND_SOLICITED;
		if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED))
			nr_sig += rds_ib_set_wr_signal_state(ic, prev, true);
		ic->i_data_op = NULL;
	}

	/* Put back wrs & credits we didn't use */
	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}
	if (ic->i_flowctl && i < credit_alloc)
		rds_ib_send_add_credits(conn, credit_alloc - i);

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	/* XXX need to worry about failed_wr and partial sends. */
	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: ib_post_send to %pI6c "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		if (prev->s_op) {
			ic->i_data_op = prev->s_op;
			prev->s_op = NULL;
		}

		rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
		goto out;
	}

	ret = bytes_sent;
out:
	BUG_ON(adv_credits);
	return ret;
}

/*
 * Issue atomic operation.
 * A simplified version of the rdma case, we always map 1 SG, and
 * only 8 bytes, for the return value from the atomic operation.
 */
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	const struct ib_send_wr *failed_wr;
	u32 pos;
	u32 work_alloc;
	int ret;
	int nr_sig = 0;

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
	if (work_alloc != 1) {
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	/* address of send request in ring */
	send = &ic->i_sends[pos];
	send->s_queued = jiffies;

	if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
		send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
		send->s_atomic_wr.compare_add = op->op_m_cswp.compare;
		send->s_atomic_wr.swap = op->op_m_cswp.swap;
		send->s_atomic_wr.compare_add_mask = op->op_m_cswp.compare_mask;
		send->s_atomic_wr.swap_mask = op->op_m_cswp.swap_mask;
	} else { /* FADD */
		send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD;
		send->s_atomic_wr.compare_add = op->op_m_fadd.add;
		send->s_atomic_wr.swap = 0;
		send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask;
		send->s_atomic_wr.swap_mask = 0;
	}
	send->s_wr.send_flags = 0;
	nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
	send->s_atomic_wr.wr.num_sge = 1;
	send->s_atomic_wr.wr.next = NULL;
	send->s_atomic_wr.remote_addr = op->op_remote_addr;
	send->s_atomic_wr.rkey = op->op_rkey;
	send->s_op = op;
	rds_message_addref(container_of(send->s_op, struct rds_message, atomic));

	/* map 8 byte retval buffer to the device */
	ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
	rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
	if (ret != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
		ret = -ENOMEM; /* XXX ? */
		goto out;
	}

	/* Convert our struct scatterlist to struct ib_sge */
	send->s_sge[0].addr = sg_dma_address(op->op_sg);
	send->s_sge[0].length = sg_dma_len(op->op_sg);
	send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;

	rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
		 send->s_sge[0].addr, send->s_sge[0].length);

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	failed_wr = &send->s_atomic_wr.wr;
	ret = ib_post_send(ic->i_cm_id->qp, &send->s_atomic_wr.wr, &failed_wr);
	rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
		 send, &send->s_atomic_wr, ret, failed_wr);
	BUG_ON(failed_wr != &send->s_atomic_wr.wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI6c "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		goto out;
	}

	if (unlikely(failed_wr != &send->s_atomic_wr.wr)) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &send->s_atomic_wr.wr);
	}

out:
	return ret;
}

int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	const struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	unsigned long len;
	u64 remote_addr = op->op_remote_addr;
	u32 max_sge = ic->rds_ibdev->max_sge;
	u32 pos;
	u32 work_alloc;
	u32 i;
	u32 j;
	int sent;
	int ret;
	int num_sge;
	int nr_sig = 0;
	u64 odp_addr = op->op_odp_addr;
	u32 odp_lkey = 0;

	/* map the op the first time we see it */
	if (!op->op_odp_mr) {
		if (!op->op_mapped) {
			op->op_count =
				ib_dma_map_sg(ic->i_cm_id->device, op->op_sg,
					      op->op_nents,
					      (op->op_write) ? DMA_TO_DEVICE :
							       DMA_FROM_DEVICE);
			rdsdebug("ic %p mapping op %p: %d\n", ic, op,
				 op->op_count);
			if (op->op_count == 0) {
				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
			op->op_mapped = 1;
		}
	} else {
		op->op_count = op->op_nents;
		odp_lkey = rds_ib_get_lkey(op->op_odp_mr->r_trans_private);
	}

	/*
	 * Instead of knowing how to return a partial rdma read/write we insist that there
	 * be enough work requests to send the entire message.
	 */
	i = DIV_ROUND_UP(op->op_count, max_sge);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc != i) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &op->op_sg[0];
	sent = 0;
	num_sge = op->op_count;

	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
		send->s_wr.send_flags = 0;
		send->s_queued = jiffies;
		send->s_op = NULL;

		if (!op->op_notify)
			nr_sig += rds_ib_set_wr_signal_state(ic, send,
							     op->op_notify);

		send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
		send->s_rdma_wr.remote_addr = remote_addr;
		send->s_rdma_wr.rkey = op->op_rkey;

		if (num_sge > max_sge) {
			send->s_rdma_wr.wr.num_sge = max_sge;
			num_sge -= max_sge;
		} else {
			send->s_rdma_wr.wr.num_sge = num_sge;
		}

		send->s_rdma_wr.wr.next = NULL;

		if (prev)
			prev->s_rdma_wr.wr.next = &send->s_rdma_wr.wr;

		for (j = 0; j < send->s_rdma_wr.wr.num_sge &&
		     scat != &op->op_sg[op->op_count]; j++) {
			len = sg_dma_len(scat);
			if (!op->op_odp_mr) {
				send->s_sge[j].addr = sg_dma_address(scat);
				send->s_sge[j].lkey = ic->i_pd->local_dma_lkey;
			} else {
				send->s_sge[j].addr = odp_addr;
				send->s_sge[j].lkey = odp_lkey;
			}
			send->s_sge[j].length = len;

			sent += len;
			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);

			remote_addr += len;
			odp_addr += len;
			scat++;
		}

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_rdma_wr.wr,
			 send->s_rdma_wr.wr.num_sge,
			 send->s_rdma_wr.wr.next);

		prev = send;
		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
			send = ic->i_sends;
	}

	/* give a reference to the last op */
	if (scat == &op->op_sg[op->op_count]) {
		prev->s_op = op;
		rds_message_addref(container_of(op, struct rds_message, rdma));
	}

	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	failed_wr = &first->s_rdma_wr.wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_rdma_wr.wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_rdma_wr.wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI6c "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		goto out;
	}

	if (unlikely(failed_wr != &first->s_rdma_wr.wr)) {
		printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &first->s_rdma_wr.wr);
	}


out:
	return ret;
}

void rds_ib_xmit_path_complete(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* We may have a pending ACK or window update we were unable
	 * to send previously (due to flow control). Try again. */
	rds_ib_attempt_ack(ic);
}