/*
 * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/sched/clock.h>
#include <linux/time.h>
#include <linux/rds.h>

#include "rds.h"

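/*
 * Initialize a newly allocated incoming message.  The transport that
 * allocated it holds the initial reference, which is dropped with
 * rds_inc_put() once the message has been delivered or discarded.
 */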
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  struct in6_addr *saddr)
{
	refcount_set(&inc->i_refcount, 1);
	INIT_LIST_HEAD(&inc->i_item);
	inc->i_conn = conn;
	inc->i_saddr = *saddr;
	inc->i_usercopy.rdma_cookie = 0;
	inc->i_usercopy.rx_tstamp = ktime_set(0, 0);

	memset(inc->i_rx_lat_trace, 0, sizeof(inc->i_rx_lat_trace));
}
EXPORT_SYMBOL_GPL(rds_inc_init);

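/*
 * As rds_inc_init(), but for multipath-capable transports: the incoming
 * message is additionally bound to the connection path it arrived on.
 */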
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *cp,
		       struct in6_addr *saddr)
{
	refcount_set(&inc->i_refcount, 1);
	INIT_LIST_HEAD(&inc->i_item);
	inc->i_conn = cp->cp_conn;
	inc->i_conn_path = cp;
	inc->i_saddr = *saddr;
	inc->i_usercopy.rdma_cookie = 0;
	inc->i_usercopy.rx_tstamp = ktime_set(0, 0);
}
EXPORT_SYMBOL_GPL(rds_inc_path_init);

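/*
 * Incoming messages are refcounted: the transport holds a reference
 * while the message sits in its receive structures, and each entry on a
 * socket's receive queue holds another.  The final rds_inc_put() hands
 * the message back to the transport's inc_free() callback.
 */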
static void rds_inc_addref(struct rds_incoming *inc)
{
	rdsdebug("addref inc %p ref %d\n", inc, refcount_read(&inc->i_refcount));
	refcount_inc(&inc->i_refcount);
}

void rds_inc_put(struct rds_incoming *inc)
{
	rdsdebug("put inc %p ref %d\n", inc, refcount_read(&inc->i_refcount));
	if (refcount_dec_and_test(&inc->i_refcount)) {
		BUG_ON(!list_empty(&inc->i_item));

		inc->i_conn->c_trans->inc_free(inc);
	}
}
EXPORT_SYMBOL_GPL(rds_inc_put);

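/*
 * Account a change in the socket's receive allocation against the local
 * congestion map.  Crossing the receive buffer limit marks the bound
 * port congested; the bit is only cleared again once the allocation
 * falls below half of the buffer, so the congestion state does not flap
 * around the threshold.
 */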
static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
				  struct rds_cong_map *map,
				  int delta, __be16 port)
{
	int now_congested;

	if (delta == 0)
		return;

	rs->rs_rcv_bytes += delta;
	if (delta > 0)
		rds_stats_add(s_recv_bytes_added_to_socket, delta);
	else
		rds_stats_add(s_recv_bytes_removed_from_socket, -delta);

	/* loop transport doesn't send/recv congestion updates */
	if (rs->rs_transport->t_type == RDS_TRANS_LOOP)
		return;

	now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);

	rdsdebug("rs %p (%pI6c:%u) recv bytes %d buf %d "
		 "now_cong %d delta %d\n",
		 rs, &rs->rs_bound_addr,
		 ntohs(rs->rs_bound_port), rs->rs_rcv_bytes,
		 rds_sk_rcvbuf(rs), now_congested, delta);

	/* wasn't -> am congested */
	if (!rs->rs_congested && now_congested) {
		rs->rs_congested = 1;
		rds_cong_set_bit(map, port);
		rds_cong_queue_updates(map);
	}
	/* was -> aren't congested */
	/* Require more free space before reporting uncongested to prevent
	   bouncing cong/uncong state too often */
	else if (rs->rs_congested && (rs->rs_rcv_bytes < (rds_sk_rcvbuf(rs)/2))) {
		rs->rs_congested = 0;
		rds_cong_clear_bit(map, port);
		rds_cong_queue_updates(map);
	}

	/* do nothing if no change in cong state */
}

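/*
 * The peer advertises a generation number identifying the current
 * incarnation of its RDS/TCP stack.  When the number changes, the peer
 * has restarted: reset the per-path sequence counters and mark anything
 * still on the retransmit queues with RDS_MSG_FLUSH so stale messages
 * are dropped instead of being replayed to the new incarnation.
 */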
static void rds_conn_peer_gen_update(struct rds_connection *conn,
				     u32 peer_gen_num)
{
	int i;
	struct rds_message *rm, *tmp;
	unsigned long flags;

	WARN_ON(conn->c_trans->t_type != RDS_TRANS_TCP);
	if (peer_gen_num != 0) {
		if (conn->c_peer_gen_num != 0 &&
		    peer_gen_num != conn->c_peer_gen_num) {
			for (i = 0; i < RDS_MPATH_WORKERS; i++) {
				struct rds_conn_path *cp;

				cp = &conn->c_path[i];
				spin_lock_irqsave(&cp->cp_lock, flags);
				cp->cp_next_tx_seq = 1;
				cp->cp_next_rx_seq = 0;
				list_for_each_entry_safe(rm, tmp,
							 &cp->cp_retrans,
							 m_conn_item) {
					set_bit(RDS_MSG_FLUSH, &rm->m_flags);
				}
				spin_unlock_irqrestore(&cp->cp_lock, flags);
			}
		}
		conn->c_peer_gen_num = peer_gen_num;
	}
}

/*
 * Process all extension headers that come with this message.
 */
static void rds_recv_incoming_exthdrs(struct rds_incoming *inc, struct rds_sock *rs)
{
	struct rds_header *hdr = &inc->i_hdr;
	unsigned int pos = 0, type, len;
	union {
		struct rds_ext_header_version version;
		struct rds_ext_header_rdma rdma;
		struct rds_ext_header_rdma_dest rdma_dest;
	} buffer;

	while (1) {
		len = sizeof(buffer);
		type = rds_message_next_extension(hdr, &pos, &buffer, &len);
		if (type == RDS_EXTHDR_NONE)
			break;
		/* Process extension header here */
		switch (type) {
		case RDS_EXTHDR_RDMA:
			rds_rdma_unuse(rs, be32_to_cpu(buffer.rdma.h_rdma_rkey), 0);
			break;

		case RDS_EXTHDR_RDMA_DEST:
			/* We ignore the size for now. We could stash it
			 * somewhere and use it for error checking. */
			inc->i_usercopy.rdma_cookie = rds_rdma_make_cookie(
					be32_to_cpu(buffer.rdma_dest.h_rdma_rkey),
					be32_to_cpu(buffer.rdma_dest.h_rdma_offset));

			break;
		}
	}
}

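/*
 * Parse the extension headers carried by a handshake ping or pong: the
 * peer's path count (capped at RDS_MPATH_WORKERS) and its generation
 * number, which is fed into rds_conn_peer_gen_update() above.
 */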
static void rds_recv_hs_exthdrs(struct rds_header *hdr,
				struct rds_connection *conn)
{
	unsigned int pos = 0, type, len;
	union {
		struct rds_ext_header_version version;
		u16 rds_npaths;
		u32 rds_gen_num;
	} buffer;
	u32 new_peer_gen_num = 0;

	while (1) {
		len = sizeof(buffer);
		type = rds_message_next_extension(hdr, &pos, &buffer, &len);
		if (type == RDS_EXTHDR_NONE)
			break;
		/* Process extension header here */
		switch (type) {
		case RDS_EXTHDR_NPATHS:
			conn->c_npaths = min_t(int, RDS_MPATH_WORKERS,
					       be16_to_cpu(buffer.rds_npaths));
			break;
		case RDS_EXTHDR_GEN_NUM:
			new_peer_gen_num = be32_to_cpu(buffer.rds_gen_num);
			break;
		default:
			pr_warn_ratelimited("ignoring unknown exthdr type 0x%x\n",
					    type);
		}
	}
	/* if RDS_EXTHDR_NPATHS was not found, default to a single path */
	conn->c_npaths = max_t(int, conn->c_npaths, 1);
	conn->c_ping_triggered = 0;
	rds_conn_peer_gen_update(conn, new_peer_gen_num);
}

/* rds_start_mprds() will synchronously start multiple paths when appropriate.
 * The scheme is based on the following rules:
 *
 * 1. rds_sendmsg on first connect attempt sends the probe ping, with the
 *    sender's npaths (s_npaths)
 * 2. rcvr of probe-ping knows the mprds_paths = min(s_npaths, r_npaths). It
 *    sends back a probe-pong with r_npaths. After that, if rcvr is the
 *    smaller ip addr, it starts rds_conn_path_connect_if_down on all
 *    mprds_paths.
 * 3. sender gets woken up, and can move to rds_conn_path_connect_if_down.
 *    If it is the smaller ipaddr, rds_conn_path_connect_if_down can be
 *    called after reception of the probe-pong on all mprds_paths.
 *    Otherwise (sender of probe-ping is not the smaller ip addr): just call
 *    rds_conn_path_connect_if_down on the hashed path. (see rule 4)
 * 4. rds_connect_worker must only trigger a connection if laddr < faddr.
 * 5. sender may end up queuing the packet on the cp; it will be sent out
 *    later, once the connection setup is completed.
 */
static void rds_start_mprds(struct rds_connection *conn)
{
	int i;
	struct rds_conn_path *cp;

	if (conn->c_npaths > 1 &&
	    rds_addr_cmp(&conn->c_laddr, &conn->c_faddr) < 0) {
		for (i = 0; i < conn->c_npaths; i++) {
			cp = &conn->c_path[i];
			rds_conn_path_connect_if_down(cp);
		}
	}
}

/*
 * The transport must make sure that this is serialized against other
 * rx and conn reset on this specific conn.
 *
 * We currently assert that only one fragmented message will be sent
 * down a connection at a time.  This lets us reassemble in the conn
 * instead of per-flow which means that we don't have to go digging through
 * flows to tear down partial reassembly progress on conn failure and
 * we save flow lookup and locking for each frag arrival.  It does mean
 * that small messages will wait behind large ones.  Fragmenting at all
 * is only to reduce the memory consumption of pre-posted buffers.
 *
 * The caller passes in saddr and daddr instead of us getting it from the
 * conn.  This lets loopback, which only has one conn for both directions,
 * tell us which roles the addrs in the conn are playing for this message.
 */
void rds_recv_incoming(struct rds_connection *conn, struct in6_addr *saddr,
		       struct in6_addr *daddr,
		       struct rds_incoming *inc, gfp_t gfp)
{
	struct rds_sock *rs = NULL;
	struct sock *sk;
	unsigned long flags;
	struct rds_conn_path *cp;

	inc->i_conn = conn;
	inc->i_rx_jiffies = jiffies;
	if (conn->c_trans->t_mp_capable)
		cp = inc->i_conn_path;
	else
		cp = &conn->c_path[0];

	rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u "
		 "flags 0x%x rx_jiffies %lu\n", conn,
		 (unsigned long long)cp->cp_next_rx_seq,
		 inc,
		 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence),
		 be32_to_cpu(inc->i_hdr.h_len),
		 be16_to_cpu(inc->i_hdr.h_sport),
		 be16_to_cpu(inc->i_hdr.h_dport),
		 inc->i_hdr.h_flags,
		 inc->i_rx_jiffies);

	/*
	 * Sequence numbers should only increase.  Messages get their
	 * sequence number as they're queued in a sending conn.  They
	 * can be dropped, though, if the sending socket is closed before
	 * they hit the wire.  So sequence numbers can skip forward
	 * under normal operation.  They can also drop back in the conn
	 * failover case as previously sent messages are resent down the
	 * new instance of a conn.  We drop those, otherwise we have
	 * to assume that the next valid seq does not come after a
	 * hole in the fragment stream.
	 *
	 * The headers don't give us a way to tell whether fragments of
	 * a message have been dropped.  We assume that frags that arrive
	 * to a flow are part of the current message on the flow that is
	 * being reassembled.  This means that senders can't drop messages
	 * from the sending conn until all their frags are sent.
	 *
	 * XXX we could spend more on the wire to get more robust failure
	 * detection, arguably worth it to avoid data corruption.
	 */
	if (be64_to_cpu(inc->i_hdr.h_sequence) < cp->cp_next_rx_seq &&
	    (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
		rds_stats_inc(s_recv_drop_old_seq);
		goto out;
	}
	cp->cp_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1;

	if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
		if (inc->i_hdr.h_sport == 0) {
			rdsdebug("ignore ping with 0 sport from %pI6c\n",
				 saddr);
			goto out;
		}
		rds_stats_inc(s_recv_ping);
		rds_send_pong(cp, inc->i_hdr.h_sport);
		/* if this is a handshake ping, start multipath if necessary */
		if (RDS_HS_PROBE(be16_to_cpu(inc->i_hdr.h_sport),
				 be16_to_cpu(inc->i_hdr.h_dport))) {
			rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
			rds_start_mprds(cp->cp_conn);
		}
		goto out;
	}

	if (be16_to_cpu(inc->i_hdr.h_dport) == RDS_FLAG_PROBE_PORT &&
	    inc->i_hdr.h_sport == 0) {
		rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
		/* if this is a handshake pong, start multipath if necessary */
		rds_start_mprds(cp->cp_conn);
		wake_up(&cp->cp_conn->c_hs_waitq);
		goto out;
	}

	rs = rds_find_bound(daddr, inc->i_hdr.h_dport, conn->c_bound_if);
	if (!rs) {
		rds_stats_inc(s_recv_drop_no_sock);
		goto out;
	}

	/* Process extension headers */
	rds_recv_incoming_exthdrs(inc, rs);

	/* We can be racing with rds_release() which marks the socket dead. */
	sk = rds_rs_to_sk(rs);

	/* serialize with rds_release -> sock_orphan */
	write_lock_irqsave(&rs->rs_recv_lock, flags);
	if (!sock_flag(sk, SOCK_DEAD)) {
		rdsdebug("adding inc %p to rs %p's recv queue\n", inc, rs);
		rds_stats_inc(s_recv_queued);
		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
				      be32_to_cpu(inc->i_hdr.h_len),
				      inc->i_hdr.h_dport);
		if (sock_flag(sk, SOCK_RCVTSTAMP))
			inc->i_usercopy.rx_tstamp = ktime_get_real();
		rds_inc_addref(inc);
		inc->i_rx_lat_trace[RDS_MSG_RX_END] = local_clock();
		list_add_tail(&inc->i_item, &rs->rs_recv_queue);
		__rds_wake_sk_sleep(sk);
	} else {
		rds_stats_inc(s_recv_drop_dead_sock);
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);

out:
	if (rs)
		rds_sock_put(rs);
}
EXPORT_SYMBOL_GPL(rds_recv_incoming);

/*
 * Be very careful here.  This is called as the condition in
 * wait_event_*() and so needs to cope with being called many times.
 */
static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc)
{
	unsigned long flags;

	if (!*inc) {
		read_lock_irqsave(&rs->rs_recv_lock, flags);
		if (!list_empty(&rs->rs_recv_queue)) {
			*inc = list_entry(rs->rs_recv_queue.next,
					  struct rds_incoming,
					  i_item);
			rds_inc_addref(*inc);
		}
		read_unlock_irqrestore(&rs->rs_recv_lock, flags);
	}

	return *inc != NULL;
}

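/*
 * Returns 1 if the message is still on the socket's receive queue, 0 if
 * another thread raced us to it.  With @drop set (i.e. not MSG_PEEK) the
 * message is also unlinked, its bytes are subtracted from the receive
 * allocation, and the queue's reference is dropped.
 */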
static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
			    int drop)
{
	struct sock *sk = rds_rs_to_sk(rs);
	int ret = 0;
	unsigned long flags;
	struct rds_incoming *to_drop = NULL;

	write_lock_irqsave(&rs->rs_recv_lock, flags);
	if (!list_empty(&inc->i_item)) {
		ret = 1;
		if (drop) {
			/* XXX make sure this i_conn is reliable */
			rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
					      -be32_to_cpu(inc->i_hdr.h_len),
					      inc->i_hdr.h_dport);
			list_del_init(&inc->i_item);
			to_drop = inc;
		}
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);

	if (to_drop)
		rds_inc_put(to_drop);

	rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop);
	return ret;
}

/*
 * Pull errors off the error queue.
 * If msghdr is NULL, we will just purge the error queue.
 */
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
{
	struct rds_notifier *notifier;
	struct rds_rdma_notify cmsg;
	unsigned int count = 0, max_messages = ~0U;
	unsigned long flags;
	LIST_HEAD(copy);
	int err = 0;

	memset(&cmsg, 0, sizeof(cmsg));	/* fill holes with zero */

	/* put_cmsg copies to user space and thus may sleep. We can't do this
	 * with rs_lock held, so first grab as many notifications as we can
	 * stuff in the user provided cmsg buffer. We don't try to copy more,
	 * to avoid losing notifications - except when the buffer is so small
	 * that it wouldn't even hold a single notification. Then we give the
	 * caller as much of this single msg as we can squeeze in, and set
	 * MSG_CTRUNC.
	 */
	if (msghdr) {
		max_messages = msghdr->msg_controllen / CMSG_SPACE(sizeof(cmsg));
		if (!max_messages)
			max_messages = 1;
	}

	spin_lock_irqsave(&rs->rs_lock, flags);
	while (!list_empty(&rs->rs_notify_queue) && count < max_messages) {
		notifier = list_entry(rs->rs_notify_queue.next,
				      struct rds_notifier, n_list);
		list_move(&notifier->n_list, &copy);
		count++;
	}
	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (!count)
		return 0;

	while (!list_empty(&copy)) {
		notifier = list_entry(copy.next, struct rds_notifier, n_list);

		if (msghdr) {
			cmsg.user_token = notifier->n_user_token;
			cmsg.status = notifier->n_status;

			err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS,
				       sizeof(cmsg), &cmsg);
			if (err)
				break;
		}

		list_del_init(&notifier->n_list);
		kfree(notifier);
	}

	/* If we bailed out because of an error in put_cmsg,
	 * we may be left with one or more notifications that we
	 * didn't process. Return them to the head of the list. */
	if (!list_empty(&copy)) {
		spin_lock_irqsave(&rs->rs_lock, flags);
		list_splice(&copy, &rs->rs_notify_queue);
		spin_unlock_irqrestore(&rs->rs_lock, flags);
	}

	return err;
}

/*
 * Queue a congestion notification
 */
static int rds_notify_cong(struct rds_sock *rs, struct msghdr *msghdr)
{
	uint64_t notify = rs->rs_cong_notify;
	unsigned long flags;
	int err;

	err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_CONG_UPDATE,
		       sizeof(notify), &notify);
	if (err)
		return err;

	spin_lock_irqsave(&rs->rs_lock, flags);
	rs->rs_cong_notify &= ~notify;
	spin_unlock_irqrestore(&rs->rs_lock, flags);

	return 0;
}

/*
 * Receive any control messages.
 */
static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
			 struct rds_sock *rs)
{
	int ret = 0;

	if (inc->i_usercopy.rdma_cookie) {
		ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST,
			       sizeof(inc->i_usercopy.rdma_cookie),
			       &inc->i_usercopy.rdma_cookie);
		if (ret)
			goto out;
	}

	if ((inc->i_usercopy.rx_tstamp != 0) &&
	    sock_flag(rds_rs_to_sk(rs), SOCK_RCVTSTAMP)) {
		struct __kernel_old_timeval tv =
			ns_to_kernel_old_timeval(inc->i_usercopy.rx_tstamp);

		if (!sock_flag(rds_rs_to_sk(rs), SOCK_TSTAMP_NEW)) {
			ret = put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD,
				       sizeof(tv), &tv);
		} else {
			struct __kernel_sock_timeval sk_tv;

			sk_tv.tv_sec = tv.tv_sec;
			sk_tv.tv_usec = tv.tv_usec;

			ret = put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW,
				       sizeof(sk_tv), &sk_tv);
		}

		if (ret)
			goto out;
	}

	if (rs->rs_rx_traces) {
		struct rds_cmsg_rx_trace t;
		int i, j;

		memset(&t, 0, sizeof(t));
		inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock();
		t.rx_traces = rs->rs_rx_traces;
		for (i = 0; i < rs->rs_rx_traces; i++) {
			j = rs->rs_rx_trace[i];
			t.rx_trace_pos[i] = j;
			t.rx_trace[i] = inc->i_rx_lat_trace[j + 1] -
					inc->i_rx_lat_trace[j];
		}

		ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RXPATH_LATENCY,
			       sizeof(t), &t);
		if (ret)
			goto out;
	}

out:
	return ret;
}

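/*
 * Deliver any completed zerocopy cookies as a RDS_CMSG_ZCOPY_COMPLETION
 * control message.  Returns true if a cookie batch was consumed; if
 * put_cmsg() fails, the batch is put back on the queue so it isn't lost.
 */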
static bool rds_recvmsg_zcookie(struct rds_sock *rs, struct msghdr *msg)
{
	struct rds_msg_zcopy_queue *q = &rs->rs_zcookie_queue;
	struct rds_msg_zcopy_info *info = NULL;
	struct rds_zcopy_cookies *done;
	unsigned long flags;

	if (!msg->msg_control)
		return false;

	if (!sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY) ||
	    msg->msg_controllen < CMSG_SPACE(sizeof(*done)))
		return false;

	spin_lock_irqsave(&q->lock, flags);
	if (!list_empty(&q->zcookie_head)) {
		info = list_entry(q->zcookie_head.next,
				  struct rds_msg_zcopy_info, rs_zcookie_next);
		list_del(&info->rs_zcookie_next);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	if (!info)
		return false;
	done = &info->zcookies;
	if (put_cmsg(msg, SOL_RDS, RDS_CMSG_ZCOPY_COMPLETION, sizeof(*done),
		     done)) {
		spin_lock_irqsave(&q->lock, flags);
		list_add(&info->rs_zcookie_next, &q->zcookie_head);
		spin_unlock_irqrestore(&q->lock, flags);
		return false;
	}
	kfree(info);
	return true;
}

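/*
 * recvmsg() entry point for RDS sockets.  Pending RDMA notifications
 * and congestion updates are delivered ahead of data.  A data message
 * is copied to userspace and then unlinked from the receive queue
 * unless MSG_PEEK was given; if the copy raced with another thread that
 * already dequeued the message, the partial copy is reverted and the
 * next message is tried.
 */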
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	long timeo;
	int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct rds_incoming *inc = NULL;

	/* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */
	timeo = sock_rcvtimeo(sk, nonblock);

	rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);

	if (msg_flags & MSG_OOB)
		goto out;
	if (msg_flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sk, msg, size, SOL_IP, IP_RECVERR);

	while (1) {
		/* If there are pending notifications, do those - and nothing else */
		if (!list_empty(&rs->rs_notify_queue)) {
			ret = rds_notify_queue_get(rs, msg);
			break;
		}

		if (rs->rs_cong_notify) {
			ret = rds_notify_cong(rs, msg);
			break;
		}

		if (!rds_next_incoming(rs, &inc)) {
			if (nonblock) {
				bool reaped = rds_recvmsg_zcookie(rs, msg);

				ret = reaped ? 0 : -EAGAIN;
				break;
			}

			timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					(!list_empty(&rs->rs_notify_queue) ||
					 rs->rs_cong_notify ||
					 rds_next_incoming(rs, &inc)), timeo);
			rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
				 timeo);
			if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
				continue;

			ret = timeo;
			if (ret == 0)
				ret = -ETIMEDOUT;
			break;
		}

		rdsdebug("copying inc %p from %pI6c:%u to user\n", inc,
			 &inc->i_conn->c_faddr,
			 ntohs(inc->i_hdr.h_sport));
		ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &msg->msg_iter);
		if (ret < 0)
			break;

		/*
		 * if the message we just copied isn't at the head of the
		 * recv queue then someone else raced us to return it, try
		 * to get the next message.
		 */
		if (!rds_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) {
			rds_inc_put(inc);
			inc = NULL;
			rds_stats_inc(s_recv_deliver_raced);
			iov_iter_revert(&msg->msg_iter, ret);
			continue;
		}

		if (ret < be32_to_cpu(inc->i_hdr.h_len)) {
			if (msg_flags & MSG_TRUNC)
				ret = be32_to_cpu(inc->i_hdr.h_len);
			msg->msg_flags |= MSG_TRUNC;
		}

		if (rds_cmsg_recv(inc, msg, rs)) {
			ret = -EFAULT;
			break;
		}
		rds_recvmsg_zcookie(rs, msg);

		rds_stats_inc(s_recv_delivered);

		if (msg->msg_name) {
			if (ipv6_addr_v4mapped(&inc->i_saddr)) {
				sin->sin_family = AF_INET;
				sin->sin_port = inc->i_hdr.h_sport;
				sin->sin_addr.s_addr =
				    inc->i_saddr.s6_addr32[3];
				memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
				msg->msg_namelen = sizeof(*sin);
			} else {
				sin6->sin6_family = AF_INET6;
				sin6->sin6_port = inc->i_hdr.h_sport;
				sin6->sin6_addr = inc->i_saddr;
				sin6->sin6_flowinfo = 0;
				sin6->sin6_scope_id = rs->rs_bound_scope_id;
				msg->msg_namelen = sizeof(*sin6);
			}
		}
		break;
	}

	if (inc)
		rds_inc_put(inc);

out:
	return ret;
}

/*
 * The socket is being shut down and we're asked to drop messages that were
 * queued for recvmsg.  The caller has unbound the socket so the receive path
 * won't queue any more incoming fragments or messages on the socket.
 */
void rds_clear_recv_queue(struct rds_sock *rs)
{
	struct sock *sk = rds_rs_to_sk(rs);
	struct rds_incoming *inc, *tmp;
	unsigned long flags;
	LIST_HEAD(to_drop);

	write_lock_irqsave(&rs->rs_recv_lock, flags);
	list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) {
		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
				      -be32_to_cpu(inc->i_hdr.h_len),
				      inc->i_hdr.h_dport);
		list_move(&inc->i_item, &to_drop);
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);

	list_for_each_entry_safe(inc, tmp, &to_drop, i_item) {
		list_del_init(&inc->i_item);
		rds_inc_put(inc);
	}
}

/*
 * inc->i_saddr isn't used here because it is only set in the receive
 * path.
 */
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip)
{
	struct rds_info_message minfo;

	minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence);
	minfo.len = be32_to_cpu(inc->i_hdr.h_len);
	minfo.tos = inc->i_conn->c_tos;

	if (flip) {
		minfo.laddr = daddr;
		minfo.faddr = saddr;
		minfo.lport = inc->i_hdr.h_dport;
		minfo.fport = inc->i_hdr.h_sport;
	} else {
		minfo.laddr = saddr;
		minfo.faddr = daddr;
		minfo.lport = inc->i_hdr.h_sport;
		minfo.fport = inc->i_hdr.h_dport;
	}

	minfo.flags = 0;

	rds_info_copy(iter, &minfo, sizeof(minfo));
}

#if IS_ENABLED(CONFIG_IPV6)
void rds6_inc_info_copy(struct rds_incoming *inc,
			struct rds_info_iterator *iter,
			struct in6_addr *saddr, struct in6_addr *daddr,
			int flip)
{
	struct rds6_info_message minfo6;

	minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence);
	minfo6.len = be32_to_cpu(inc->i_hdr.h_len);
	minfo6.tos = inc->i_conn->c_tos;

	if (flip) {
		minfo6.laddr = *daddr;
		minfo6.faddr = *saddr;
		minfo6.lport = inc->i_hdr.h_dport;
		minfo6.fport = inc->i_hdr.h_sport;
	} else {
		minfo6.laddr = *saddr;
		minfo6.faddr = *daddr;
		minfo6.lport = inc->i_hdr.h_sport;
		minfo6.fport = inc->i_hdr.h_dport;
	}

	minfo6.flags = 0;

	rds_info_copy(iter, &minfo6, sizeof(minfo6));
}
#endif