// SPDX-License-Identifier: GPL-2.0-or-later
/* AF_RXRPC sendmsg() implementation.
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Propose an abort to be made in the I/O thread.
 */
bool rxrpc_propose_abort(struct rxrpc_call *call, s32 abort_code, int error,
			 enum rxrpc_abort_reason why)
{
	_enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why);

	if (!call->send_abort && !rxrpc_call_is_complete(call)) {
		call->send_abort_why = why;
		call->send_abort_err = error;
		call->send_abort_seq = 0;
		/* Request abort locklessly vs rxrpc_input_call_event(). */
		smp_store_release(&call->send_abort, abort_code);
		rxrpc_poke_call(call, rxrpc_call_poke_abort);
		return true;
	}

	return false;
}
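
/* Note on the ordering (illustrative only): the store-release above publishes
 * the why/err/seq fields before the abort code becomes visible, so the
 * consumer in rxrpc_input_call_event() is expected to pair it with an
 * acquire, roughly:
 *
 *	s32 abort_code = smp_load_acquire(&call->send_abort);
 *	if (abort_code)
 *		abort the call using call->send_abort_err, _seq and _why;
 *
 * This is a sketch of the ordering contract, not the consumer's actual code.
 */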

/*
 * Wait for a call to become connected. Interruption here doesn't cause the
 * call to be aborted.
 */
static int rxrpc_wait_to_be_connected(struct rxrpc_call *call, long *timeo)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret = 0;

	_enter("%d", call->debug_id);

	if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN)
		goto no_wait;

	add_wait_queue_exclusive(&call->waitq, &myself);

	for (;;) {
		switch (call->interruptibility) {
		case RXRPC_INTERRUPTIBLE:
		case RXRPC_PREINTERRUPTIBLE:
			set_current_state(TASK_INTERRUPTIBLE);
			break;
		case RXRPC_UNINTERRUPTIBLE:
		default:
			set_current_state(TASK_UNINTERRUPTIBLE);
			break;
		}

		if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN)
			break;
		if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
		     call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
		    signal_pending(current)) {
			ret = sock_intr_errno(*timeo);
			break;
		}
		*timeo = schedule_timeout(*timeo);
	}

	remove_wait_queue(&call->waitq, &myself);
	__set_current_state(TASK_RUNNING);

no_wait:
	if (ret == 0 && rxrpc_call_is_complete(call))
		ret = call->error;

	_leave(" = %d", ret);
	return ret;
}

/*
 * Return true if there's sufficient Tx queue space.
 */
static bool rxrpc_check_tx_space(struct rxrpc_call *call, rxrpc_seq_t *_tx_win)
{
	if (_tx_win)
		*_tx_win = call->tx_bottom;
	return call->tx_prepared - call->tx_bottom < 256;
}
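
/* Worked example: with tx_bottom == 100 and tx_prepared == 355 the difference
 * is 255 (< 256), so one more packet may be prepared; once tx_prepared
 * reaches 356 the sender must wait for the bottom of the queue to advance.
 * The subtraction is on unsigned rxrpc_seq_t values, so the comparison stays
 * correct across sequence-number wrap.
 */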

/*
 * Wait for space to appear in the Tx queue or a signal to occur.
 */
static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
					 struct rxrpc_call *call,
					 long *timeo)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (rxrpc_check_tx_space(call, NULL))
			return 0;

		if (rxrpc_call_is_complete(call))
			return call->error;

		if (signal_pending(current))
			return sock_intr_errno(*timeo);

		trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
		*timeo = schedule_timeout(*timeo);
	}
}

/*
 * Wait for space to appear in the Tx queue uninterruptibly, but with
 * a timeout of 2*RTT if no progress was made and a signal occurred.
 */
static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx,
					    struct rxrpc_call *call)
{
	rxrpc_seq_t tx_start, tx_win;
	signed long rtt, timeout;

	rtt = READ_ONCE(call->peer->srtt_us) >> 3;
	rtt = usecs_to_jiffies(rtt) * 2;
	if (rtt < 2)
		rtt = 2;

	timeout = rtt;
	tx_start = smp_load_acquire(&call->acks_hard_ack);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (rxrpc_check_tx_space(call, &tx_win))
			return 0;

		if (rxrpc_call_is_complete(call))
			return call->error;

		if (timeout == 0 &&
		    tx_win == tx_start && signal_pending(current))
			return -EINTR;

		if (tx_win != tx_start) {
			timeout = rtt;
			tx_start = tx_win;
		}

		trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
		timeout = schedule_timeout(timeout);
	}
}
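
/* For example, with a smoothed RTT of 25ms, srtt_us >> 3 yields 25000us and
 * the per-cycle budget is usecs_to_jiffies(25000) * 2, i.e. about 50ms worth
 * of jiffies.  The budget is reset whenever the window advances; only if it
 * runs out with no progress and a signal pending does the wait fail with
 * -EINTR.
 */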

/*
 * Wait for space to appear in the Tx queue uninterruptibly.
 */
static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
					    struct rxrpc_call *call,
					    long *timeo)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (rxrpc_check_tx_space(call, NULL))
			return 0;

		if (rxrpc_call_is_complete(call))
			return call->error;

		trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
		*timeo = schedule_timeout(*timeo);
	}
}

/*
 * wait for space to appear in the transmit/ACK window
 * - caller holds the socket locked
 */
static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
				    struct rxrpc_call *call,
				    long *timeo,
				    bool waitall)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret;

	_enter(",{%u,%u,%u,%u}",
	       call->tx_bottom, call->acks_hard_ack, call->tx_top, call->tx_winsize);

	add_wait_queue(&call->waitq, &myself);

	switch (call->interruptibility) {
	case RXRPC_INTERRUPTIBLE:
		if (waitall)
			ret = rxrpc_wait_for_tx_window_waitall(rx, call);
		else
			ret = rxrpc_wait_for_tx_window_intr(rx, call, timeo);
		break;
	case RXRPC_PREINTERRUPTIBLE:
	case RXRPC_UNINTERRUPTIBLE:
	default:
		ret = rxrpc_wait_for_tx_window_nonintr(rx, call, timeo);
		break;
	}

	remove_wait_queue(&call->waitq, &myself);
	set_current_state(TASK_RUNNING);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Notify the owner of the call that the transmit phase is ended and the last
 * packet has been queued.
 */
static void rxrpc_notify_end_tx(struct rxrpc_sock *rx, struct rxrpc_call *call,
				rxrpc_notify_end_tx_t notify_end_tx)
{
	if (notify_end_tx)
		notify_end_tx(&rx->sk, call, call->user_call_ID);
}

/*
 * Queue a DATA packet for transmission.  The timestamp is set before the
 * packet is queued, and the I/O thread is poked to send it if the sendmsg
 * queue was previously empty.
 */
static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
			       struct rxrpc_txbuf *txb,
			       rxrpc_notify_end_tx_t notify_end_tx)
{
	rxrpc_seq_t seq = txb->seq;
	bool poke, last = txb->flags & RXRPC_LAST_PACKET;

	rxrpc_inc_stat(call->rxnet, stat_tx_data);

	ASSERTCMP(txb->seq, ==, call->tx_prepared + 1);

	/* We have to set the timestamp before queueing as the retransmit
	 * algorithm can see the packet as soon as we queue it.
	 */
	txb->last_sent = ktime_get_real();

	if (last)
		trace_rxrpc_txqueue(call, rxrpc_txqueue_queue_last);
	else
		trace_rxrpc_txqueue(call, rxrpc_txqueue_queue);

	/* Add the packet to the call's output buffer */
	spin_lock(&call->tx_lock);
	poke = list_empty(&call->tx_sendmsg);
	list_add_tail(&txb->call_link, &call->tx_sendmsg);
	call->tx_prepared = seq;
	if (last)
		rxrpc_notify_end_tx(rx, call, notify_end_tx);
	spin_unlock(&call->tx_lock);

	if (poke)
		rxrpc_poke_call(call, rxrpc_call_poke_start);
}
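
/* The poke is only issued when the queue was empty: if earlier buffers are
 * still on tx_sendmsg, the I/O thread has presumably already been poked and
 * will pick this packet up when it drains the list under tx_lock.
 * (Descriptive note for clarity; the I/O thread itself lives elsewhere.)
 */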

/*
 * send data through a socket
 * - must be called in process context
 * - The caller holds the call user access mutex, but not the socket lock.
 */
static int rxrpc_send_data(struct rxrpc_sock *rx,
			   struct rxrpc_call *call,
			   struct msghdr *msg, size_t len,
			   rxrpc_notify_end_tx_t notify_end_tx,
			   bool *_dropped_lock)
{
	struct rxrpc_txbuf *txb;
	struct sock *sk = &rx->sk;
	enum rxrpc_call_state state;
	long timeo;
	bool more = msg->msg_flags & MSG_MORE;
	int ret, copied = 0;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	ret = rxrpc_wait_to_be_connected(call, &timeo);
	if (ret < 0)
		return ret;

	if (call->conn->state == RXRPC_CONN_CLIENT_UNSECURED) {
		ret = rxrpc_init_client_conn_security(call->conn);
		if (ret < 0)
			return ret;
	}

	/* this should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

reload:
	ret = -EPIPE;
	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto maybe_error;
	state = rxrpc_call_state(call);
	ret = -ESHUTDOWN;
	if (state >= RXRPC_CALL_COMPLETE)
		goto maybe_error;
	ret = -EPROTO;
	if (state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
	    state != RXRPC_CALL_SERVER_ACK_REQUEST &&
	    state != RXRPC_CALL_SERVER_SEND_REPLY) {
		/* Request phase complete for this client call */
		trace_rxrpc_abort(call->debug_id, rxrpc_sendmsg_late_send,
				  call->cid, call->call_id, call->rx_consumed,
				  0, -EPROTO);
		goto maybe_error;
	}

	ret = -EMSGSIZE;
	if (call->tx_total_len != -1) {
		if (len - copied > call->tx_total_len)
			goto maybe_error;
		if (!more && len - copied != call->tx_total_len)
			goto maybe_error;
	}

	txb = call->tx_pending;
	call->tx_pending = NULL;
	if (txb)
		rxrpc_see_txbuf(txb, rxrpc_txbuf_see_send_more);

	do {
		if (!txb) {
			size_t remain;

			_debug("alloc");

			if (!rxrpc_check_tx_space(call, NULL))
				goto wait_for_space;

			/* Work out the maximum size of a packet. Assume that
			 * the security header is going to be in the padded
			 * region (enc blocksize), but the trailer is not.
			 */
			remain = more ? INT_MAX : msg_data_left(msg);
			txb = call->conn->security->alloc_txbuf(call, remain, sk->sk_allocation);
			if (!txb) {
				ret = -ENOMEM;
				goto maybe_error;
			}
		}

		_debug("append");

		/* append next segment of data to the current buffer */
		if (msg_data_left(msg) > 0) {
			size_t copy = min_t(size_t, txb->space, msg_data_left(msg));

			_debug("add %zu", copy);
			if (!copy_from_iter_full(txb->kvec[0].iov_base + txb->offset,
						 copy, &msg->msg_iter))
				goto efault;
			_debug("added");
			txb->space -= copy;
			txb->len += copy;
			txb->offset += copy;
			copied += copy;
			if (call->tx_total_len != -1)
				call->tx_total_len -= copy;
		}

		/* check for the far side aborting the call or a network error
		 * occurring */
		if (rxrpc_call_is_complete(call))
			goto call_terminated;

		/* add the packet to the send queue if it's now full */
		if (!txb->space ||
		    (msg_data_left(msg) == 0 && !more)) {
			if (msg_data_left(msg) == 0 && !more)
				txb->flags |= RXRPC_LAST_PACKET;
			else if (call->tx_top - call->acks_hard_ack <
				 call->tx_winsize)
				txb->flags |= RXRPC_MORE_PACKETS;

			ret = call->security->secure_packet(call, txb);
			if (ret < 0)
				goto out;

			txb->kvec[0].iov_len += txb->len;
			txb->len = txb->kvec[0].iov_len;
			rxrpc_queue_packet(rx, call, txb, notify_end_tx);
			txb = NULL;
		}
	} while (msg_data_left(msg) > 0);

success:
	ret = copied;
	if (rxrpc_call_is_complete(call) &&
	    call->error < 0)
		ret = call->error;
out:
	call->tx_pending = txb;
	_leave(" = %d", ret);
	return ret;

call_terminated:
	rxrpc_put_txbuf(txb, rxrpc_txbuf_put_send_aborted);
	_leave(" = %d", call->error);
	return call->error;

maybe_error:
	if (copied)
		goto success;
	goto out;

efault:
	ret = -EFAULT;
	goto out;

wait_for_space:
	ret = -EAGAIN;
	if (msg->msg_flags & MSG_DONTWAIT)
		goto maybe_error;
	mutex_unlock(&call->user_mutex);
	*_dropped_lock = true;
	ret = rxrpc_wait_for_tx_window(rx, call, &timeo,
				       msg->msg_flags & MSG_WAITALL);
	if (ret < 0)
		goto maybe_error;
	if (call->interruptibility == RXRPC_INTERRUPTIBLE) {
		if (mutex_lock_interruptible(&call->user_mutex) < 0) {
			ret = sock_intr_errno(timeo);
			goto maybe_error;
		}
	} else {
		mutex_lock(&call->user_mutex);
	}
	*_dropped_lock = false;
	goto reload;
}

/*
 * extract control messages from the sendmsg() control buffer
 */
static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
{
	struct cmsghdr *cmsg;
	bool got_user_ID = false;
	int len;

	if (msg->msg_controllen == 0)
		return -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		len = cmsg->cmsg_len - sizeof(struct cmsghdr);
		_debug("CMSG %d, %d, %d",
		       cmsg->cmsg_level, cmsg->cmsg_type, len);

		if (cmsg->cmsg_level != SOL_RXRPC)
			continue;

		switch (cmsg->cmsg_type) {
		case RXRPC_USER_CALL_ID:
			if (msg->msg_flags & MSG_CMSG_COMPAT) {
				if (len != sizeof(u32))
					return -EINVAL;
				p->call.user_call_ID = *(u32 *)CMSG_DATA(cmsg);
			} else {
				if (len != sizeof(unsigned long))
					return -EINVAL;
				p->call.user_call_ID = *(unsigned long *)
					CMSG_DATA(cmsg);
			}
			got_user_ID = true;
			break;

		case RXRPC_ABORT:
			if (p->command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			p->command = RXRPC_CMD_SEND_ABORT;
			if (len != sizeof(p->abort_code))
				return -EINVAL;
			p->abort_code = *(unsigned int *)CMSG_DATA(cmsg);
			if (p->abort_code == 0)
				return -EINVAL;
			break;

		case RXRPC_CHARGE_ACCEPT:
			if (p->command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			p->command = RXRPC_CMD_CHARGE_ACCEPT;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_EXCLUSIVE_CALL:
			p->exclusive = true;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_UPGRADE_SERVICE:
			p->upgrade = true;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_TX_LENGTH:
			if (p->call.tx_total_len != -1 || len != sizeof(__s64))
				return -EINVAL;
			p->call.tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
			if (p->call.tx_total_len < 0)
				return -EINVAL;
			break;

		case RXRPC_SET_CALL_TIMEOUT:
			if (len & 3 || len < 4 || len > 12)
				return -EINVAL;
			memcpy(&p->call.timeouts, CMSG_DATA(cmsg), len);
			p->call.nr_timeouts = len / 4;
			if (p->call.timeouts.hard > INT_MAX / HZ)
				return -ERANGE;
			if (p->call.nr_timeouts >= 2 && p->call.timeouts.idle > 60 * 60 * 1000)
				return -ERANGE;
			if (p->call.nr_timeouts >= 3 && p->call.timeouts.normal > 60 * 60 * 1000)
				return -ERANGE;
			break;

		default:
			return -EINVAL;
		}
	}

	if (!got_user_ID)
		return -EINVAL;
	if (p->call.tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
		return -EINVAL;
	_leave(" = 0");
	return 0;
}
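
/* A minimal userspace sketch (illustrative only, not a definitive reference)
 * of supplying the mandatory RXRPC_USER_CALL_ID control message:
 *
 *	unsigned long call_id = 1;
 *	char ctrl[CMSG_SPACE(sizeof(call_id))] = {};
 *	struct msghdr msg = {
 *		.msg_control	= ctrl,
 *		.msg_controllen	= sizeof(ctrl),
 *	};
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *
 *	cm->cmsg_level	= SOL_RXRPC;
 *	cm->cmsg_type	= RXRPC_USER_CALL_ID;
 *	cm->cmsg_len	= CMSG_LEN(sizeof(call_id));
 *	memcpy(CMSG_DATA(cm), &call_id, sizeof(call_id));
 *
 * The data, destination address and MSG_MORE flag are then set up as for any
 * other sendmsg() call.
 */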

/*
 * Create a new client call for sendmsg().
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
static struct rxrpc_call *
rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
				  struct rxrpc_send_params *p)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(&call->user_mutex)
{
	struct rxrpc_conn_parameters cp;
	struct rxrpc_peer *peer;
	struct rxrpc_call *call;
	struct key *key;

	DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx, msg->msg_name);

	_enter("");

	if (!msg->msg_name) {
		release_sock(&rx->sk);
		return ERR_PTR(-EDESTADDRREQ);
	}

	peer = rxrpc_lookup_peer(rx->local, srx, GFP_KERNEL);
	if (!peer) {
		release_sock(&rx->sk);
		return ERR_PTR(-ENOMEM);
	}

	key = rx->key;
	if (key && !rx->key->payload.data[0])
		key = NULL;

	memset(&cp, 0, sizeof(cp));
	cp.local = rx->local;
	cp.peer = peer;
	cp.key = rx->key;
	cp.security_level = rx->min_sec_level;
	cp.exclusive = rx->exclusive | p->exclusive;
	cp.upgrade = p->upgrade;
	cp.service_id = srx->srx_service;
	call = rxrpc_new_client_call(rx, &cp, &p->call, GFP_KERNEL,
				     atomic_inc_return(&rxrpc_debug_id));
	/* The socket is now unlocked */

	rxrpc_put_peer(peer, rxrpc_peer_put_application);
	_leave(" = %p\n", call);
	return call;
}

/*
 * send a message forming part of a client call through an RxRPC socket
 * - caller holds the socket locked
 * - the socket may be either a client socket or a server socket
 */
int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
	__releases(&rx->sk.sk_lock.slock)
{
	struct rxrpc_call *call;
	bool dropped_lock = false;
	int ret;

	struct rxrpc_send_params p = {
		.call.tx_total_len = -1,
		.call.user_call_ID = 0,
		.call.nr_timeouts = 0,
		.call.interruptibility = RXRPC_INTERRUPTIBLE,
		.abort_code = 0,
		.command = RXRPC_CMD_SEND_DATA,
		.exclusive = false,
		.upgrade = false,
	};

	_enter("");

	ret = rxrpc_sendmsg_cmsg(msg, &p);
	if (ret < 0)
		goto error_release_sock;

	if (p.command == RXRPC_CMD_CHARGE_ACCEPT) {
		ret = -EINVAL;
		if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
			goto error_release_sock;
		ret = rxrpc_user_charge_accept(rx, p.call.user_call_ID);
		goto error_release_sock;
	}

	call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID);
	if (!call) {
		ret = -EBADSLT;
		if (p.command != RXRPC_CMD_SEND_DATA)
			goto error_release_sock;
		call = rxrpc_new_client_call_for_sendmsg(rx, msg, &p);
		/* The socket is now unlocked... */
		if (IS_ERR(call))
			return PTR_ERR(call);
		/* ... and we have the call lock. */
		p.call.nr_timeouts = 0;
		ret = 0;
		if (rxrpc_call_is_complete(call))
			goto out_put_unlock;
	} else {
		switch (rxrpc_call_state(call)) {
		case RXRPC_CALL_CLIENT_AWAIT_CONN:
		case RXRPC_CALL_SERVER_SECURING:
			if (p.command == RXRPC_CMD_SEND_ABORT)
				break;
			fallthrough;
		case RXRPC_CALL_UNINITIALISED:
		case RXRPC_CALL_SERVER_PREALLOC:
			rxrpc_put_call(call, rxrpc_call_put_sendmsg);
			ret = -EBUSY;
			goto error_release_sock;
		default:
			break;
		}

		ret = mutex_lock_interruptible(&call->user_mutex);
		release_sock(&rx->sk);
		if (ret < 0) {
			ret = -ERESTARTSYS;
			goto error_put;
		}

		if (p.call.tx_total_len != -1) {
			ret = -EINVAL;
			if (call->tx_total_len != -1 ||
			    call->tx_pending ||
			    call->tx_top != 0)
				goto out_put_unlock;
			call->tx_total_len = p.call.tx_total_len;
		}
	}

	switch (p.call.nr_timeouts) {
	case 3:
		WRITE_ONCE(call->next_rx_timo, p.call.timeouts.normal);
		fallthrough;
	case 2:
		WRITE_ONCE(call->next_req_timo, p.call.timeouts.idle);
		fallthrough;
	case 1:
		if (p.call.timeouts.hard > 0) {
			ktime_t delay = ms_to_ktime(p.call.timeouts.hard * MSEC_PER_SEC);

			WRITE_ONCE(call->expect_term_by,
				   ktime_add(ktime_get_real(), delay));
			trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_hard);
			rxrpc_poke_call(call, rxrpc_call_poke_set_timeout);
		}
		break;
	}

	if (rxrpc_call_is_complete(call)) {
		/* it's too late for this call */
		ret = -ESHUTDOWN;
	} else if (p.command == RXRPC_CMD_SEND_ABORT) {
		rxrpc_propose_abort(call, p.abort_code, -ECONNABORTED,
				    rxrpc_abort_call_sendmsg);
		ret = 0;
	} else if (p.command != RXRPC_CMD_SEND_DATA) {
		ret = -EINVAL;
	} else {
		ret = rxrpc_send_data(rx, call, msg, len, NULL, &dropped_lock);
	}

out_put_unlock:
	if (!dropped_lock)
		mutex_unlock(&call->user_mutex);
error_put:
	rxrpc_put_call(call, rxrpc_call_put_sendmsg);
	_leave(" = %d", ret);
	return ret;

error_release_sock:
	release_sock(&rx->sk);
	return ret;
}

/**
 * rxrpc_kernel_send_data - Allow a kernel service to send data on a call
 * @sock: The socket the call is on
 * @call: The call to send data through
 * @msg: The data to send
 * @len: The amount of data to send
 * @notify_end_tx: Notification that the last packet is queued.
 *
 * Allow a kernel service to send data on a call. The call must be in a state
 * appropriate to sending data. No control data should be supplied in @msg,
 * nor should an address be supplied. MSG_MORE should be flagged if there's
 * more data to come, otherwise this data will end the transmission phase.
 */
int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
			   struct msghdr *msg, size_t len,
			   rxrpc_notify_end_tx_t notify_end_tx)
{
	bool dropped_lock = false;
	int ret;

	_enter("{%d},", call->debug_id);

	ASSERTCMP(msg->msg_name, ==, NULL);
	ASSERTCMP(msg->msg_control, ==, NULL);

	mutex_lock(&call->user_mutex);

	ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len,
			      notify_end_tx, &dropped_lock);
	if (ret == -ESHUTDOWN)
		ret = call->error;

	if (!dropped_lock)
		mutex_unlock(&call->user_mutex);
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(rxrpc_kernel_send_data);
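
/* A hypothetical in-kernel caller might feed a preassembled buffer through
 * this interface roughly as follows (sketch only; buf, size, more, sock and
 * call are the caller's own):
 *
 *	struct kvec iov = { .iov_base = buf, .iov_len = size };
 *	struct msghdr msg = { .msg_flags = more ? MSG_MORE : 0 };
 *
 *	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, size);
 *	ret = rxrpc_kernel_send_data(sock, call, &msg, size, NULL);
 *
 * MSG_MORE must be set on all but the final chunk of the request or reply.
 */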

/**
 * rxrpc_kernel_abort_call - Allow a kernel service to abort a call
 * @sock: The socket the call is on
 * @call: The call to be aborted
 * @abort_code: The abort code to stick into the ABORT packet
 * @error: Local error value
 * @why: Indication as to why.
 *
 * Allow a kernel service to abort a call if it's still in an abortable state.
 * Returns true if the call was aborted, false if it was already complete.
 */
bool rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
			     u32 abort_code, int error, enum rxrpc_abort_reason why)
{
	bool aborted;

	_enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why);

	mutex_lock(&call->user_mutex);
	aborted = rxrpc_propose_abort(call, abort_code, error, why);
	mutex_unlock(&call->user_mutex);
	return aborted;
}
EXPORT_SYMBOL(rxrpc_kernel_abort_call);
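
/* For instance, a service that cannot complete its side of a call might do
 * (sketch only; the abort code and the reason value "why" are the caller's
 * own choice):
 *
 *	rxrpc_kernel_abort_call(sock, call, RX_USER_ABORT, -ECONNABORTED, why);
 */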

/**
 * rxrpc_kernel_set_tx_length - Set the total Tx length on a call
 * @sock: The socket the call is on
 * @call: The call to be informed
 * @tx_total_len: The amount of data to be transmitted for this call
 *
 * Allow a kernel service to set the total transmit length on a call. This
 * allows buffer-to-packet encrypt-and-copy to be performed.
 *
 * This function is primarily for use for setting the reply length since the
 * request length can be set when beginning the call.
 */
void rxrpc_kernel_set_tx_length(struct socket *sock, struct rxrpc_call *call,
				s64 tx_total_len)
{
	WARN_ON(call->tx_total_len != -1);
	call->tx_total_len = tx_total_len;
}
EXPORT_SYMBOL(rxrpc_kernel_set_tx_length);