// SPDX-License-Identifier: GPL-2.0-or-later
/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
			       unsigned long user_call_ID)
{
}

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp,
				      unsigned int debug_id)
{
	struct rxrpc_call *call, *xcall;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	struct rb_node *parent, **pp;
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
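	/* The backlog rings are filled by the charging functions and drained
	 * by the I/O thread; each ring's head is published with
	 * smp_store_release() to pair with the consumer's smp_load_acquire()
	 * in rxrpc_alloc_incoming_call().
	 */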
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
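	/* Allow for the call we're about to preallocate on top of those
	 * already in the ring so that the conn and peer rings don't fall
	 * behind the call ring.
	 */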
	max = tmp + 1;

	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer;

		peer = rxrpc_alloc_peer(rx->local, gfp, rxrpc_peer_new_prealloc);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, with a user ID preassigned by the user.
	 */
	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	rxrpc_set_call_state(call, RXRPC_CALL_SERVER_PREALLOC);
	__set_bit(RXRPC_CALL_EV_INITIAL_PING, &call->events);

	trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
			 user_call_ID, rxrpc_call_new_prealloc_service);

	write_lock(&rx->call_lock);

	/* Check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);
		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	call->user_call_ID = user_call_ID;
	call->notify_rx = notify_rx;
	if (user_attach_call) {
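		/* Give the kernel service its own ref to hold on the call
		 * whilst it attaches its side to the user call ID.
		 */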
		rxrpc_get_call(call, rxrpc_call_get_kernel_service);
		user_attach_call(call, user_call_ID);
	}

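	/* The user_call_ID tree holds a second ref on the call. */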
	rxrpc_get_call(call, rxrpc_call_get_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	spin_lock(&rxnet->call_lock);
	list_add_tail_rcu(&call->link, &rxnet->calls);
	spin_unlock(&rxnet->call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

/*
 * Allocate the preallocation buffers for incoming service calls.  These must
 * be charged manually.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.  Taking and dropping the lock, even
	 * with an empty critical section, acts as a barrier against the I/O
	 * thread, which holds incoming_lock whilst setting up a call.
	 */
	spin_lock(&rx->incoming_lock);
	spin_unlock(&rx->incoming_lock);

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		rxrpc_put_local(peer->local, rxrpc_local_put_prealloc_peer);
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		if (atomic_dec_and_test(&rxnet->nr_conns))
			wake_up_var(&rxnet->nr_conns);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
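		/* Point the call back at this socket so that the discard
		 * notification and release paths see a valid socket pointer.
		 */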
		rcu_assign_pointer(call->socket, rx);
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			if (call->notify_rx)
				call->notify_rx = rxrpc_dummy_notify;
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put_discard_prealloc);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_peer *peer,
						    struct rxrpc_connection *conn,
						    const struct rxrpc_security *sec,
						    struct sockaddr_rxrpc *peer_srx,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

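	/* If the call ring is empty, the caller will mark the packet to be
	 * rejected with a BUSY response.
	 */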
	if (call_count == 0)
		return NULL;

	if (!conn) {
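		/* If a candidate peer was passed in but is already on its way
		 * to destruction (its refcount can't be raised), ignore it
		 * and install a preallocated peer instead.
		 */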
		if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_service_conn))
			peer = NULL;
		if (!peer) {
			peer = b->peer_backlog[peer_tail];
			peer->srx = *peer_srx;
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));

			rxrpc_new_incoming_peer(local, peer);
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		conn->local = rxrpc_get_local(local, rxrpc_local_get_prealloc_conn);
		conn->peer = peer;
		rxrpc_see_connection(conn, rxrpc_conn_see_new_service_conn);
		rxrpc_new_incoming_connection(rx, conn, sec, skb);
	} else {
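		/* Reuse the connection the packet arrived on, taking a ref
		 * and bumping its active count for this call.
		 */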
		rxrpc_get_connection(conn, rxrpc_conn_get_service_conn);
		atomic_inc(&conn->active);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call, rxrpc_call_see_accept);
	call->local = rxrpc_get_local(conn->local, rxrpc_local_get_call);
	call->conn = conn;
	call->security = conn->security;
	call->security_ix = conn->security_ix;
	call->peer = rxrpc_get_peer(conn->peer, rxrpc_peer_get_accept);
	call->dest_srx = peer->srx;
	call->cong_ssthresh = call->peer->cong_ssthresh;
	call->tx_last_sent = ktime_get_real();
	return call;
}

/*
 * Set up a new incoming call.  Called from the I/O thread.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return false.
 */
bool rxrpc_new_incoming_call(struct rxrpc_local *local,
			     struct rxrpc_peer *peer,
			     struct rxrpc_connection *conn,
			     struct sockaddr_rxrpc *peer_srx,
			     struct sk_buff *skb)
{
	const struct rxrpc_security *sec = NULL;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call = NULL;
	struct rxrpc_sock *rx;

	_enter("");

	/* Don't set up a call for anything other than a DATA packet. */
	if (sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
		return rxrpc_protocol_error(skb, rxrpc_eproto_no_service_call);

	read_lock(&local->services_lock);

	/* Weed out packets to services we're not offering.  Packets that would
	 * begin a call are explicitly rejected and the rest are just
	 * discarded.
	 */
	rx = local->service;
	if (!rx || (sp->hdr.serviceId != rx->srx.srx_service &&
		    sp->hdr.serviceId != rx->second_service)
	    ) {
		if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
		    sp->hdr.seq == 1)
			goto unsupported_service;
		goto discard;
	}

	if (!conn) {
		sec = rxrpc_get_incoming_security(rx, skb);
		if (!sec)
			goto unsupported_security;
	}

	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		rxrpc_direct_abort(skb, rxrpc_abort_shut_down,
				   RX_INVALID_OPERATION, -ESHUTDOWN);
		goto no_call;
	}

	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, peer_srx,
					 skb);
	if (!call) {
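		/* The backlog is out of preallocated calls: mark the packet
		 * so that the caller responds with BUSY.
		 */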
		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
		goto no_call;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);

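	/* Kick off a security challenge if the new connection hasn't yet been
	 * secured.
	 */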
	spin_lock(&conn->state_lock);
	if (conn->state == RXRPC_CONN_SERVICE_UNSECURED) {
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn, rxrpc_conn_queue_challenge);
	}
	spin_unlock(&conn->state_lock);

	spin_unlock(&rx->incoming_lock);
	read_unlock(&local->services_lock);

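	/* Hook the call into the peer's error distribution list so that
	 * transport errors (such as ICMP messages) get propagated to it.
	 */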
	if (hlist_unhashed(&call->error_link)) {
		spin_lock(&call->peer->lock);
		hlist_add_head(&call->error_link, &call->peer->error_targets);
		spin_unlock(&call->peer->lock);
	}

	_leave(" = %p{%d}", call, call->debug_id);
	rxrpc_input_call_event(call, skb);
	rxrpc_put_call(call, rxrpc_call_put_input);
	return true;

unsupported_service:
	read_unlock(&local->services_lock);
	return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered,
				  RX_INVALID_OPERATION, -EOPNOTSUPP);
unsupported_security:
	read_unlock(&local->services_lock);
	return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered,
				  RX_INVALID_OPERATION, -EKEYREJECTED);
no_call:
	spin_unlock(&rx->incoming_lock);
	read_unlock(&local->services_lock);
	_leave(" = f [%u]", skb->mark);
	return false;
discard:
	read_unlock(&local->services_lock);
	return true;
}

/*
 * Charge up socket with preallocated calls, attaching user call IDs.
 */
int rxrpc_user_charge_accept(struct rxrpc_sock *rx, unsigned long user_call_ID)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (rx->sk.sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, NULL, NULL, user_call_ID,
					  GFP_KERNEL,
					  atomic_inc_return(&rxrpc_debug_id));
}
/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 * @debug_id: The tracing debug ID.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp,
			       unsigned int debug_id)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
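
/*
 * Illustrative sketch (not part of the API): a kernel service would typically
 * top up the backlog from its own accept path, supplying its own notification
 * and attachment functions.  my_notify_rx, my_attach_call and my_debug_id are
 * hypothetical names standing in for whatever the service provides:
 *
 *	for (i = 0; i < RXRPC_BACKLOG_MAX - 1; i++)
 *		if (rxrpc_kernel_charge_accept(socket, my_notify_rx,
 *					       my_attach_call,
 *					       user_call_ID + i,
 *					       GFP_KERNEL, my_debug_id++) < 0)
 *			break;
 */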