// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC remote transport endpoint record management
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/ip6_route.h>
#include "ar-internal.h"

static const struct sockaddr_rxrpc rxrpc_null_addr;

/*
 * Hash a peer key.
 */
static unsigned long rxrpc_peer_hash_key(struct rxrpc_local *local,
                                         const struct sockaddr_rxrpc *srx)
{
        const u16 *p;
        unsigned int i, size;
        unsigned long hash_key;

        _enter("");
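
        /* Scale the local endpoint pointer down by its alignment so that its
         * always-zero low-order bits don't dilute the hash.
         */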
        hash_key = (unsigned long)local / __alignof__(*local);
        hash_key += srx->transport_type;
        hash_key += srx->transport_len;
        hash_key += srx->transport.family;

        switch (srx->transport.family) {
        case AF_INET:
                hash_key += (u16 __force)srx->transport.sin.sin_port;
                size = sizeof(srx->transport.sin.sin_addr);
                p = (u16 *)&srx->transport.sin.sin_addr;
                break;
#ifdef CONFIG_AF_RXRPC_IPV6
        case AF_INET6:
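                /* sin_port and sin6_port sit at the same offset within the
                 * transport union, so reading sin.sin_port is also valid for
                 * an IPv6 address.
                 */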
                hash_key += (u16 __force)srx->transport.sin.sin_port;
                size = sizeof(srx->transport.sin6.sin6_addr);
                p = (u16 *)&srx->transport.sin6.sin6_addr;
                break;
#endif
        default:
                WARN(1, "AF_RXRPC: Unsupported transport address family\n");
                return 0;
        }

        /* Step through the peer address in 16-bit portions for speed */
        for (i = 0; i < size; i += sizeof(*p), p++)
                hash_key += *p;

        _leave(" 0x%lx", hash_key);
        return hash_key;
}

/*
 * Compare a peer to a key.  Return -ve, 0 or +ve to indicate less than, same
 * or greater than.
 *
 * Unfortunately, the primitives in linux/hashtable.h don't allow for sorted
 * buckets and mid-bucket insertion, so we don't make full use of this
 * information at this point.
 */
static long rxrpc_peer_cmp_key(const struct rxrpc_peer *peer,
                               struct rxrpc_local *local,
                               const struct sockaddr_rxrpc *srx,
                               unsigned long hash_key)
{
        long diff;
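
        /* The "a ?: b" GNU extension yields the left-hand operand if it is
         * non-zero, so each field below is compared in turn until one
         * differs.
         */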
        diff = ((peer->hash_key - hash_key) ?:
                ((unsigned long)peer->local - (unsigned long)local) ?:
                (peer->srx.transport_type - srx->transport_type) ?:
                (peer->srx.transport_len - srx->transport_len) ?:
                (peer->srx.transport.family - srx->transport.family));
        if (diff != 0)
                return diff;

        switch (srx->transport.family) {
        case AF_INET:
                return ((u16 __force)peer->srx.transport.sin.sin_port -
                        (u16 __force)srx->transport.sin.sin_port) ?:
                        memcmp(&peer->srx.transport.sin.sin_addr,
                               &srx->transport.sin.sin_addr,
                               sizeof(struct in_addr));
#ifdef CONFIG_AF_RXRPC_IPV6
        case AF_INET6:
                return ((u16 __force)peer->srx.transport.sin6.sin6_port -
                        (u16 __force)srx->transport.sin6.sin6_port) ?:
                        memcmp(&peer->srx.transport.sin6.sin6_addr,
                               &srx->transport.sin6.sin6_addr,
                               sizeof(struct in6_addr));
#endif
        default:
                BUG();
        }
}

/*
 * Look up a remote transport endpoint for the specified address using RCU.
 */
static struct rxrpc_peer *__rxrpc_lookup_peer_rcu(
        struct rxrpc_local *local,
        const struct sockaddr_rxrpc *srx,
        unsigned long hash_key)
{
        struct rxrpc_peer *peer;
        struct rxrpc_net *rxnet = local->rxnet;
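
        /* A peer whose refcount has already fallen to zero may still be
         * visible here until its RCU grace period ends, so skip any such
         * record.
         */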
        hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) {
                if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0 &&
                    refcount_read(&peer->ref) > 0)
                        return peer;
        }

        return NULL;
}

/*
 * Look up a remote transport endpoint for the specified address using RCU.
 */
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
                                         const struct sockaddr_rxrpc *srx)
{
        struct rxrpc_peer *peer;
        unsigned long hash_key = rxrpc_peer_hash_key(local, srx);

        peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
        if (peer)
                _leave(" = %p {u=%d}", peer, refcount_read(&peer->ref));
        return peer;
}
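
/* Illustrative only: a caller would typically bracket the lookup with the
 * RCU read lock and take a real reference before leaving the read-side
 * section, as rxrpc_lookup_peer() does below:
 *
 *	rcu_read_lock();
 *	peer = rxrpc_lookup_peer_rcu(local, srx);
 *	if (peer)
 *		peer = rxrpc_get_peer_maybe(peer, rxrpc_peer_get_lookup_client);
 *	rcu_read_unlock();
 */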

/*
 * assess the MTU size for the network interface through which this peer is
 * reached
 */
static void rxrpc_assess_MTU_size(struct rxrpc_local *local,
                                  struct rxrpc_peer *peer)
{
        struct net *net = local->net;
        struct dst_entry *dst;
        struct rtable *rt;
        struct flowi fl;
        struct flowi4 *fl4 = &fl.u.ip4;
#ifdef CONFIG_AF_RXRPC_IPV6
        struct flowi6 *fl6 = &fl.u.ip6;
#endif
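
        /* Fall back to assuming a standard Ethernet-sized MTU if no route
         * can be assessed.
         */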
        peer->if_mtu = 1500;

        memset(&fl, 0, sizeof(fl));
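
        /* Build a nominal UDP flow to the peer and ask the routing layer for
         * the path MTU.  The AFS-era port numbers are placeholders; the
         * routed MTU shouldn't depend on the port choice.
         */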
        switch (peer->srx.transport.family) {
        case AF_INET:
                rt = ip_route_output_ports(
                        net, fl4, NULL,
                        peer->srx.transport.sin.sin_addr.s_addr, 0,
                        htons(7000), htons(7001), IPPROTO_UDP, 0, 0);
                if (IS_ERR(rt)) {
                        _leave(" [route err %ld]", PTR_ERR(rt));
                        return;
                }
                dst = &rt->dst;
                break;

#ifdef CONFIG_AF_RXRPC_IPV6
        case AF_INET6:
                fl6->flowi6_iif = LOOPBACK_IFINDEX;
                fl6->flowi6_scope = RT_SCOPE_UNIVERSE;
                fl6->flowi6_proto = IPPROTO_UDP;
                memcpy(&fl6->daddr, &peer->srx.transport.sin6.sin6_addr,
                       sizeof(struct in6_addr));
                fl6->fl6_dport = htons(7001);
                fl6->fl6_sport = htons(7000);
                dst = ip6_route_output(net, NULL, fl6);
                if (dst->error) {
                        _leave(" [route err %d]", dst->error);
                        return;
                }
                break;
#endif

        default:
                BUG();
        }

        peer->if_mtu = dst_mtu(dst);
        dst_release(dst);

        _leave(" [if_mtu %u]", peer->if_mtu);
}

/*
 * Allocate a peer.
 */
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp,
                                    enum rxrpc_peer_trace why)
{
        struct rxrpc_peer *peer;

        _enter("");

        peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
        if (peer) {
                refcount_set(&peer->ref, 1);
                peer->local = rxrpc_get_local(local, rxrpc_local_get_peer);
                INIT_HLIST_HEAD(&peer->error_targets);
                peer->service_conns = RB_ROOT;
                seqlock_init(&peer->service_conn_lock);
                spin_lock_init(&peer->lock);
                spin_lock_init(&peer->rtt_input_lock);
                peer->debug_id = atomic_inc_return(&rxrpc_debug_id);

                rxrpc_peer_init_rtt(peer);

                peer->cong_ssthresh = RXRPC_TX_MAX_WINDOW;
                trace_rxrpc_peer(peer->debug_id, 1, why);
        }

        _leave(" = %p", peer);
        return peer;
}

/*
 * Initialise peer record.
 */
static void rxrpc_init_peer(struct rxrpc_local *local, struct rxrpc_peer *peer,
                            unsigned long hash_key)
{
        peer->hash_key = hash_key;
        rxrpc_assess_MTU_size(local, peer);
        peer->mtu = peer->if_mtu;
        peer->rtt_last_req = ktime_get_real();

        switch (peer->srx.transport.family) {
        case AF_INET:
                peer->hdrsize = sizeof(struct iphdr);
                break;
#ifdef CONFIG_AF_RXRPC_IPV6
        case AF_INET6:
                peer->hdrsize = sizeof(struct ipv6hdr);
                break;
#endif
        default:
                BUG();
        }

        switch (peer->srx.transport_type) {
        case SOCK_DGRAM:
                peer->hdrsize += sizeof(struct udphdr);
                break;
        default:
                BUG();
        }
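
        /* e.g. for IPv4/UDP with a 1500-byte MTU: 20 (IP) + 8 (UDP) + the
         * 28-byte rxrpc wire header leaves maxdata = 1444 bytes of payload.
         */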
        peer->hdrsize += sizeof(struct rxrpc_wire_header);
        peer->maxdata = peer->mtu - peer->hdrsize;
}

/*
 * Set up a new peer.
 */
static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
                                            struct sockaddr_rxrpc *srx,
                                            unsigned long hash_key,
                                            gfp_t gfp)
{
        struct rxrpc_peer *peer;

        _enter("");

        peer = rxrpc_alloc_peer(local, gfp, rxrpc_peer_new_client);
        if (peer) {
                memcpy(&peer->srx, srx, sizeof(*srx));
                rxrpc_init_peer(local, peer, hash_key);
        }

        _leave(" = %p", peer);
        return peer;
}
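
/*
 * Free a peer record.  The actual kfree is deferred by kfree_rcu() until any
 * concurrent RCU lookups have completed.
 */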
static void rxrpc_free_peer(struct rxrpc_peer *peer)
{
        trace_rxrpc_peer(peer->debug_id, 0, rxrpc_peer_free);
        rxrpc_put_local(peer->local, rxrpc_local_put_peer);
        kfree_rcu(peer, rcu);
}

/*
 * Set up a new incoming peer.  There shouldn't be any other matching peers
 * since we've already done a search in the list from the non-reentrant context
 * (the data_ready handler) that is the only place we can add new peers.
 */
void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
{
        struct rxrpc_net *rxnet = local->rxnet;
        unsigned long hash_key;

        hash_key = rxrpc_peer_hash_key(local, &peer->srx);
        rxrpc_init_peer(local, peer, hash_key);
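
        /* Publish the peer.  Additions are serialised by peer_hash_lock;
         * lookups run locklessly under RCU.
         */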
        spin_lock(&rxnet->peer_hash_lock);
        hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
        list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
        spin_unlock(&rxnet->peer_hash_lock);
}

/*
 * obtain a remote transport endpoint for the specified address
 */
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
                                     struct sockaddr_rxrpc *srx, gfp_t gfp)
{
        struct rxrpc_peer *peer, *candidate;
        struct rxrpc_net *rxnet = local->rxnet;
        unsigned long hash_key = rxrpc_peer_hash_key(local, srx);

        _enter("{%pISp}", &srx->transport);

        /* search the peer list first */
        rcu_read_lock();
        peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
        if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_lookup_client))
                peer = NULL;
        rcu_read_unlock();

        if (!peer) {
                /* The peer is not yet present in hash - create a candidate
                 * for a new record and then redo the search.
                 */
                candidate = rxrpc_create_peer(local, srx, hash_key, gfp);
                if (!candidate) {
                        _leave(" = NULL [nomem]");
                        return NULL;
                }

                spin_lock(&rxnet->peer_hash_lock);

                /* Need to check that we aren't racing with someone else */
                peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
                if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_lookup_client))
                        peer = NULL;
                if (!peer) {
                        hash_add_rcu(rxnet->peer_hash,
                                     &candidate->hash_link, hash_key);
                        list_add_tail(&candidate->keepalive_link,
                                      &rxnet->peer_keepalive_new);
                }

                spin_unlock(&rxnet->peer_hash_lock);

                if (peer)
                        rxrpc_free_peer(candidate);
                else
                        peer = candidate;
        }

        _leave(" = %p {u=%d}", peer, refcount_read(&peer->ref));
        return peer;
}
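
/* Illustrative only: a typical caller takes a reference via this lookup and
 * later drops it with rxrpc_put_peer(), passing an appropriate
 * rxrpc_peer_trace reason:
 *
 *	peer = rxrpc_lookup_peer(local, srx, GFP_KERNEL);
 *	if (!peer)
 *		return -ENOMEM;
 *	...
 *	rxrpc_put_peer(peer, why);
 */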

/*
 * Get a ref on a peer record.
 */
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer, enum rxrpc_peer_trace why)
{
        int r;

        __refcount_inc(&peer->ref, &r);
        trace_rxrpc_peer(peer->debug_id, r + 1, why);
        return peer;
}

/*
 * Get a ref on a peer record unless its usage has already reached 0.
 */
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer,
                                        enum rxrpc_peer_trace why)
{
        int r;

        if (peer) {
                if (__refcount_inc_not_zero(&peer->ref, &r))
                        trace_rxrpc_peer(peer->debug_id, r + 1, why);
                else
                        peer = NULL;
        }
        return peer;
}

/*
 * Discard a peer record.
 */
static void __rxrpc_put_peer(struct rxrpc_peer *peer)
{
        struct rxrpc_net *rxnet = peer->local->rxnet;

        ASSERT(hlist_empty(&peer->error_targets));
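
        /* Unpublish the peer so that no new lookup can find it; lookups
         * still in flight under RCU are handled by the kfree_rcu() in
         * rxrpc_free_peer().
         */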
        spin_lock(&rxnet->peer_hash_lock);
        hash_del_rcu(&peer->hash_link);
        list_del_init(&peer->keepalive_link);
        spin_unlock(&rxnet->peer_hash_lock);

        rxrpc_free_peer(peer);
}

/*
 * Drop a ref on a peer record.
 */
void rxrpc_put_peer(struct rxrpc_peer *peer, enum rxrpc_peer_trace why)
{
        unsigned int debug_id;
        bool dead;
        int r;

        if (peer) {
                debug_id = peer->debug_id;
                dead = __refcount_dec_and_test(&peer->ref, &r);
                trace_rxrpc_peer(debug_id, r - 1, why);
                if (dead)
                        __rxrpc_put_peer(peer);
        }
}

/*
 * Make sure all peer records have been discarded.
 */
void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet)
{
        struct rxrpc_peer *peer;
        int i;

        for (i = 0; i < HASH_SIZE(rxnet->peer_hash); i++) {
                if (hlist_empty(&rxnet->peer_hash[i]))
                        continue;

                hlist_for_each_entry(peer, &rxnet->peer_hash[i], hash_link) {
                        pr_err("Leaked peer %u {%u} %pISp\n",
                               peer->debug_id,
                               refcount_read(&peer->ref),
                               &peer->srx.transport);
                }
        }
}

/**
 * rxrpc_kernel_get_call_peer - Get the peer address of a call
 * @sock: The socket on which the call is in progress.
 * @call: The call to query
 *
 * Get a record for the remote peer in a call.
 */
struct rxrpc_peer *rxrpc_kernel_get_call_peer(struct socket *sock, struct rxrpc_call *call)
{
        return call->peer;
}
EXPORT_SYMBOL(rxrpc_kernel_get_call_peer);

/**
 * rxrpc_kernel_get_srtt - Get a call's peer smoothed RTT
 * @peer: The peer to query
 *
 * Get the call's peer smoothed RTT in uS or UINT_MAX if we have no samples.
 */
unsigned int rxrpc_kernel_get_srtt(const struct rxrpc_peer *peer)
{
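        /* srtt_us is maintained scaled by 8, TCP-style; shifting down by 3
         * yields the smoothed RTT in microseconds.
         */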
        return peer->rtt_count > 0 ? peer->srtt_us >> 3 : UINT_MAX;
}
EXPORT_SYMBOL(rxrpc_kernel_get_srtt);

/**
 * rxrpc_kernel_remote_srx - Get the address of a peer
 * @peer: The peer to query
 *
 * Get a pointer to the address from a peer record.  The caller is responsible
 * for making sure that the address is not deallocated.
 */
const struct sockaddr_rxrpc *rxrpc_kernel_remote_srx(const struct rxrpc_peer *peer)
{
        return peer ? &peer->srx : &rxrpc_null_addr;
}
EXPORT_SYMBOL(rxrpc_kernel_remote_srx);

/**
 * rxrpc_kernel_remote_addr - Get the peer transport address of a call
 * @peer: The peer to query
 *
 * Get a pointer to the transport address from a peer record.  The caller is
 * responsible for making sure that the address is not deallocated.
 */
const struct sockaddr *rxrpc_kernel_remote_addr(const struct rxrpc_peer *peer)
{
        return (const struct sockaddr *)
                (peer ? &peer->srx.transport : &rxrpc_null_addr.transport);
}
EXPORT_SYMBOL(rxrpc_kernel_remote_addr);
