// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/inet6_hashtables.h>
#endif
#include <net/secure_seq.h>
#include <net/hotdata.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>

u32 inet_ehashfn(const struct net *net, const __be32 laddr,
		 const __u16 lport, const __be32 faddr,
		 const __be16 fport)
{
	net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      inet_ehash_secret + net_hash_mix(net));
}
EXPORT_SYMBOL_GPL(inet_ehashfn);

/* This function handles inet_sock, but also timewait and request sockets
 * for IPv4/IPv6.
 */
static u32 sk_ehashfn(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6 &&
	    !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		return inet6_ehashfn(sock_net(sk),
				     &sk->sk_v6_rcv_saddr, sk->sk_num,
				     &sk->sk_v6_daddr, sk->sk_dport);
#endif
	return inet_ehashfn(sock_net(sk),
			    sk->sk_rcv_saddr, sk->sk_num,
			    sk->sk_daddr, sk->sk_dport);
}

/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct net *net,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum,
						 int l3mdev)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb) {
		write_pnet(&tb->ib_net, net);
		tb->l3mdev = l3mdev;
		tb->port = snum;
		tb->fastreuse = 0;
		tb->fastreuseport = 0;
		INIT_HLIST_HEAD(&tb->bhash2);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->bhash2)) {
		__hlist_del(&tb->node);
		kmem_cache_free(cachep, tb);
	}
}

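/*
 * Check whether a bind bucket matches a (net, port, l3mdev) tuple; used
 * when walking a bhash chain to find an existing bucket before
 * allocating a new one.
 */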
bool inet_bind_bucket_match(const struct inet_bind_bucket *tb, const struct net *net,
			    unsigned short port, int l3mdev)
{
	return net_eq(ib_net(tb), net) && tb->port == port &&
	       tb->l3mdev == l3mdev;
}

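/*
 * Initialize a bind2 (port + address) bucket from its parent bind bucket
 * and the owning socket's bound address, then link it into the bhash2
 * chain and onto the parent bucket's bhash2 list.
 */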
static void inet_bind2_bucket_init(struct inet_bind2_bucket *tb2,
				   struct net *net,
				   struct inet_bind_hashbucket *head,
				   struct inet_bind_bucket *tb,
				   const struct sock *sk)
{
	write_pnet(&tb2->ib_net, net);
	tb2->l3mdev = tb->l3mdev;
	tb2->port = tb->port;
#if IS_ENABLED(CONFIG_IPV6)
	BUILD_BUG_ON(USHRT_MAX < (IPV6_ADDR_ANY | IPV6_ADDR_MAPPED));
	if (sk->sk_family == AF_INET6) {
		tb2->addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
		tb2->v6_rcv_saddr = sk->sk_v6_rcv_saddr;
	} else {
		tb2->addr_type = IPV6_ADDR_MAPPED;
		ipv6_addr_set_v4mapped(sk->sk_rcv_saddr, &tb2->v6_rcv_saddr);
	}
#else
	tb2->rcv_saddr = sk->sk_rcv_saddr;
#endif
	INIT_HLIST_HEAD(&tb2->owners);
	hlist_add_head(&tb2->node, &head->chain);
	hlist_add_head(&tb2->bhash_node, &tb->bhash2);
}

struct inet_bind2_bucket *inet_bind2_bucket_create(struct kmem_cache *cachep,
						   struct net *net,
						   struct inet_bind_hashbucket *head,
						   struct inet_bind_bucket *tb,
						   const struct sock *sk)
{
	struct inet_bind2_bucket *tb2 = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb2)
		inet_bind2_bucket_init(tb2, net, head, tb, sk);

	return tb2;
}

/* Caller must hold hashbucket lock for this tb with local BH disabled */
void inet_bind2_bucket_destroy(struct kmem_cache *cachep, struct inet_bind2_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		__hlist_del(&tb->bhash_node);
		kmem_cache_free(cachep, tb);
	}
}

static bool inet_bind2_bucket_addr_match(const struct inet_bind2_bucket *tb2,
					 const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_addr_equal(&tb2->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);

	if (tb2->addr_type != IPV6_ADDR_MAPPED)
		return false;
#endif
	return tb2->rcv_saddr == sk->sk_rcv_saddr;
}

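/*
 * Record the bound port on the socket and link it onto the bind2
 * bucket's owner list. Both bucket locks must be held by the caller.
 */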
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    struct inet_bind2_bucket *tb2, unsigned short port)
{
	inet_sk(sk)->inet_num = port;
	inet_csk(sk)->icsk_bind_hash = tb;
	inet_csk(sk)->icsk_bind2_hash = tb2;
	sk_add_bind_node(sk, &tb2->owners);
}

/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
	struct inet_bind_hashbucket *head, *head2;
	struct net *net = sock_net(sk);
	struct inet_bind_bucket *tb;
	int bhash;

	bhash = inet_bhashfn(net, inet_sk(sk)->inet_num, hashinfo->bhash_size);
	head = &hashinfo->bhash[bhash];
	head2 = inet_bhashfn_portaddr(hashinfo, sk, net, inet_sk(sk)->inet_num);

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->inet_num = 0;

	spin_lock(&head2->lock);
	if (inet_csk(sk)->icsk_bind2_hash) {
		struct inet_bind2_bucket *tb2 = inet_csk(sk)->icsk_bind2_hash;

		__sk_del_bind_node(sk);
		inet_csk(sk)->icsk_bind2_hash = NULL;
		inet_bind2_bucket_destroy(hashinfo->bind2_bucket_cachep, tb2);
	}
	spin_unlock(&head2->lock);

	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&head->lock);
}

void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_put_port);

int __inet_inherit_port(const struct sock *sk, struct sock *child)
{
	struct inet_hashinfo *table = tcp_or_dccp_get_hashinfo(sk);
	unsigned short port = inet_sk(child)->inet_num;
	struct inet_bind_hashbucket *head, *head2;
	bool created_inet_bind_bucket = false;
	struct net *net = sock_net(sk);
	bool update_fastreuse = false;
	struct inet_bind2_bucket *tb2;
	struct inet_bind_bucket *tb;
	int bhash, l3mdev;

	bhash = inet_bhashfn(net, port, table->bhash_size);
	head = &table->bhash[bhash];
	head2 = inet_bhashfn_portaddr(table, child, net, port);

	spin_lock(&head->lock);
	spin_lock(&head2->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	tb2 = inet_csk(sk)->icsk_bind2_hash;
	if (unlikely(!tb || !tb2)) {
		spin_unlock(&head2->lock);
		spin_unlock(&head->lock);
		return -ENOENT;
	}
	if (tb->port != port) {
		l3mdev = inet_sk_bound_l3mdev(sk);

		/* NOTE: using tproxy and redirecting skbs to a proxy
		 * on a different listener port breaks the assumption
		 * that the listener socket's icsk_bind_hash is the same
		 * as that of the child socket. We have to look up or
		 * create a new bind bucket for the child here. */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (inet_bind_bucket_match(tb, net, port, l3mdev))
				break;
		}
		if (!tb) {
			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
						     net, head, port, l3mdev);
			if (!tb) {
				spin_unlock(&head2->lock);
				spin_unlock(&head->lock);
				return -ENOMEM;
			}
			created_inet_bind_bucket = true;
		}
		update_fastreuse = true;

		goto bhash2_find;
	} else if (!inet_bind2_bucket_addr_match(tb2, child)) {
		l3mdev = inet_sk_bound_l3mdev(sk);

bhash2_find:
		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, child);
		if (!tb2) {
			tb2 = inet_bind2_bucket_create(table->bind2_bucket_cachep,
						       net, head2, tb, child);
			if (!tb2)
				goto error;
		}
	}
	if (update_fastreuse)
		inet_csk_update_fastreuse(tb, child);
	inet_bind_hash(child, tb, tb2, port);
	spin_unlock(&head2->lock);
	spin_unlock(&head->lock);

	return 0;

error:
	if (created_inet_bind_bucket)
		inet_bind_bucket_destroy(table->bind_bucket_cachep, tb);
	spin_unlock(&head2->lock);
	spin_unlock(&head->lock);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);

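/* Map a listening socket to its lhash2 bucket via the (address, port) hash. */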
static struct inet_listen_hashbucket *
inet_lhash2_bucket_sk(struct inet_hashinfo *h, struct sock *sk)
{
	u32 hash;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);
	else
#endif
		hash = ipv4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	return inet_lhash2_bucket(h, hash);
}

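/*
 * Score a candidate listening socket against an incoming packet.
 * Returns -1 on mismatch; otherwise the score grows with specificity
 * (device-bound, exact address family, matching incoming CPU), so the
 * most specific listener wins in inet_lhash2_lookup().
 */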
static inline int compute_score(struct sock *sk, struct net *net,
				const unsigned short hnum, const __be32 daddr,
				const int dif, const int sdif)
{
	int score = -1;

	if (net_eq(sock_net(sk), net) && sk->sk_num == hnum &&
	    !ipv6_only_sock(sk)) {
		if (sk->sk_rcv_saddr != daddr)
			return -1;

		if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
			return -1;
		score = sk->sk_bound_dev_if ? 2 : 1;

		if (sk->sk_family == PF_INET)
			score++;
		if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
			score++;
	}
	return score;
}

/**
 * inet_lookup_reuseport() - execute reuseport logic on AF_INET socket if necessary.
 * @net: network namespace.
 * @sk: AF_INET socket, must be in TCP_LISTEN state for TCP or TCP_CLOSE for UDP.
 * @skb: context for a potential SK_REUSEPORT program.
 * @doff: header offset.
 * @saddr: source address.
 * @sport: source port.
 * @daddr: destination address.
 * @hnum: destination port in host byte order.
 * @ehashfn: hash function used to generate the fallback hash.
 *
 * Return: NULL if sk doesn't have SO_REUSEPORT set, otherwise a pointer to
 *         the selected sock or an error.
 */
struct sock *inet_lookup_reuseport(struct net *net, struct sock *sk,
				   struct sk_buff *skb, int doff,
				   __be32 saddr, __be16 sport,
				   __be32 daddr, unsigned short hnum,
				   inet_ehashfn_t *ehashfn)
{
	struct sock *reuse_sk = NULL;
	u32 phash;

	if (sk->sk_reuseport) {
		phash = INDIRECT_CALL_2(ehashfn, udp_ehashfn, inet_ehashfn,
					net, daddr, hnum, saddr, sport);
		reuse_sk = reuseport_select_sock(sk, phash, skb, doff);
	}
	return reuse_sk;
}
EXPORT_SYMBOL_GPL(inet_lookup_reuseport);

/*
 * There are some nice properties to exploit here. The BSD API
 * does not allow a listening sock to specify the remote port nor the
 * remote address for the connection. So always assume those are both
 * wildcarded during the search since they can never be otherwise.
 */

/* called with rcu_read_lock() : No refcount taken on the socket */
static struct sock *inet_lhash2_lookup(struct net *net,
				struct inet_listen_hashbucket *ilb2,
				struct sk_buff *skb, int doff,
				const __be32 saddr, __be16 sport,
				const __be32 daddr, const unsigned short hnum,
				const int dif, const int sdif)
{
	struct sock *sk, *result = NULL;
	struct hlist_nulls_node *node;
	int score, hiscore = 0;

	sk_nulls_for_each_rcu(sk, node, &ilb2->nulls_head) {
		score = compute_score(sk, net, hnum, daddr, dif, sdif);
		if (score > hiscore) {
			result = inet_lookup_reuseport(net, sk, skb, doff,
						       saddr, sport, daddr, hnum, inet_ehashfn);
			if (result)
				return result;

			result = sk;
			hiscore = score;
		}
	}

	return result;
}

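/*
 * Run any attached BPF sk_lookup programs for an inbound IPv4 packet;
 * if they select a socket that has SO_REUSEPORT enabled, let the
 * reuseport group pick the final socket.
 */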
struct sock *inet_lookup_run_sk_lookup(struct net *net,
				       int protocol,
				       struct sk_buff *skb, int doff,
				       __be32 saddr, __be16 sport,
				       __be32 daddr, u16 hnum, const int dif,
				       inet_ehashfn_t *ehashfn)
{
	struct sock *sk, *reuse_sk;
	bool no_reuseport;

	no_reuseport = bpf_sk_lookup_run_v4(net, protocol, saddr, sport,
					    daddr, hnum, dif, &sk);
	if (no_reuseport || IS_ERR_OR_NULL(sk))
		return sk;

	reuse_sk = inet_lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum,
					 ehashfn);
	if (reuse_sk)
		sk = reuse_sk;
	return sk;
}

struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    struct sk_buff *skb, int doff,
				    const __be32 saddr, __be16 sport,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif, const int sdif)
{
	struct inet_listen_hashbucket *ilb2;
	struct sock *result = NULL;
	unsigned int hash2;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
	    hashinfo == net->ipv4.tcp_death_row.hashinfo) {
		result = inet_lookup_run_sk_lookup(net, IPPROTO_TCP, skb, doff,
						   saddr, sport, daddr, hnum, dif,
						   inet_ehashfn);
		if (result)
			goto done;
	}

	hash2 = ipv4_portaddr_hash(net, daddr, hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);

	result = inet_lhash2_lookup(net, ilb2, skb, doff,
				    saddr, sport, daddr, hnum,
				    dif, sdif);
	if (result)
		goto done;

	/* Lookup lhash2 with INADDR_ANY */
	hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);

	result = inet_lhash2_lookup(net, ilb2, skb, doff,
				    saddr, sport, htonl(INADDR_ANY), hnum,
				    dif, sdif);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);

/* All sockets share a common refcount, but have different destructors */
void sock_gen_put(struct sock *sk)
{
	if (!refcount_dec_and_test(&sk->sk_refcnt))
		return;

	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_free(inet_twsk(sk));
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		reqsk_free(inet_reqsk(sk));
	else
		sk_free(sk);
}
EXPORT_SYMBOL_GPL(sock_gen_put);

void sock_edemux(struct sk_buff *skb)
{
	sock_gen_put(skb->sk);
}
EXPORT_SYMBOL(sock_edemux);

struct sock *__inet_lookup_established(struct net *net,
				       struct inet_hashinfo *hashinfo,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const u16 hnum,
				       const int dif, const int sdif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_nulls_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyway.
	 */
	unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
	unsigned int slot = hash & hashinfo->ehash_mask;
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];

begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		if (likely(inet_match(net, sk, acookie, ports, dif, sdif))) {
			if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
				goto out;
			if (unlikely(!inet_match(net, sk, acookie,
						 ports, dif, sdif))) {
				sock_gen_put(sk);
				goto begin;
			}
			goto found;
		}
	}
	/*
	 * If the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart the lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;
out:
	sk = NULL;
found:
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);

/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	__be32 daddr = inet->inet_rcv_saddr;
	__be32 saddr = inet->inet_daddr;
	int dif = sk->sk_bound_dev_if;
	struct net *net = sock_net(sk);
	int sdif = l3mdev_master_ifindex_by_index(net, dif);
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
	unsigned int hash = inet_ehashfn(net, daddr, lport,
					 saddr, inet->inet_dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_nulls_node *node;
	struct inet_timewait_sock *tw = NULL;

	spin_lock(lock);

	sk_nulls_for_each(sk2, node, &head->chain) {
		if (sk2->sk_hash != hash)
			continue;

		if (likely(inet_match(net, sk2, acookie, ports, dif, sdif))) {
			if (sk2->sk_state == TCP_TIME_WAIT) {
				tw = inet_twsk(sk2);
				if (twsk_unique(sk, sk2, twp))
					break;
			}
			goto not_unique;
		}
	}

	/* Must record num and sport now. Otherwise we will see
	 * in the hash table a socket with a funny identity.
	 */
	inet->inet_num = lport;
	inet->inet_sport = htons(lport);
	sk->sk_hash = hash;
	WARN_ON(!sk_unhashed(sk));
	__sk_nulls_add_node_rcu(sk, &head->chain);
	if (tw) {
		sk_nulls_del_node_init_rcu((struct sock *)tw);
		__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
	}
	spin_unlock(lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	if (twp) {
		*twp = tw;
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule_put(tw);
	}
	return 0;

not_unique:
	spin_unlock(lock);
	return -EADDRNOTAVAIL;
}

static u64 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);

	return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
					  inet->inet_daddr,
					  inet->inet_dport);
}

/* Searches for an existing socket in the ehash bucket list.
 * Returns true if found, false otherwise.
 */
static bool inet_ehash_lookup_by_sk(struct sock *sk,
				    struct hlist_nulls_head *list)
{
	const __portpair ports = INET_COMBINED_PORTS(sk->sk_dport, sk->sk_num);
	const int sdif = sk->sk_bound_dev_if;
	const int dif = sk->sk_bound_dev_if;
	const struct hlist_nulls_node *node;
	struct net *net = sock_net(sk);
	struct sock *esk;

	INET_ADDR_COOKIE(acookie, sk->sk_daddr, sk->sk_rcv_saddr);

	sk_nulls_for_each_rcu(esk, node, list) {
		if (esk->sk_hash != sk->sk_hash)
			continue;
		if (sk->sk_family == AF_INET) {
			if (unlikely(inet_match(net, esk, acookie,
						ports, dif, sdif))) {
				return true;
			}
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			if (unlikely(inet6_match(net, esk,
						 &sk->sk_v6_daddr,
						 &sk->sk_v6_rcv_saddr,
						 ports, dif, sdif))) {
				return true;
			}
		}
#endif
	}
	return false;
}

/* Insert a socket into ehash, and eventually remove another one
 * (the other one can be a SYN_RECV or TIMEWAIT socket).
 * If a matching socket already exists, sk is not inserted and
 * *found_dup_sk is set to true.
 */
bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
{
	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
	struct inet_ehash_bucket *head;
	struct hlist_nulls_head *list;
	spinlock_t *lock;
	bool ret = true;

	WARN_ON_ONCE(!sk_unhashed(sk));

	sk->sk_hash = sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock(lock);
	if (osk) {
		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
		ret = sk_nulls_del_node_init_rcu(osk);
	} else if (found_dup_sk) {
		*found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
		if (*found_dup_sk)
			ret = false;
	}

	if (ret)
		__sk_nulls_add_node_rcu(sk, list);

	spin_unlock(lock);

	return ret;
}

bool inet_ehash_nolisten(struct sock *sk, struct sock *osk, bool *found_dup_sk)
{
	bool ok = inet_ehash_insert(sk, osk, found_dup_sk);

	if (ok) {
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	} else {
		this_cpu_inc(*sk->sk_prot->orphan_count);
		inet_sk_set_state(sk, TCP_CLOSE);
		sock_set_flag(sk, SOCK_DEAD);
		inet_csk_destroy_sock(sk);
	}
	return ok;
}
EXPORT_SYMBOL_GPL(inet_ehash_nolisten);

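/*
 * Join sk to the reuseport group of a compatible listener (same family,
 * bound device, bind bucket and owning uid) already present in the
 * bucket, or allocate a fresh group if none exists yet.
 */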
static int inet_reuseport_add_sock(struct sock *sk,
				   struct inet_listen_hashbucket *ilb)
{
	struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
	const struct hlist_nulls_node *node;
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
		if (sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
		    inet_csk(sk2)->icsk_bind_hash == tb &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false))
			return reuseport_add_sock(sk, sk2,
						  inet_rcv_saddr_any(sk));
	}

	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
}

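/*
 * Hash a socket: non-listeners go straight into ehash; listeners join
 * their reuseport group (if any) and are then added to the lhash2
 * bucket for their bound address and port.
 */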
int __inet_hash(struct sock *sk, struct sock *osk)
{
	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
	struct inet_listen_hashbucket *ilb2;
	int err = 0;

	if (sk->sk_state != TCP_LISTEN) {
		local_bh_disable();
		inet_ehash_nolisten(sk, osk, NULL);
		local_bh_enable();
		return 0;
	}
	WARN_ON(!sk_unhashed(sk));
	ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);

	spin_lock(&ilb2->lock);
	if (sk->sk_reuseport) {
		err = inet_reuseport_add_sock(sk, ilb2);
		if (err)
			goto unlock;
	}
	sock_set_flag(sk, SOCK_RCU_FREE);
	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
	    sk->sk_family == AF_INET6)
		__sk_nulls_add_node_tail_rcu(sk, &ilb2->nulls_head);
	else
		__sk_nulls_add_node_rcu(sk, &ilb2->nulls_head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
unlock:
	spin_unlock(&ilb2->lock);

	return err;
}
EXPORT_SYMBOL(__inet_hash);

int inet_hash(struct sock *sk)
{
	int err = 0;

	if (sk->sk_state != TCP_CLOSE)
		err = __inet_hash(sk, NULL);

	return err;
}
EXPORT_SYMBOL_GPL(inet_hash);

void inet_unhash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);

	if (sk_unhashed(sk))
		return;

	if (sk->sk_state == TCP_LISTEN) {
		struct inet_listen_hashbucket *ilb2;

		ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);
		/* Don't disable bottom halves while acquiring the lock to
		 * avoid circular locking dependency on PREEMPT_RT.
		 */
		spin_lock(&ilb2->lock);
		if (sk_unhashed(sk)) {
			spin_unlock(&ilb2->lock);
			return;
		}

		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_stop_listen_sock(sk);

		__sk_nulls_del_node_init_rcu(sk);
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
		spin_unlock(&ilb2->lock);
	} else {
		spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

		spin_lock_bh(lock);
		if (sk_unhashed(sk)) {
			spin_unlock_bh(lock);
			return;
		}
		__sk_nulls_del_node_init_rcu(sk);
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
		spin_unlock_bh(lock);
	}
}
EXPORT_SYMBOL_GPL(inet_unhash);

static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb,
				    const struct net *net, unsigned short port,
				    int l3mdev, const struct sock *sk)
{
	if (!net_eq(ib2_net(tb), net) || tb->port != port ||
	    tb->l3mdev != l3mdev)
		return false;

	return inet_bind2_bucket_addr_match(tb, sk);
}

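/*
 * Like inet_bind2_bucket_match(), but match a bucket bound to the
 * wildcard address (IPV6_ADDR_ANY or INADDR_ANY) rather than to the
 * socket's own address.
 */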
bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
				      unsigned short port, int l3mdev, const struct sock *sk)
{
	if (!net_eq(ib2_net(tb), net) || tb->port != port ||
	    tb->l3mdev != l3mdev)
		return false;

#if IS_ENABLED(CONFIG_IPV6)
	if (tb->addr_type == IPV6_ADDR_ANY)
		return true;

	if (tb->addr_type != IPV6_ADDR_MAPPED)
		return false;

	if (sk->sk_family == AF_INET6 &&
	    !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
		return false;
#endif
	return tb->rcv_saddr == 0;
}

/* The socket's bhash2 hashbucket spinlock must be held when this is called */
struct inet_bind2_bucket *
inet_bind2_bucket_find(const struct inet_bind_hashbucket *head, const struct net *net,
		       unsigned short port, int l3mdev, const struct sock *sk)
{
	struct inet_bind2_bucket *bhash2 = NULL;

	inet_bind_bucket_for_each(bhash2, &head->chain)
		if (inet_bind2_bucket_match(bhash2, net, port, l3mdev, sk))
			break;

	return bhash2;
}

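/* Return the bhash2 bucket head for the wildcard address on @port. */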
struct inet_bind_hashbucket *
inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, int port)
{
	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
	u32 hash;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		hash = ipv6_portaddr_hash(net, &in6addr_any, port);
	else
#endif
		hash = ipv4_portaddr_hash(net, 0, port);

	return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
}

static void inet_update_saddr(struct sock *sk, void *saddr, int family)
{
	if (family == AF_INET) {
		inet_sk(sk)->inet_saddr = *(__be32 *)saddr;
		sk_rcv_saddr_set(sk, inet_sk(sk)->inet_saddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else {
		sk->sk_v6_rcv_saddr = *(struct in6_addr *)saddr;
	}
#endif
}

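/*
 * Rehash an already-bound socket after a source address change (or
 * reset): unlink it from its current bind2 bucket and insert it into
 * the bucket matching the new address, keeping bhash2 consistent
 * under the bucket locks.
 */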
static int __inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family, bool reset)
{
	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
	struct inet_bind_hashbucket *head, *head2;
	struct inet_bind2_bucket *tb2, *new_tb2;
	int l3mdev = inet_sk_bound_l3mdev(sk);
	int port = inet_sk(sk)->inet_num;
	struct net *net = sock_net(sk);
	int bhash;

	if (!inet_csk(sk)->icsk_bind2_hash) {
		/* Not bind()ed before. */
		if (reset)
			inet_reset_saddr(sk);
		else
			inet_update_saddr(sk, saddr, family);

		return 0;
	}

	/* Allocate a bind2 bucket ahead of time to avoid permanently putting
	 * the bhash2 table in an inconsistent state if a new tb2 bucket
	 * allocation fails.
	 */
	new_tb2 = kmem_cache_alloc(hinfo->bind2_bucket_cachep, GFP_ATOMIC);
	if (!new_tb2) {
		if (reset) {
			/* The (INADDR_ANY, port) bucket might have already
			 * been freed, then we cannot fixup icsk_bind2_hash,
			 * so we give up and unlink sk from bhash/bhash2 not
			 * to leave inconsistency in bhash2.
			 */
			inet_put_port(sk);
			inet_reset_saddr(sk);
		}

		return -ENOMEM;
	}

	bhash = inet_bhashfn(net, port, hinfo->bhash_size);
	head = &hinfo->bhash[bhash];
	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);

	/* If we change saddr locklessly, another thread
	 * iterating over bhash might see a corrupted address.
	 */
	spin_lock_bh(&head->lock);

	spin_lock(&head2->lock);
	__sk_del_bind_node(sk);
	inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, inet_csk(sk)->icsk_bind2_hash);
	spin_unlock(&head2->lock);

	if (reset)
		inet_reset_saddr(sk);
	else
		inet_update_saddr(sk, saddr, family);

	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);

	spin_lock(&head2->lock);
	tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
	if (!tb2) {
		tb2 = new_tb2;
		inet_bind2_bucket_init(tb2, net, head2, inet_csk(sk)->icsk_bind_hash, sk);
	}
	inet_csk(sk)->icsk_bind2_hash = tb2;
	sk_add_bind_node(sk, &tb2->owners);
	spin_unlock(&head2->lock);

	spin_unlock_bh(&head->lock);

	if (tb2 != new_tb2)
		kmem_cache_free(hinfo->bind2_bucket_cachep, new_tb2);

	return 0;
}

int inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family)
{
	return __inet_bhash2_update_saddr(sk, saddr, family, false);
}
EXPORT_SYMBOL_GPL(inet_bhash2_update_saddr);

void inet_bhash2_reset_saddr(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		__inet_bhash2_update_saddr(sk, NULL, 0, true);
}
EXPORT_SYMBOL_GPL(inet_bhash2_reset_saddr);

/* RFC 6056 3.3.4.  Algorithm 4: Double-Hash Port Selection Algorithm
 * Note that we use 32bit integers (vs RFC 'short integers')
 * because 2^16 is not a multiple of num_ephemeral and this
 * property might be used by a clever attacker.
 *
 * The RFC claims that TABLE_LENGTH=10 buckets give an improvement, but
 * attacks have since been demonstrated, so we use 65536 buckets by
 * default instead, to really give more isolation and privacy at the
 * expense of 256kB of kernel memory.
 */
#define INET_TABLE_PERTURB_SIZE (1 << CONFIG_INET_TABLE_PERTURB_ORDER)
static u32 *table_perturb;

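/*
 * Pick a source port for a connecting socket using the double-hash
 * scheme above, and hash the socket into ehash. If a port is already
 * bound (inet_num != 0), only the uniqueness check runs. Otherwise the
 * ephemeral range is scanned starting at a per-destination keyed
 * offset, skipping buckets that may carry conflicting bind() users
 * (fastreuse >= 0), until check_established() confirms the four-tuple
 * is unique; a dying TIME_WAIT socket holding the port may be recycled
 * along the way.
 */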
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u64 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_bind_hashbucket *head, *head2;
	struct inet_timewait_sock *tw = NULL;
	int port = inet_sk(sk)->inet_num;
	struct net *net = sock_net(sk);
	struct inet_bind2_bucket *tb2;
	struct inet_bind_bucket *tb;
	bool tb_created = false;
	u32 remaining, offset;
	int ret, i, low, high;
	bool local_ports;
	int step, l3mdev;
	u32 index;

	if (port) {
		local_bh_disable();
		ret = check_established(death_row, sk, port, NULL);
		local_bh_enable();
		return ret;
	}

	l3mdev = inet_sk_bound_l3mdev(sk);

	local_ports = inet_sk_get_local_port_range(sk, &low, &high);
	step = local_ports ? 1 : 2;

	high++; /* [32768, 60999] -> [32768, 61000[ */
	remaining = high - low;
	if (!local_ports && remaining > 1)
		remaining &= ~1U;

	get_random_sleepable_once(table_perturb,
				  INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
	index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);

	offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
	offset %= remaining;

	/* In the first pass we try ports of @low parity.
	 * inet_csk_get_port() does the opposite choice.
	 */
	if (!local_ports)
		offset &= ~1U;
other_parity_scan:
	port = low + offset;
	for (i = 0; i < remaining; i += step, port += step) {
		if (unlikely(port >= high))
			port -= remaining;
		if (inet_is_local_reserved_port(net, port))
			continue;
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);

		/* Does not bother with rcv_saddr checks, because
		 * the established check is already unique enough.
		 */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (inet_bind_bucket_match(tb, net, port, l3mdev)) {
				if (tb->fastreuse >= 0 ||
				    tb->fastreuseport >= 0)
					goto next_port;
				WARN_ON(hlist_empty(&tb->bhash2));
				if (!check_established(death_row, sk,
						       port, &tw))
					goto ok;
				goto next_port;
			}
		}

		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
					     net, head, port, l3mdev);
		if (!tb) {
			spin_unlock_bh(&head->lock);
			return -ENOMEM;
		}
		tb_created = true;
		tb->fastreuse = -1;
		tb->fastreuseport = -1;
		goto ok;
next_port:
		spin_unlock_bh(&head->lock);
		cond_resched();
	}

	if (!local_ports) {
		offset++;
		if ((offset & 1) && remaining > 1)
			goto other_parity_scan;
	}
	return -EADDRNOTAVAIL;

ok:
	/* Find the corresponding tb2 bucket since we need to
	 * add the socket to the bhash2 table as well.
	 */
	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
	spin_lock(&head2->lock);

	tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
	if (!tb2) {
		tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep, net,
					       head2, tb, sk);
		if (!tb2)
			goto error;
	}

	/* Here we want to add a little bit of randomness to the next source
	 * port that will be chosen. We use a max() with a random here so that
	 * on low contention the randomness is maximal and on high contention
	 * it may be nonexistent.
	 */
	i = max_t(int, i, get_random_u32_below(8) * step);
	WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + step);

	/* Head lock still held and bh's disabled */
	inet_bind_hash(sk, tb, tb2, port);

	if (sk_unhashed(sk)) {
		inet_sk(sk)->inet_sport = htons(port);
		inet_ehash_nolisten(sk, (struct sock *)tw, NULL);
	}
	if (tw)
		inet_twsk_bind_unhash(tw, hinfo);

	spin_unlock(&head2->lock);
	spin_unlock(&head->lock);

	if (tw)
		inet_twsk_deschedule_put(tw);
	local_bh_enable();
	return 0;

error:
	if (sk_hashed(sk)) {
		spinlock_t *lock = inet_ehash_lockp(hinfo, sk->sk_hash);

		sock_prot_inuse_add(net, sk->sk_prot, -1);

		spin_lock(lock);
		__sk_nulls_del_node_init_rcu(sk);
		spin_unlock(lock);

		sk->sk_hash = 0;
		inet_sk(sk)->inet_sport = 0;
		inet_sk(sk)->inet_num = 0;

		if (tw)
			inet_twsk_bind_unhash(tw, hinfo);
	}

	spin_unlock(&head2->lock);
	if (tb_created)
		inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
	spin_unlock(&head->lock);

	if (tw)
		inet_twsk_deschedule_put(tw);

	local_bh_enable();

	return -ENOMEM;
}

/*
 * Bind a port for a connect operation and hash it.
 */
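/* A typical caller (a sketch, not the only one): tcp_v4_connect()
 * binds its ephemeral port this way once the route is resolved:
 *
 *	err = inet_hash_connect(tcp_death_row, sk);
 *	if (err)
 *		goto failure;
 */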
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	u64 port_offset = 0;

	if (!inet_sk(sk)->inet_num)
		port_offset = inet_sk_port_offset(sk);
	return __inet_hash_connect(death_row, sk, port_offset,
				   __inet_check_established);
}
EXPORT_SYMBOL_GPL(inet_hash_connect);

static void init_hashinfo_lhash2(struct inet_hashinfo *h)
{
	int i;

	for (i = 0; i <= h->lhash2_mask; i++) {
		spin_lock_init(&h->lhash2[i].lock);
		INIT_HLIST_NULLS_HEAD(&h->lhash2[i].nulls_head,
				      i + LISTENING_NULLS_BASE);
	}
}

void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
				unsigned long numentries, int scale,
				unsigned long low_limit,
				unsigned long high_limit)
{
	h->lhash2 = alloc_large_system_hash(name,
					    sizeof(*h->lhash2),
					    numentries,
					    scale,
					    0,
					    NULL,
					    &h->lhash2_mask,
					    low_limit,
					    high_limit);
	init_hashinfo_lhash2(h);

	/* this one is used for source ports of outgoing connections */
	table_perturb = alloc_large_system_hash("Table-perturb",
						sizeof(*table_perturb),
						INET_TABLE_PERTURB_SIZE,
						0, 0, NULL, NULL,
						INET_TABLE_PERTURB_SIZE,
						INET_TABLE_PERTURB_SIZE);
}

int inet_hashinfo2_init_mod(struct inet_hashinfo *h)
{
	h->lhash2 = kmalloc_array(INET_LHTABLE_SIZE, sizeof(*h->lhash2), GFP_KERNEL);
	if (!h->lhash2)
		return -ENOMEM;

	h->lhash2_mask = INET_LHTABLE_SIZE - 1;
	/* INET_LHTABLE_SIZE must be a power of 2 */
	BUG_ON(INET_LHTABLE_SIZE & h->lhash2_mask);

	init_hashinfo_lhash2(h);
	return 0;
}
EXPORT_SYMBOL_GPL(inet_hashinfo2_init_mod);

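/*
 * Size the ehash lock array: roughly two cache lines' worth of
 * spinlocks per possible CPU, rounded up to a power of two, but never
 * more locks than there are hash buckets.
 */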
int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
	unsigned int locksz = sizeof(spinlock_t);
	unsigned int i, nblocks = 1;

	if (locksz != 0) {
		/* allocate 2 cache lines or at least one spinlock per cpu */
		nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
		nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());

		/* no more locks than number of hash buckets */
		nblocks = min(nblocks, hashinfo->ehash_mask + 1);

		hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL);
		if (!hashinfo->ehash_locks)
			return -ENOMEM;

		for (i = 0; i < nblocks; i++)
			spin_lock_init(&hashinfo->ehash_locks[i]);
	}
	hashinfo->ehash_locks_mask = nblocks - 1;
	return 0;
}
EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc);

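/*
 * Clone the template hashinfo for a netns that wants its own ehash of
 * @ehash_entries buckets (a power of two); the bind tables remain
 * shared with the template.
 */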
struct inet_hashinfo *inet_pernet_hashinfo_alloc(struct inet_hashinfo *hashinfo,
						 unsigned int ehash_entries)
{
	struct inet_hashinfo *new_hashinfo;
	int i;

	new_hashinfo = kmemdup(hashinfo, sizeof(*hashinfo), GFP_KERNEL);
	if (!new_hashinfo)
		goto err;

	new_hashinfo->ehash = vmalloc_huge(ehash_entries * sizeof(struct inet_ehash_bucket),
					   GFP_KERNEL_ACCOUNT);
	if (!new_hashinfo->ehash)
		goto free_hashinfo;

	new_hashinfo->ehash_mask = ehash_entries - 1;

	if (inet_ehash_locks_alloc(new_hashinfo))
		goto free_ehash;

	for (i = 0; i < ehash_entries; i++)
		INIT_HLIST_NULLS_HEAD(&new_hashinfo->ehash[i].chain, i);

	new_hashinfo->pernet = true;

	return new_hashinfo;

free_ehash:
	vfree(new_hashinfo->ehash);
free_hashinfo:
	kfree(new_hashinfo);
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_pernet_hashinfo_alloc);

void inet_pernet_hashinfo_free(struct inet_hashinfo *hashinfo)
{
	if (!hashinfo->pernet)
		return;

	inet_ehash_locks_free(hashinfo);
	vfree(hashinfo->ehash);
	kfree(hashinfo);
}
EXPORT_SYMBOL_GPL(inet_pernet_hashinfo_free);