// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	:	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	:	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	:	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *		Arnt Gulbrandsen:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *		Willy Konynenberg:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 *		Arnaldo C. Melo	:	convert /proc/net/udp to seq_file
 *		YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *		Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *						a single port at the same time.
 *		Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
 *		James Chapman	:	Add L2TP encapsulation type.
 */

#define pr_fmt(fmt) "UDP: " fmt

#include <linux/bpf-cgroup.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/sock_diag.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/gso.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <linux/btf_ids.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>
#include <net/addrconf.h>
#include <net/udp_tunnel.h>
#include <net/gro.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6_stubs.h>
#endif
#include <net/rps.h>

struct udp_table udp_table __read_mostly;

long sysctl_udp_mem[3] __read_mostly;
EXPORT_IPV6_MOD(sysctl_udp_mem);

atomic_long_t udp_memory_allocated ____cacheline_aligned_in_smp;
EXPORT_IPV6_MOD(udp_memory_allocated);
DEFINE_PER_CPU(int, udp_memory_per_cpu_fw_alloc);
EXPORT_PER_CPU_SYMBOL_GPL(udp_memory_per_cpu_fw_alloc);

#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN_PERNET)
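
/* Worked example (illustrative numbers, not values from this file): if the
 * per-netns table had UDP_HTABLE_SIZE_MIN_PERNET == 128 slots, PORTS_PER_CHAIN
 * would be 65536 / 128 = 512, i.e. each primary hash chain covers 512
 * candidate port numbers, which is what sizes the port-search bitmap used by
 * udp_lib_get_port() below.
 */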

static struct udp_table *udp_get_table_prot(struct sock *sk)
{
	return sk->sk_prot->h.udp_table ? : sock_net(sk)->ipv4.udp_table;
}

static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
			       unsigned long *bitmap,
			       struct sock *sk, unsigned int log)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				if (!bitmap)
					return 0;
			} else {
				if (!bitmap)
					return 1;
				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
					  bitmap);
			}
		}
	}
	return 0;
}

/*
 * Note: we still hold the spinlock of the primary hash chain, so no other
 * writer can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
				struct udp_hslot *hslot2,
				struct sock *sk)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);
	int res = 0;

	spin_lock(&hslot2->lock);
	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				res = 0;
			} else {
				res = 1;
			}
			break;
		}
	}
	spin_unlock(&hslot2->lock);
	return res;
}

static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
{
	struct net *net = sock_net(sk);
	kuid_t uid = sock_i_uid(sk);
	struct sock *sk2;

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false)) {
			return reuseport_add_sock(sk, sk2,
						  inet_rcv_saddr_any(sk));
		}
	}

	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
}

/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr)
{
	struct udp_table *udptable = udp_get_table_prot(sk);
	struct udp_hslot *hslot, *hslot2;
	struct net *net = sock_net(sk);
	int error = -EADDRINUSE;

	if (!snum) {
		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
		unsigned short first, last;
		int low, high, remaining;
		unsigned int rand;

		inet_sk_get_local_port_range(sk, &low, &high);
		remaining = (high - low) + 1;

		rand = get_random_u32();
		first = reciprocal_scale(rand, remaining) + low;
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
		rand = (rand | 1) * (udptable->mask + 1);
		last = first + udptable->mask + 1;
		do {
			hslot = udp_hashslot(udptable, net, first);
			bitmap_zero(bitmap, PORTS_PER_CHAIN);
			spin_lock_bh(&hslot->lock);
			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
					    udptable->log);

			snum = first;
			/*
			 * Iterate on all possible values of snum for this hash.
			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
			 * gives us randomization and full range coverage.
			 */
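			/* Illustrative sketch (hypothetical sizes, not from
			 * this file): with a 4-slot table, mask + 1 == 4 and
			 * rand might become e.g. 3 * 4 = 12.  Stepping snum by
			 * 12 keeps every candidate in this same hash chain,
			 * and because the multiplier is odd the walk visits
			 * each port mapping here exactly once before the
			 * 16-bit wrap brings snum back to 'first'.
			 */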
			do {
				if (low <= snum && snum <= high &&
				    !test_bit(snum >> udptable->log, bitmap) &&
				    !inet_is_local_reserved_port(net, snum))
					goto found;
				snum += rand;
			} while (snum != first);
			spin_unlock_bh(&hslot->lock);
			cond_resched();
		} while (++first != last);
		goto fail;
	} else {
		hslot = udp_hashslot(udptable, net, snum);
		spin_lock_bh(&hslot->lock);
		if (hslot->count > 10) {
			int exist;
			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

			slot2          &= udptable->mask;
			hash2_nulladdr &= udptable->mask;

			hslot2 = udp_hashslot2(udptable, slot2);
			if (hslot->count < hslot2->count)
				goto scan_primary_hash;

			exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
			if (!exist && (hash2_nulladdr != slot2)) {
				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
				exist = udp_lib_lport_inuse2(net, snum, hslot2,
							     sk);
			}
			if (exist)
				goto fail_unlock;
			else
				goto found;
		}
scan_primary_hash:
		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
			goto fail_unlock;
	}
found:
	inet_sk(sk)->inet_num = snum;
	udp_sk(sk)->udp_port_hash = snum;
	udp_sk(sk)->udp_portaddr_hash ^= snum;
	if (sk_unhashed(sk)) {
		if (sk->sk_reuseport &&
		    udp_reuseport_add_sock(sk, hslot)) {
			inet_sk(sk)->inet_num = 0;
			udp_sk(sk)->udp_port_hash = 0;
			udp_sk(sk)->udp_portaddr_hash ^= snum;
			goto fail_unlock;
		}

		sock_set_flag(sk, SOCK_RCU_FREE);

		sk_add_node_rcu(sk, &hslot->head);
		hslot->count++;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		spin_lock(&hslot2->lock);
		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		    sk->sk_family == AF_INET6)
			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		else
			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		hslot2->count++;
		spin_unlock(&hslot2->lock);
	}

	error = 0;
fail_unlock:
	spin_unlock_bh(&hslot->lock);
fail:
	return error;
}
EXPORT_IPV6_MOD(udp_lib_get_port);

int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
	unsigned int hash2_partial =
		ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

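/* Ranking sketch for compute_score() below, derived only from its checks:
 * a negative score means the socket cannot receive this packet at all; an
 * AF_INET socket bound to the packet's destination address starts at 2 (a
 * v6-mapped one at 1), and each of a matching connected remote address, a
 * matching connected remote port, and a matching bound device adds 4, with
 * a final +1 if the socket last ran on the current CPU.  A connected,
 * device-bound exact match therefore always outranks a mere listener.
 */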
static int compute_score(struct sock *sk, const struct net *net,
			 __be32 saddr, __be16 sport,
			 __be32 daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    ipv6_only_sock(sk))
		return -1;

	if (sk->sk_rcv_saddr != daddr)
		return -1;

	score = (sk->sk_family == PF_INET) ? 2 : 1;

	inet = inet_sk(sk);
	if (inet->inet_daddr) {
		if (inet->inet_daddr != saddr)
			return -1;
		score += 4;
	}

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score += 4;
	}

	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
					dif, sdif);
	if (!dev_match)
		return -1;
	if (sk->sk_bound_dev_if)
		score += 4;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;
	return score;
}

u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport,
		const __be32 faddr, const __be16 fport)
{
	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      udp_ehash_secret + net_hash_mix(net));
}
EXPORT_IPV6_MOD(udp_ehashfn);

/**
 * udp4_lib_lookup1() - Simplified lookup using primary hash (destination port)
 * @net:	Network namespace
 * @saddr:	Source address, network order
 * @sport:	Source port, network order
 * @daddr:	Destination address, network order
 * @hnum:	Destination port, host order
 * @dif:	Destination interface index
 * @sdif:	Destination bridge port index, if relevant
 * @udptable:	Set of UDP hash tables
 *
 * Simplified lookup to be used as fallback if no sockets are found due to a
 * potential race between (receive) address change, and lookup happening before
 * the rehash operation. This function ignores SO_REUSEPORT groups while scoring
 * result sockets, because if we have one, we don't need the fallback at all.
 *
 * Called under rcu_read_lock().
 *
 * Return: socket with highest matching score if any, NULL if none
 */
static struct sock *udp4_lib_lookup1(const struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum,
				     int dif, int sdif,
				     const struct udp_table *udptable)
{
	unsigned int slot = udp_hashfn(net, hnum, udptable->mask);
	struct udp_hslot *hslot = &udptable->hash[slot];
	struct sock *sk, *result = NULL;
	int score, badness = 0;

	sk_for_each_rcu(sk, &hslot->head) {
		score = compute_score(sk, net,
				      saddr, sport, daddr, hnum, dif, sdif);
		if (score > badness) {
			result = sk;
			badness = score;
		}
	}

	return result;
}

/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(const struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum,
				     int dif, int sdif,
				     struct udp_hslot *hslot2,
				     struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;
	bool need_rescore;

	result = NULL;
	badness = 0;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		need_rescore = false;
rescore:
		score = compute_score(need_rescore ? result : sk, net, saddr,
				      sport, daddr, hnum, dif, sdif);
		if (score > badness) {
			badness = score;

			if (need_rescore)
				continue;

			if (sk->sk_state == TCP_ESTABLISHED) {
				result = sk;
				continue;
			}

			result = inet_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
						       saddr, sport, daddr, hnum, udp_ehashfn);
			if (!result) {
				result = sk;
				continue;
			}

			/* Fall back to scoring if group has connections */
			if (!reuseport_has_conns(sk))
				return result;

			/* Reuseport logic returned an error, keep original score. */
			if (IS_ERR(result))
				continue;

			/* compute_score() is too long a function to be
			 * inlined, and calling it again here yields
			 * measurable overhead for some workloads.  Work
			 * around that by jumping backwards to rescore
			 * 'result'.
			 */
			need_rescore = true;
			goto rescore;
		}
	}
	return result;
}

#if IS_ENABLED(CONFIG_BASE_SMALL)
static struct sock *udp4_lib_lookup4(const struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum,
				     int dif, int sdif,
				     struct udp_table *udptable)
{
	return NULL;
}

static void udp_rehash4(struct udp_table *udptable, struct sock *sk,
			u16 newhash4)
{
}

static void udp_unhash4(struct udp_table *udptable, struct sock *sk)
{
}
#else /* !CONFIG_BASE_SMALL */
static struct sock *udp4_lib_lookup4(const struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum,
				     int dif, int sdif,
				     struct udp_table *udptable)
{
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	const struct hlist_nulls_node *node;
	struct udp_hslot *hslot4;
	unsigned int hash4, slot;
	struct udp_sock *up;
	struct sock *sk;

	hash4 = udp_ehashfn(net, daddr, hnum, saddr, sport);
	slot = hash4 & udptable->mask;
	hslot4 = &udptable->hash4[slot];
	INET_ADDR_COOKIE(acookie, saddr, daddr);

begin:
	/* SLAB_TYPESAFE_BY_RCU is not used, so we don't need to touch sk_refcnt */
	udp_lrpa_for_each_entry_rcu(up, node, &hslot4->nulls_head) {
		sk = (struct sock *)up;
		if (inet_match(net, sk, acookie, ports, dif, sdif))
			return sk;
	}

	/* if the nulls value we got at the end of this lookup is not the
	 * expected one, we must restart the lookup. We probably met an item
	 * that was moved to another chain due to rehash.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;

	return NULL;
}

/* udp_rehash4() only checks hslot4, and hash4_cnt is not processed. */
static void udp_rehash4(struct udp_table *udptable, struct sock *sk,
			u16 newhash4)
{
	struct udp_hslot *hslot4, *nhslot4;

	hslot4 = udp_hashslot4(udptable, udp_sk(sk)->udp_lrpa_hash);
	nhslot4 = udp_hashslot4(udptable, newhash4);
	udp_sk(sk)->udp_lrpa_hash = newhash4;

	if (hslot4 != nhslot4) {
		spin_lock_bh(&hslot4->lock);
		hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_lrpa_node);
		hslot4->count--;
		spin_unlock_bh(&hslot4->lock);

		spin_lock_bh(&nhslot4->lock);
		hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_lrpa_node,
					 &nhslot4->nulls_head);
		nhslot4->count++;
		spin_unlock_bh(&nhslot4->lock);
	}
}

static void udp_unhash4(struct udp_table *udptable, struct sock *sk)
{
	struct udp_hslot *hslot2, *hslot4;

	if (udp_hashed4(sk)) {
		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		hslot4 = udp_hashslot4(udptable, udp_sk(sk)->udp_lrpa_hash);

		spin_lock(&hslot4->lock);
		hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_lrpa_node);
		hslot4->count--;
		spin_unlock(&hslot4->lock);

		spin_lock(&hslot2->lock);
		udp_hash4_dec(hslot2);
		spin_unlock(&hslot2->lock);
	}
}

void udp_lib_hash4(struct sock *sk, u16 hash)
{
	struct udp_hslot *hslot, *hslot2, *hslot4;
	struct net *net = sock_net(sk);
	struct udp_table *udptable;

	/* A connected UDP socket can re-connect to another remote address,
	 * which will be handled by rehash, so there is no need to redo
	 * hash4 here.
	 */
	if (udp_hashed4(sk))
		return;

	udptable = net->ipv4.udp_table;
	hslot = udp_hashslot(udptable, net, udp_sk(sk)->udp_port_hash);
	hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
	hslot4 = udp_hashslot4(udptable, hash);
	udp_sk(sk)->udp_lrpa_hash = hash;

	spin_lock_bh(&hslot->lock);
	if (rcu_access_pointer(sk->sk_reuseport_cb))
		reuseport_detach_sock(sk);

	spin_lock(&hslot4->lock);
	hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_lrpa_node,
				 &hslot4->nulls_head);
	hslot4->count++;
	spin_unlock(&hslot4->lock);

	spin_lock(&hslot2->lock);
	udp_hash4_inc(hslot2);
	spin_unlock(&hslot2->lock);

	spin_unlock_bh(&hslot->lock);
}
EXPORT_IPV6_MOD(udp_lib_hash4);

/* call with sock lock */
void udp4_hash4(struct sock *sk)
{
	struct net *net = sock_net(sk);
	unsigned int hash;

	if (sk_unhashed(sk) || sk->sk_rcv_saddr == htonl(INADDR_ANY))
		return;

	hash = udp_ehashfn(net, sk->sk_rcv_saddr, sk->sk_num,
			   sk->sk_daddr, sk->sk_dport);

	udp_lib_hash4(sk, hash);
}
EXPORT_IPV6_MOD(udp4_hash4);
#endif /* CONFIG_BASE_SMALL */

/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 */
struct sock *__udp4_lib_lookup(const struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport, int dif,
		int sdif, struct udp_table *udptable, struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	struct udp_hslot *hslot2;
	struct sock *result, *sk;
	unsigned int hash2;

	hash2 = ipv4_portaddr_hash(net, daddr, hnum);
	hslot2 = udp_hashslot2(udptable, hash2);

	if (udp_has_hash4(hslot2)) {
		result = udp4_lib_lookup4(net, saddr, sport, daddr, hnum,
					  dif, sdif, udptable);
		if (result) /* udp4_lib_lookup4() returns sk or NULL */
			return result;
	}

	/* Lookup connected or non-wildcard sockets */
	result = udp4_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
		goto done;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
	    udptable == net->ipv4.udp_table) {
		sk = inet_lookup_run_sk_lookup(net, IPPROTO_UDP, skb, sizeof(struct udphdr),
					       saddr, sport, daddr, hnum, dif,
					       udp_ehashfn);
		if (sk) {
			result = sk;
			goto done;
		}
	}

	/* Got non-wildcard socket or error on first lookup */
	if (result)
		goto done;

	/* Lookup wildcard sockets */
	hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
	hslot2 = udp_hashslot2(udptable, hash2);

	result = udp4_lib_lookup2(net, saddr, sport,
				  htonl(INADDR_ANY), hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result))
		goto done;

	/* Primary hash (destination port) lookup as fallback for this race:
	 * 1. __ip4_datagram_connect() sets sk_rcv_saddr
	 * 2. lookup (this function): new sk_rcv_saddr, hashes not updated yet
	 * 3. rehash operation updating _secondary and four-tuple_ hashes
	 * The primary hash doesn't need an update after 1., so, thanks to this
	 * further step, 1. and 3. don't need to be atomic against the lookup.
	 */
	result = udp4_lib_lookup1(net, saddr, sport, daddr, hnum, dif, sdif,
				  udptable);

done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);

static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
						 struct udp_table *udptable)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 inet_sdif(skb), udptable, skb);
}

struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
	const struct iphdr *iph = (struct iphdr *)(skb->data + offset);
	struct net *net = dev_net(skb->dev);
	int iif, sdif;

	inet_get_iif_sdif(skb, &iif, &sdif);

	return __udp4_lib_lookup(net, iph->saddr, sport,
				 iph->daddr, dport, iif,
				 sdif, net->ipv4.udp_table, NULL);
}

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV4) || IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
struct sock *udp4_lib_lookup(const struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, net->ipv4.udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
#endif

static inline bool __udp_is_mcast_sock(struct net *net, const struct sock *sk,
				       __be16 loc_port, __be32 loc_addr,
				       __be16 rmt_port, __be32 rmt_addr,
				       int dif, int sdif, unsigned short hnum)
{
	const struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
	    ipv6_only_sock(sk) ||
	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
		return false;
	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif))
		return false;
	return true;
}

DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
EXPORT_IPV6_MOD(udp_encap_needed_key);

#if IS_ENABLED(CONFIG_IPV6)
DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
EXPORT_IPV6_MOD(udpv6_encap_needed_key);
#endif

void udp_encap_enable(void)
{
	static_branch_inc(&udp_encap_needed_key);
}
EXPORT_SYMBOL(udp_encap_enable);

void udp_encap_disable(void)
{
	static_branch_dec(&udp_encap_needed_key);
}
EXPORT_SYMBOL(udp_encap_disable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, u32 info);
		const struct ip_tunnel_encap_ops *encap;

		encap = rcu_dereference(iptun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, info))
			return 0;
	}

	return -ENOENT;
}

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp4_lib_err_encap(struct net *net,
					 const struct iphdr *iph,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sock *sk,
					 struct sk_buff *skb, u32 info)
{
	int (*lookup)(struct sock *sk, struct sk_buff *skb);
	int network_offset, transport_offset;
	struct udp_sock *up;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv4 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, iph->ihl << 2);

	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (lookup && lookup(sk, skb))
			sk = NULL;

		goto out;
	}

	sk = __udp4_lib_lookup(net, iph->daddr, uh->source,
			       iph->saddr, uh->dest, skb->dev->ifindex, 0,
			       udptable, NULL);
	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

out:
	if (!sk)
		sk = ERR_PTR(__udp4_lib_err_encap_no_sk(skb, info));

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */

int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
	struct inet_sock *inet;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data + (iph->ihl << 2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
			       iph->saddr, uh->source, skb->dev->ifindex,
			       inet_sdif(skb), udptable, NULL);

	if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
		/* No socket for error: try tunnels before discarding */
		if (static_branch_unlikely(&udp_encap_needed_key)) {
			sk = __udp4_lib_err_encap(net, iph, uh, udptable, sk, skb,
						  info);
			if (!sk)
				return 0;
		} else
			sk = ERR_PTR(-ENOENT);

		if (IS_ERR(sk)) {
			__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			ipv4_sk_update_pmtu(skb, sk, info);
			if (READ_ONCE(inet->pmtudisc) != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	case ICMP_REDIRECT:
		ipv4_sk_redirect(skb, sk);
		goto out;
	}

	/*
	 * RFC1122: OK.  Passes ICMP errors back to application, as per
	 * 4.1.3.3.
	 */
	if (tunnel) {
		/* ...not for tunnels though: we don't have a sending socket */
		if (udp_sk(sk)->encap_err_rcv)
			udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest, info,
						  (u8 *)(uh + 1));
		goto out;
	}
	if (!inet_test_bit(RECVERR, sk)) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh + 1));

	sk->sk_err = err;
	sk_error_report(sk);
out:
	return 0;
}

int udp_err(struct sk_buff *skb, u32 info)
{
	return __udp4_lib_err(skb, info, dev_net(skb->dev)->ipv4.udp_table);
}

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		WRITE_ONCE(up->pending, 0);
		ip_flush_pending_frames(sk);
	}
}
EXPORT_IPV6_MOD(udp_flush_pending_frames);

/**
 * udp4_hwcsum - handle outgoing HW checksumming
 * @skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 * @src:	source IP address
 * @dst:	destination IP address
 */
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
	struct udphdr *uh = udp_hdr(skb);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int hlen = len;
	__wsum csum = 0;

	if (!skb_has_frag_list(skb)) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len,
					       IPPROTO_UDP, 0);
	} else {
		struct sk_buff *frags;

		/*
		 * HW checksumming won't work here: there are two or more
		 * fragments on the socket, so the csums of all sk_buffs
		 * must be folded together in software.
		 */
		skb_walk_frags(skb, frags) {
			csum = csum_add(csum, frags->csum);
			hlen -= frags->len;
		}

		csum = skb_checksum(skb, offset, hlen, csum);
		skb->ip_summed = CHECKSUM_NONE;

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
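		/* Per RFC 768, an all-zero checksum field means "no checksum
		 * supplied", so a checksum that computes to 0 must be
		 * transmitted as all ones instead.
		 */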
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
EXPORT_SYMBOL_GPL(udp4_hwcsum);

/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
 * for the simple case like when setting the checksum for a UDP tunnel.
 */
void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

	if (nocheck) {
		uh->check = 0;
	} else if (skb_is_gso(skb)) {
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		uh->check = 0;
		uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	}
}
EXPORT_SYMBOL(udp_set_csum);

static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
			struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct udphdr *uh;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int datalen = len - sizeof(*uh);
	__wsum csum = 0;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = inet->inet_sport;
	uh->dest = fl4->fl4_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
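		/* Validate the segmentation request before committing to it.
		 * Reading of the checks below (the rationale is inferred, not
		 * authoritative): each segment must fit the cork'ed fragment
		 * size, the payload may not exceed UDP_MAX_SEGMENTS segments,
		 * and GSO is refused when no checksum will be produced
		 * (sk_no_check_tx) or when UDP-Lite or an xfrm transform
		 * would need per-segment work this path cannot do.
		 */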
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + min(datalen, cork->gso_size) > cork->fragsize) {
			kfree_skb(skb);
			return -EMSGSIZE;
		}
		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (sk->sk_no_check_tx) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (is_udplite || dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		if (datalen > cork->gso_size) {
			skb_shinfo(skb)->gso_size = cork->gso_size;
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
								 cork->gso_size);

			/* Don't checksum the payload, skb will get segmented */
			goto csum_partial;
		}
	}

	if (is_udplite)					 /*     UDP-Lite      */
		csum = udplite_csum(skb);

	else if (sk->sk_no_check_tx) {			 /* UDP csum off */

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:

		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
		goto send;

	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_send_skb(sock_net(sk), skb);
	if (err) {
		if (err == -ENOBUFS &&
		    !inet_test_bit(RECVERR, sk)) {
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_OUTDATAGRAMS, is_udplite);
	return err;
}

/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
	struct sk_buff *skb;
	int err = 0;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		goto out;

	err = udp_send_skb(skb, fl4, &inet->cork.base);

out:
	up->len = 0;
	WRITE_ONCE(up->pending, 0);
	return err;
}
EXPORT_IPV6_MOD(udp_push_pending_frames);

static int __udp_cmsg_send(struct cmsghdr *cmsg, u16 *gso_size)
{
	switch (cmsg->cmsg_type) {
	case UDP_SEGMENT:
		if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u16)))
			return -EINVAL;
		*gso_size = *(__u16 *)CMSG_DATA(cmsg);
		return 0;
	default:
		return -EINVAL;
	}
}

int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size)
{
	struct cmsghdr *cmsg;
	bool need_ip = false;
	int err;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_UDP) {
			need_ip = true;
			continue;
		}

		err = __udp_cmsg_send(cmsg, gso_size);
		if (err)
			return err;
	}

	return need_ip;
}
EXPORT_IPV6_MOD_GPL(udp_cmsg_send);
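
/* For reference, a minimal userspace sketch of the UDP_SEGMENT cmsg parsed
 * above ('fd' and 'payload'/'payload_len' are hypothetical, error handling
 * omitted; the cmsg layout mirrors the __udp_cmsg_send() checks):
 *
 *	char cbuf[CMSG_SPACE(sizeof(__u16))] = { 0 };
 *	struct iovec iov = { .iov_base = payload, .iov_len = payload_len };
 *	struct msghdr msg = { 0 };
 *	struct cmsghdr *cm;
 *
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	cm = CMSG_FIRSTHDR(&msg);
 *	cm->cmsg_level = SOL_UDP;
 *	cm->cmsg_type = UDP_SEGMENT;
 *	cm->cmsg_len = CMSG_LEN(sizeof(__u16));
 *	*(__u16 *)CMSG_DATA(cm) = 1400;	// desired segment size
 *	sendmsg(fd, &msg, 0);
 */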

int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
	struct flowi4 fl4_stack;
	struct flowi4 *fl4;
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	u8 scope;
	__be16 dport;
	int err, is_udplite = IS_UDPLITE(sk);
	int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
	struct sk_buff *skb;
	struct ip_options_data opt_copy;
	int uc_index;

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 * Check the flags.
	 */

	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

	fl4 = &inet->cork.fl.u.ip4;
	if (READ_ONCE(up->pending)) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 * Get and verify the address.
	 */
	if (usin) {
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->inet_daddr;
		dport = inet->inet_dport;
		/* Open fast path for connected socket.
		 * Route will not be used if at least one option is set.
		 */
		connected = 1;
	}

	ipcm_init_sk(&ipc, inet);
	ipc.gso_size = READ_ONCE(up->gso_size);

	if (msg->msg_controllen) {
		err = udp_cmsg_send(sk, msg, &ipc.gso_size);
		if (err > 0) {
			err = ip_cmsg_send(sk, msg, &ipc,
					   sk->sk_family == AF_INET6);
			connected = 0;
		}
		if (unlikely(err < 0)) {
			kfree(ipc.opt);
			return err;
		}
		if (ipc.opt)
			free = 1;
	}
	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}

	if (cgroup_bpf_enabled(CGROUP_UDP4_SENDMSG) && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk,
					    (struct sockaddr *)usin,
					    &msg->msg_namelen,
					    &ipc.addr);
		if (err)
			goto out_free;
		if (usin) {
			if (usin->sin_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_free;
			}
			daddr = usin->sin_addr.s_addr;
			dport = usin->sin_port;
		}
	}

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	if (ipc.opt && ipc.opt->opt.srr) {
		if (!daddr) {
			err = -EINVAL;
			goto out_free;
		}
		faddr = ipc.opt->opt.faddr;
		connected = 0;
	}
	scope = ip_sendmsg_scope(inet, &ipc, msg);
	if (scope == RT_SCOPE_LINK)
		connected = 0;

	uc_index = READ_ONCE(inet->uc_index);
	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
			ipc.oif = READ_ONCE(inet->mc_index);
		if (!saddr)
			saddr = READ_ONCE(inet->mc_addr);
		connected = 0;
	} else if (!ipc.oif) {
		ipc.oif = uc_index;
	} else if (ipv4_is_lbcast(daddr) && uc_index) {
		/* oif is set, packet is to local broadcast and
		 * uc_index is set. oif is most likely set
		 * by sk_bound_dev_if. If uc_index != oif check if the
		 * oif is an L3 master and uc_index is an L3 slave.
		 * If so, we want to allow the send using the uc_index.
		 */
		if (ipc.oif != uc_index &&
		    ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk),
							      uc_index)) {
			ipc.oif = uc_index;
		}
	}

	if (connected)
		rt = dst_rtable(sk_dst_check(sk, 0));

	if (!rt) {
		struct net *net = sock_net(sk);
		__u8 flow_flags = inet_sk_flowi_flags(sk);

		fl4 = &fl4_stack;

		flowi4_init_output(fl4, ipc.oif, ipc.sockc.mark,
				   ipc.tos & INET_DSCP_MASK, scope,
				   sk->sk_protocol, flow_flags, faddr, saddr,
				   dport, inet->inet_sport, sk->sk_uid);

		security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
		rt = ip_route_output_flow(net, fl4, sk);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			if (err == -ENETUNREACH)
				IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
			goto out;
		}

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->dst));
	}

	if (msg->msg_flags & MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = fl4->saddr;
	if (!ipc.addr)
		daddr = ipc.addr = fl4->daddr;

	/* Lockless fast path for the non-corking case. */
	if (!corkreq) {
		struct inet_cork cork;

		skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
				  sizeof(struct udphdr), &ipc, &rt,
				  &cork, msg->msg_flags);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_send_skb(skb, fl4, &cork);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("socket already corked\n");
		err = -EINVAL;
		goto out;
	}
	/*
	 * Now cork the socket to pend data.
	 */
	fl4 = &inet->cork.fl.u.ip4;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->fl4_dport = dport;
	fl4->fl4_sport = inet->inet_sport;
	WRITE_ONCE(up->pending, AF_INET);

do_append_data:
	up->len += ulen;
	err = ip_append_data(sk, fl4, getfrag, msg, ulen,
			     sizeof(struct udphdr), &ipc, &rt,
			     corkreq ? msg->msg_flags | MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		WRITE_ONCE(up->pending, 0);
	release_sock(sk);

out:
	ip_rt_put(rt);
out_free:
	if (free)
		kfree(ipc.opt);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(&rt->dst, &fl4->daddr);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udp_sendmsg);

void udp_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct udp_sock *up = udp_sk(sk);

	if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
		return;

	lock_sock(sk);
	if (up->pending && !udp_test_bit(CORK, sk))
		udp_push_pending_frames(sk);
	release_sock(sk);
}
EXPORT_IPV6_MOD_GPL(udp_splice_eof);

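/* Flag folded into the top bit of the dev_scratch _tsize_state word, next to
 * the cached skb truesize; udp_skb_truesize() below masks it back out.
 */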
#define UDP_SKB_IS_STATELESS 0x80000000

/* all head states (dst, sk, nf conntrack) except skb extensions are
 * cleared by udp_rcv().
 *
 * We need to preserve secpath, if present, to eventually process
 * IP_CMSG_PASSSEC at recvmsg() time.
 *
 * Other extensions can be cleared.
 */
static bool udp_try_make_stateless(struct sk_buff *skb)
{
	if (!skb_has_extensions(skb))
		return true;

	if (!secpath_exists(skb)) {
		skb_ext_reset(skb);
		return true;
	}

	return false;
}

static void udp_set_dev_scratch(struct sk_buff *skb)
{
	struct udp_dev_scratch *scratch = udp_skb_scratch(skb);

	BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long));
	scratch->_tsize_state = skb->truesize;
#if BITS_PER_LONG == 64
	scratch->len = skb->len;
	scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
	scratch->is_linear = !skb_is_nonlinear(skb);
#endif
	if (udp_try_make_stateless(skb))
		scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
}

static void udp_skb_csum_unnecessary_set(struct sk_buff *skb)
{
	/* We come here after udp_lib_checksum_complete() returned 0.
	 * This means that __skb_checksum_complete() might have
	 * set skb->csum_valid to 1.
	 * On 64bit platforms, we can set csum_unnecessary
	 * to true, but only if the skb is not shared.
	 */
#if BITS_PER_LONG == 64
	if (!skb_shared(skb))
		udp_skb_scratch(skb)->csum_unnecessary = true;
#endif
}

static int udp_skb_truesize(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
}

static bool udp_skb_has_head_state(struct sk_buff *skb)
{
	return !(udp_skb_scratch(skb)->_tsize_state & UDP_SKB_IS_STATELESS);
}

/* fully reclaim rmem/fwd memory allocated for skb */
static void udp_rmem_release(struct sock *sk, unsigned int size,
			     int partial, bool rx_queue_lock_held)
{
	struct udp_sock *up = udp_sk(sk);
	struct sk_buff_head *sk_queue;
	unsigned int amt;

	if (likely(partial)) {
		up->forward_deficit += size;
		size = up->forward_deficit;
		if (size < READ_ONCE(up->forward_threshold) &&
		    !skb_queue_empty(&up->reader_queue))
			return;
	} else {
		size += up->forward_deficit;
	}
	up->forward_deficit = 0;

	/* acquire the sk_receive_queue for fwd allocated memory scheduling,
	 * if the caller doesn't hold it already
	 */
	sk_queue = &sk->sk_receive_queue;
	if (!rx_queue_lock_held)
		spin_lock(&sk_queue->lock);

	amt = (size + sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
	sk_forward_alloc_add(sk, size - amt);

	if (amt)
		__sk_mem_reduce_allocated(sk, amt >> PAGE_SHIFT);

	atomic_sub(size, &sk->sk_rmem_alloc);

	/* this can save us from acquiring the rx queue lock on next receive */
	skb_queue_splice_tail_init(sk_queue, &up->reader_queue);

	if (!rx_queue_lock_held)
		spin_unlock(&sk_queue->lock);
}

/* Note: called with reader_queue.lock held.
 * Instead of using skb->truesize here, find a copy of it in skb->dev_scratch
 * to avoid a cache line miss while the receive_queue lock is held.
 * Look at __udp_enqueue_schedule_skb() to find where this copy is done.
 */
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
{
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, false);
}
EXPORT_IPV6_MOD(udp_skb_destructor);

/* as above, but the caller held the rx queue lock, too */
static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb)
{
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, true);
}

/* The idea of busylocks is to let producers grab an extra spinlock
 * to relieve pressure on the receive_queue spinlock shared by consumers.
 * Under flood, this means that only one producer can be in line
 * trying to acquire the receive_queue spinlock.
 * These busylocks can be allocated on a per-CPU basis, instead of a
 * per-socket one (that would consume a cache line per socket).
 */
1696 | static int udp_busylocks_log __read_mostly; |
1697 | static spinlock_t *udp_busylocks __read_mostly; |
1698 | |
1699 | static spinlock_t *busylock_acquire(void *ptr) |
1700 | { |
1701 | spinlock_t *busy; |
1702 | |
1703 | busy = udp_busylocks + hash_ptr(ptr, bits: udp_busylocks_log); |
1704 | spin_lock(lock: busy); |
1705 | return busy; |
1706 | } |
1707 | |
1708 | static void busylock_release(spinlock_t *busy) |
1709 | { |
1710 | if (busy) |
1711 | spin_unlock(lock: busy); |
1712 | } |
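
/* Design note (illustrative): hash_ptr() spreads sockets across the
 * 2^udp_busylocks_log lock array, so two flooded sockets usually queue
 * on different busylocks.  A NULL 'busy' pointer, the common uncontended
 * case in __udp_enqueue_schedule_skb() below, makes busylock_release()
 * a no-op; the extra lock is only taken once the receive queue is more
 * than half full.
 */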

static int udp_rmem_schedule(struct sock *sk, int size)
{
	int delta;

	delta = size - sk->sk_forward_alloc;
	if (delta > 0 && !__sk_mem_schedule(sk, delta, SK_MEM_RECV))
		return -ENOBUFS;

	return 0;
}

int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;
	unsigned int rmem, rcvbuf;
	spinlock_t *busy = NULL;
	int size, err = -ENOMEM;

	rmem = atomic_read(&sk->sk_rmem_alloc);
	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
	size = skb->truesize;

	/* Immediately drop when the receive queue is full.
	 * Cast to unsigned int performs the boundary check for INT_MAX.
	 */
	if (rmem + size > rcvbuf) {
		if (rcvbuf > INT_MAX >> 1)
			goto drop;

		/* Always allow at least one packet for small buffer. */
		if (rmem > rcvbuf)
			goto drop;
	}

	/* Under memory pressure, it might be helpful to give udp_recvmsg()
	 * linear skbs:
	 * - Reduce memory overhead and thus increase receive queue capacity
	 * - Less cache line misses at copyout() time
	 * - Less work at consume_skb() (less alien page frag freeing)
	 */
	if (rmem > (rcvbuf >> 1)) {
		skb_condense(skb);
		size = skb->truesize;
		busy = busylock_acquire(sk);
	}

	udp_set_dev_scratch(skb);

	atomic_add(size, &sk->sk_rmem_alloc);

	spin_lock(&list->lock);
	err = udp_rmem_schedule(sk, size);
	if (err) {
		spin_unlock(&list->lock);
		goto uncharge_drop;
	}

	sk_forward_alloc_add(sk, -size);

	/* no need to setup a destructor, we will explicitly release the
	 * forward allocated memory on dequeue
	 */
	sock_skb_set_dropcount(sk, skb);

	__skb_queue_tail(list, skb);
	spin_unlock(&list->lock);

	if (!sock_flag(sk, SOCK_DEAD))
		INDIRECT_CALL_1(sk->sk_data_ready, sock_def_readable, sk);

	busylock_release(busy);
	return 0;

uncharge_drop:
	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);

drop:
	atomic_inc(&sk->sk_drops);
	busylock_release(busy);
	return err;
}
EXPORT_IPV6_MOD_GPL(__udp_enqueue_schedule_skb);

void udp_destruct_common(struct sock *sk)
{
	/* reclaim completely the forward allocated memory */
	struct udp_sock *up = udp_sk(sk);
	unsigned int total = 0;
	struct sk_buff *skb;

	skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue);
	while ((skb = __skb_dequeue(&up->reader_queue)) != NULL) {
		total += skb->truesize;
		kfree_skb(skb);
	}
	udp_rmem_release(sk, total, 0, true);
}
EXPORT_IPV6_MOD_GPL(udp_destruct_common);

static void udp_destruct_sock(struct sock *sk)
{
	udp_destruct_common(sk);
	inet_sock_destruct(sk);
}

int udp_init_sock(struct sock *sk)
{
	udp_lib_init_sock(sk);
	sk->sk_destruct = udp_destruct_sock;
	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
	return 0;
}

void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{
	if (unlikely(READ_ONCE(udp_sk(sk)->peeking_with_offset)))
		sk_peek_offset_bwd(sk, len);

	if (!skb_unref(skb))
		return;

	/* In the more common cases we cleared the head states previously,
	 * see __udp_queue_rcv_skb().
	 */
	if (unlikely(udp_skb_has_head_state(skb)))
		skb_release_head_state(skb);
	__consume_stateless_skb(skb);
}
EXPORT_IPV6_MOD_GPL(skb_consume_udp);

static struct sk_buff *__first_packet_length(struct sock *sk,
					     struct sk_buff_head *rcvq,
					     unsigned int *total)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(rcvq)) != NULL) {
		if (udp_lib_checksum_complete(skb)) {
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			atomic_inc(&sk->sk_drops);
			__skb_unlink(skb, rcvq);
			*total += skb->truesize;
			kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM);
		} else {
			udp_skb_csum_unnecessary_set(skb);
			break;
		}
	}
	return skb;
}

/**
 * first_packet_length - return length of first packet in receive queue
 * @sk: socket
 *
 * Drops all bad checksum frames, until a valid one is found.
 * Returns the length of the found skb, or -1 if none is found.
 */
static int first_packet_length(struct sock *sk)
{
	struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
	unsigned int total = 0;
	struct sk_buff *skb;
	int res;

	spin_lock_bh(&rcvq->lock);
	skb = __first_packet_length(sk, rcvq, &total);
	if (!skb && !skb_queue_empty_lockless(sk_queue)) {
		spin_lock(&sk_queue->lock);
		skb_queue_splice_tail_init(sk_queue, rcvq);
		spin_unlock(&sk_queue->lock);

		skb = __first_packet_length(sk, rcvq, &total);
	}
	res = skb ? skb->len : -1;
	if (total)
		udp_rmem_release(sk, total, 1, false);
	spin_unlock_bh(&rcvq->lock);
	return res;
}

/*
 *	IOCTL requests applicable to the UDP protocol
 */

int udp_ioctl(struct sock *sk, int cmd, int *karg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		*karg = sk_wmem_alloc_get(sk);
		return 0;
	}

	case SIOCINQ:
	{
		*karg = max_t(int, 0, first_packet_length(sk));
		return 0;
	}

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
EXPORT_IPV6_MOD(udp_ioctl);
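
/* Userspace sketch (illustrative, not part of this file): SIOCINQ on a
 * UDP socket reports the payload length of the next datagram, which maps
 * to first_packet_length() above:
 *
 *	int avail;
 *	if (ioctl(fd, SIOCINQ, &avail) == 0)
 *		printf("next datagram carries %d bytes\n", avail);
 *
 * Note the max_t() clamp: an empty queue reads back as 0, not -1.
 */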

struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
			       int *off, int *err)
{
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
	struct sk_buff_head *queue;
	struct sk_buff *last;
	long timeo;
	int error;

	queue = &udp_sk(sk)->reader_queue;
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		struct sk_buff *skb;

		error = sock_error(sk);
		if (error)
			break;

		error = -EAGAIN;
		do {
			spin_lock_bh(&queue->lock);
			skb = __skb_try_recv_from_queue(queue, flags, off, err,
							&last);
			if (skb) {
				if (!(flags & MSG_PEEK))
					udp_skb_destructor(sk, skb);
				spin_unlock_bh(&queue->lock);
				return skb;
			}

			if (skb_queue_empty_lockless(sk_queue)) {
				spin_unlock_bh(&queue->lock);
				goto busy_check;
			}

			/* refill the reader queue and walk it again
			 * keep both queues locked to avoid re-acquiring
			 * the sk_receive_queue lock if fwd memory scheduling
			 * is needed.
			 */
			spin_lock(&sk_queue->lock);
			skb_queue_splice_tail_init(sk_queue, queue);

			skb = __skb_try_recv_from_queue(queue, flags, off, err,
							&last);
			if (skb && !(flags & MSG_PEEK))
				udp_skb_dtor_locked(sk, skb);
			spin_unlock(&sk_queue->lock);
			spin_unlock_bh(&queue->lock);
			if (skb)
				return skb;

busy_check:
			if (!sk_can_busy_loop(sk))
				break;

			sk_busy_loop(sk, flags & MSG_DONTWAIT);
		} while (!skb_queue_empty_lockless(sk_queue));

		/* sk_queue is empty, reader_queue may contain peeked packets */
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
					      &error, &timeo,
					      (struct sk_buff *)sk_queue));

	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_recv_udp);

int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	int err;

try_again:
	skb = skb_recv_udp(sk, MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (udp_lib_checksum_complete(skb)) {
		int is_udplite = IS_UDPLITE(sk);
		struct net *net = sock_net(sk);

		__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, is_udplite);
		__UDP_INC_STATS(net, UDP_MIB_INERRORS, is_udplite);
		atomic_inc(&sk->sk_drops);
		kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM);
		goto try_again;
	}

	WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk));
	return recv_actor(sk, skb);
}
EXPORT_IPV6_MOD(udp_read_skb);

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
		int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	bool checksum_valid = false;

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, &off, &err);
	if (!skb)
		return err;

	ulen = udp_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);

		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (unlikely(err)) {
		if (!peeking) {
			atomic_inc(&sk->sk_drops);
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_INERRORS, is_udplite);
		}
		kfree_skb(skb);
		return err;
	}

	if (!peeking)
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_INDATAGRAMS, is_udplite);

	sock_recv_cmsgs(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = udp_hdr(skb)->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);

		BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk,
						      (struct sockaddr *)sin,
						      addr_len);
	}

	if (udp_test_bit(GRO_ENABLED, sk))
		udp_cmsg_recv(msg, sk, skb);

	if (inet_cmsg_flags(inet))
		ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	}
	kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}
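
/* Userspace sketch (illustrative): the MSG_TRUNC handling above means a
 * caller can learn the full datagram size even with a short buffer:
 *
 *	char buf[64];
 *	ssize_t n = recv(fd, buf, sizeof(buf), MSG_TRUNC);
 *	// n is the full datagram length; only min(n, 64) bytes were copied
 *
 * and MSG_PEEK leaves the datagram queued for a later read.
 */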

int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	/* This check is replicated from __ip4_datagram_connect() and
	 * intended to prevent BPF program called below from accessing bytes
	 * that are out of the bound specified by user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, &addr_len);
}
EXPORT_IPV6_MOD(udp_pre_connect);

static int udp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	int res;

	lock_sock(sk);
	res = __ip4_datagram_connect(sk, uaddr, addr_len);
	if (!res)
		udp4_hash4(sk);
	release_sock(sk);
	return res;
}

int __udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	sock_rps_reset_rxhash(sk);
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) {
		inet_reset_saddr(sk);
		if (sk->sk_prot->rehash &&
		    (sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			sk->sk_prot->rehash(sk);
	}

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->inet_sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}
EXPORT_SYMBOL(__udp_disconnect);

int udp_disconnect(struct sock *sk, int flags)
{
	lock_sock(sk);
	__udp_disconnect(sk, flags);
	release_sock(sk);
	return 0;
}
EXPORT_IPV6_MOD(udp_disconnect);
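
/* Userspace sketch (illustrative): the disconnect path above is reached
 * by connect()ing to an address whose family is AF_UNSPEC, which
 * dissolves the association while keeping the local binding:
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *	connect(fd, &sa, sizeof(sa));	// socket is unconnected again
 */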

void udp_lib_unhash(struct sock *sk)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = udp_get_table_prot(sk);
		struct udp_hslot *hslot, *hslot2;

		sock_rps_delete_flow(sk);
		hslot  = udp_hashslot(udptable, sock_net(sk),
				      udp_sk(sk)->udp_port_hash);
		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);

		spin_lock_bh(&hslot->lock);
		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_detach_sock(sk);
		if (sk_del_node_init_rcu(sk)) {
			hslot->count--;
			inet_sk(sk)->inet_num = 0;
			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

			spin_lock(&hslot2->lock);
			hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
			hslot2->count--;
			spin_unlock(&hslot2->lock);

			udp_unhash4(udptable, sk);
		}
		spin_unlock_bh(&hslot->lock);
	}
}
EXPORT_IPV6_MOD(udp_lib_unhash);

/*
 * inet_rcv_saddr was changed, we must rehash secondary hash
 */
void udp_lib_rehash(struct sock *sk, u16 newhash, u16 newhash4)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = udp_get_table_prot(sk);
		struct udp_hslot *hslot, *hslot2, *nhslot2;

		hslot = udp_hashslot(udptable, sock_net(sk),
				     udp_sk(sk)->udp_port_hash);
		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		nhslot2 = udp_hashslot2(udptable, newhash);
		udp_sk(sk)->udp_portaddr_hash = newhash;

		if (hslot2 != nhslot2 ||
		    rcu_access_pointer(sk->sk_reuseport_cb)) {
			/* we must lock primary chain too */
			spin_lock_bh(&hslot->lock);
			if (rcu_access_pointer(sk->sk_reuseport_cb))
				reuseport_detach_sock(sk);

			if (hslot2 != nhslot2) {
				spin_lock(&hslot2->lock);
				hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
				hslot2->count--;
				spin_unlock(&hslot2->lock);

				spin_lock(&nhslot2->lock);
				hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
						   &nhslot2->head);
				nhslot2->count++;
				spin_unlock(&nhslot2->lock);
			}

			spin_unlock_bh(&hslot->lock);
		}

		/* Now process hash4 if necessary:
		 * (1) update hslot4;
		 * (2) update hslot2->hash4_cnt.
		 * Note that hslot2/hslot4 should be checked separately, as
		 * either of them may change with the other unchanged.
		 */
		if (udp_hashed4(sk)) {
			spin_lock_bh(&hslot->lock);

			udp_rehash4(udptable, sk, newhash4);
			if (hslot2 != nhslot2) {
				spin_lock(&hslot2->lock);
				udp_hash4_dec(hslot2);
				spin_unlock(&hslot2->lock);

				spin_lock(&nhslot2->lock);
				udp_hash4_inc(nhslot2);
				spin_unlock(&nhslot2->lock);
			}

			spin_unlock_bh(&hslot->lock);
		}
	}
}
EXPORT_IPV6_MOD(udp_lib_rehash);

void udp_v4_rehash(struct sock *sk)
{
	u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	u16 new_hash4 = udp_ehashfn(sock_net(sk),
				    sk->sk_rcv_saddr, sk->sk_num,
				    sk->sk_daddr, sk->sk_dport);

	udp_lib_rehash(sk, new_hash, new_hash4);
}

static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (inet_sk(sk)->inet_daddr) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);
		int drop_reason;

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM) {
			UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
				      is_udplite);
			drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
		} else {
			UDP_INC_STATS(sock_net(sk), UDP_MIB_MEMERRORS,
				      is_udplite);
			drop_reason = SKB_DROP_REASON_PROTO_MEM;
		}
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		trace_udp_fail_queue_rcv_skb(rc, sk, skb);
		sk_skb_reason_drop(sk, skb, drop_reason);
		return -1;
	}

	return 0;
}

/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	int drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
		goto drop;
	}
	nf_reset_ct(skb);

	if (static_branch_unlikely(&udp_encap_needed_key) &&
	    READ_ONCE(up->encap_type)) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 *	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
		u16 pcrlen = READ_ONCE(up->pcrlen);

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver. This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov < pcrlen) {
			net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
		goto drop;
	}

	udp_csum_pull_header(skb);

	ipv4_pktinfo_prepare(sk, skb, true);
	return __udp_queue_rcv_skb(sk, skb);

csum_error:
	drop_reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	sk_skb_reason_drop(sk, skb, drop_reason);
	return -1;
}

static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udp_queue_rcv_one_skb(sk, skb);

	BUILD_BUG_ON(sizeof(struct udp_skb_cb) > SKB_GSO_CB_OFFSET);
	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, true);
	skb_list_walk_safe(segs, skb, next) {
		__skb_pull(skb, skb_transport_offset(skb));

		udp_post_segment_fix_csum(skb);
		ret = udp_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip_protocol_deliver_rcu(dev_net(skb->dev), skb, ret);
	}
	return 0;
}

/* For TCP sockets, sk_rx_dst is protected by socket lock
 * For UDP, we use xchg() to guard against concurrent changes.
 */
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old;

	if (dst_hold_safe(dst)) {
		old = unrcu_pointer(xchg(&sk->sk_rx_dst, RCU_INITIALIZER(dst)));
		dst_release(old);
		return old != dst;
	}
	return false;
}
EXPORT_IPV6_MOD(udp_sk_rx_dst_set);

/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context.
 */
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
				    struct udphdr *uh,
				    __be32 saddr, __be32 daddr,
				    struct udp_table *udptable,
				    int proto)
{
	struct sock *sk, *first = NULL;
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	int dif = skb->dev->ifindex;
	int sdif = inet_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
			    udptable->mask;
		hash2 = ipv4_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2].hslot;
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
					 uh->source, saddr, dif, sdif, hnum))
			continue;

		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);

		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(net, UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			continue;
		}
		if (udp_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udp_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				proto == IPPROTO_UDPLITE);
	}
	return 0;
}
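
/* Userspace sketch (illustrative): a socket only matches in the loop
 * above after joining the group, e.g.:
 *
 *	struct ip_mreqn mreq = {
 *		.imr_multiaddr.s_addr = inet_addr("239.1.1.1"),
 *		.imr_ifindex = if_nametoindex("eth0"),
 *	};
 *	setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 *
 * Every matching listener gets its own clone of the skb; the last match
 * consumes the original to save one copy.
 */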

/* Initialize UDP checksum. If exited with zero value (success),
 * CHECKSUM_UNNECESSARY means that no more checks are required.
 * Otherwise, csum completion requires checksumming packet body,
 * including udp header and folding it to skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;

		if (UDP_SKB_CB(skb)->partial_cov) {
			skb->csum = inet_compute_pseudo(skb, proto);
			return 0;
		}
	}

	/* Note, we are only interested in != 0 or == 0, thus the
	 * force to int.
	 */
	err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
							inet_compute_pseudo);
	if (err)
		return err;

	if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) {
		/* If SW calculated the value, we know it's bad */
		if (skb->csum_complete_sw)
			return 1;

		/* HW says the value is bad. Let's validate that.
		 * skb->csum is no longer the full packet checksum,
		 * so don't treat it as such.
		 */
		skb_checksum_complete_unset(skb);
	}

	return 0;
}

/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
			       struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, inet_compute_pseudo);

	ret = udp_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input, but
	 * it wants the return to be -protocol, or 0
	 */
	if (ret > 0)
		return -ret;
	return 0;
}

/*
 *	All we need to do is get the socket, and then do a checksum.
 */

int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	struct sock *sk = NULL;
	struct udphdr *uh;
	unsigned short ulen;
	struct rtable *rt = skb_rtable(skb);
	__be32 saddr, daddr;
	struct net *net = dev_net(skb->dev);
	bool refcounted;
	int drop_reason;

	drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;

	/*
	 *  Validate the packet.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto drop;		/* No space for header. */

	uh   = udp_hdr(skb);
	ulen = ntohs(uh->len);
	saddr = ip_hdr(skb)->saddr;
	daddr = ip_hdr(skb)->daddr;

	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */
		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
			goto short_packet;
		uh = udp_hdr(skb);
	}

	if (udp4_csum_init(skb, uh, proto))
		goto csum_error;

	sk = inet_steal_sock(net, skb, sizeof(struct udphdr), saddr, uh->source, daddr, uh->dest,
			     &refcounted, udp_ehashfn);
	if (IS_ERR(sk))
		goto no_sk;

	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
			udp_sk_rx_dst_set(sk, dst);

		ret = udp_unicast_rcv_skb(sk, skb, uh);
		if (refcounted)
			sock_put(sk);
		return ret;
	}

	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
		return __udp4_lib_mcast_deliver(net, skb, uh,
						saddr, daddr, udptable, proto);

	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk)
		return udp_unicast_rcv_skb(sk, skb, uh);
no_sk:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset_ct(skb);

	/* No socket. Drop packet silently, if checksum is wrong */
	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	drop_reason = SKB_DROP_REASON_NO_SOCKET;
	__UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * Hmm.  We got a UDP packet to a port to which we
	 * don't wanna listen.  Ignore it.
	 */
	sk_skb_reason_drop(sk, skb, drop_reason);
	return 0;

short_packet:
	drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
	net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source),
			    ulen, skb->len,
			    &daddr, ntohs(uh->dest));
	goto drop;

csum_error:
	/*
	 * RFC1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	drop_reason = SKB_DROP_REASON_UDP_CSUM;
	net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
			    ulen);
	__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
	__UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	sk_skb_reason_drop(sk, skb, drop_reason);
	return 0;
}

/* We can only early demux multicast if there is a single matching socket.
 * If more than one socket is found, return NULL.
 */
static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
						  __be16 loc_port, __be32 loc_addr,
						  __be16 rmt_port, __be32 rmt_addr,
						  int dif, int sdif)
{
	struct udp_table *udptable = net->ipv4.udp_table;
	unsigned short hnum = ntohs(loc_port);
	struct sock *sk, *result;
	struct udp_hslot *hslot;
	unsigned int slot;

	slot = udp_hashfn(net, hnum, udptable->mask);
	hslot = &udptable->hash[slot];

	/* Do not bother scanning a too big list */
	if (hslot->count > 10)
		return NULL;

	result = NULL;
	sk_for_each_rcu(sk, &hslot->head) {
		if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
					rmt_port, rmt_addr, dif, sdif, hnum)) {
			if (result)
				return NULL;
			result = sk;
		}
	}

	return result;
}

/* For unicast we should only early demux connected sockets or we can
 * break forwarding setups.  The chains here can be long so only check
 * if the first socket is an exact match and if not move on.
 */
static struct sock *__udp4_lib_demux_lookup(struct net *net,
					    __be16 loc_port, __be32 loc_addr,
					    __be16 rmt_port, __be32 rmt_addr,
					    int dif, int sdif)
{
	struct udp_table *udptable = net->ipv4.udp_table;
	INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
	unsigned short hnum = ntohs(loc_port);
	struct udp_hslot *hslot2;
	unsigned int hash2;
	__portpair ports;
	struct sock *sk;

	hash2 = ipv4_portaddr_hash(net, loc_addr, hnum);
	hslot2 = udp_hashslot2(udptable, hash2);
	ports = INET_COMBINED_PORTS(rmt_port, hnum);

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (inet_match(net, sk, acookie, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}

int udp_v4_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct in_device *in_dev = NULL;
	const struct iphdr *iph;
	const struct udphdr *uh;
	struct sock *sk = NULL;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet_sdif(skb);
	int ours;

	/* validate the packet */
	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
		return 0;

	iph = ip_hdr(skb);
	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_MULTICAST) {
		in_dev = __in_dev_get_rcu(skb->dev);

		if (!in_dev)
			return 0;

		ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
				       iph->protocol);
		if (!ours)
			return 0;

		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
						   uh->source, iph->saddr,
						   dif, sdif);
	} else if (skb->pkt_type == PACKET_HOST) {
		sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
					     uh->source, iph->saddr, dif, sdif);
	}

	if (!sk)
		return 0;

	skb->sk = sk;
	DEBUG_NET_WARN_ON_ONCE(sk_is_refcounted(sk));
	skb->destructor = sock_pfree;
	dst = rcu_dereference(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, 0);
	if (dst) {
		u32 itag = 0;

		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);

		/* for unconnected multicast sockets we need to validate
		 * the source on each packet
		 */
		if (!inet_sk(sk)->inet_daddr && in_dev)
			return ip_mc_validate_source(skb, iph->daddr,
						     iph->saddr,
						     ip4h_dscp(iph),
						     skb->dev, in_dev, &itag);
	}
	return 0;
}

int udp_rcv(struct sk_buff *skb)
{
	return __udp4_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP);
}

void udp_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	bool slow = lock_sock_fast(sk);

	/* protects from races with udp_abort() */
	sock_set_flag(sk, SOCK_DEAD);
	udp_flush_pending_frames(sk);
	unlock_sock_fast(sk, slow);
	if (static_branch_unlikely(&udp_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);

			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (udp_test_bit(ENCAP_ENABLED, sk)) {
			static_branch_dec(&udp_encap_needed_key);
			udp_tunnel_cleanup_gro(sk);
		}
	}
}

typedef struct sk_buff *(*udp_gro_receive_t)(struct sock *sk,
					     struct list_head *head,
					     struct sk_buff *skb);

static void set_xfrm_gro_udp_encap_rcv(__u16 encap_type, unsigned short family,
					struct sock *sk)
{
#ifdef CONFIG_XFRM
	udp_gro_receive_t new_gro_receive;

	if (udp_test_bit(GRO_ENABLED, sk) && encap_type == UDP_ENCAP_ESPINUDP) {
		if (IS_ENABLED(CONFIG_IPV6) && family == AF_INET6)
			new_gro_receive = ipv6_stub->xfrm6_gro_udp_encap_rcv;
		else
			new_gro_receive = xfrm4_gro_udp_encap_rcv;

		if (udp_sk(sk)->gro_receive != new_gro_receive) {
			/*
			 * With IPV6_ADDRFORM the gro callback could change
			 * after it has been set; unregister the old one,
			 * if valid.
			 */
			if (udp_sk(sk)->gro_receive)
				udp_tunnel_update_gro_rcv(sk, false);

			WRITE_ONCE(udp_sk(sk)->gro_receive, new_gro_receive);
			udp_tunnel_update_gro_rcv(sk, true);
		}
	}
#endif
}

/*
 *	Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       sockptr_t optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *))
{
	struct udp_sock *up = udp_sk(sk);
	int val, valbool;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);

	if (level == SOL_SOCKET) {
		err = sk_setsockopt(sk, level, optname, optval, optlen);

		if (optname == SO_RCVBUF || optname == SO_RCVBUFFORCE) {
			sockopt_lock_sock(sk);
			/* paired with READ_ONCE in udp_rmem_release() */
			WRITE_ONCE(up->forward_threshold, sk->sk_rcvbuf >> 2);
			sockopt_release_sock(sk);
		}
		return err;
	}

	if (optlen < sizeof(int))
		return -EINVAL;

	if (copy_from_sockptr(&val, optval, sizeof(val)))
		return -EFAULT;

	valbool = val ? 1 : 0;

	switch (optname) {
	case UDP_CORK:
		if (val != 0) {
			udp_set_bit(CORK, sk);
		} else {
			udp_clear_bit(CORK, sk);
			lock_sock(sk);
			push_pending_frames(sk);
			release_sock(sk);
		}
		break;

	case UDP_ENCAP:
		sockopt_lock_sock(sk);
		switch (val) {
		case 0:
#ifdef CONFIG_XFRM
		case UDP_ENCAP_ESPINUDP:
			set_xfrm_gro_udp_encap_rcv(val, sk->sk_family, sk);
#if IS_ENABLED(CONFIG_IPV6)
			if (sk->sk_family == AF_INET6)
				WRITE_ONCE(up->encap_rcv,
					   ipv6_stub->xfrm6_udp_encap_rcv);
			else
#endif
				WRITE_ONCE(up->encap_rcv,
					   xfrm4_udp_encap_rcv);
#endif
			fallthrough;
		case UDP_ENCAP_L2TPINUDP:
			WRITE_ONCE(up->encap_type, val);
			udp_tunnel_encap_enable(sk);
			break;
		default:
			err = -ENOPROTOOPT;
			break;
		}
		sockopt_release_sock(sk);
		break;

	case UDP_NO_CHECK6_TX:
		udp_set_no_check6_tx(sk, valbool);
		break;

	case UDP_NO_CHECK6_RX:
		udp_set_no_check6_rx(sk, valbool);
		break;

	case UDP_SEGMENT:
		if (val < 0 || val > USHRT_MAX)
			return -EINVAL;
		WRITE_ONCE(up->gso_size, val);
		break;

	case UDP_GRO:
		sockopt_lock_sock(sk);
		/* when enabling GRO, accept the related GSO packet type */
		if (valbool)
			udp_tunnel_encap_enable(sk);
		udp_assign_bit(GRO_ENABLED, sk, valbool);
		udp_assign_bit(ACCEPT_L4, sk, valbool);
		set_xfrm_gro_udp_encap_rcv(up->encap_type, sk->sk_family, sk);
		sockopt_release_sock(sk);
		break;

	/*
	 * 	UDP-Lite's partial checksum coverage (RFC 3828).
	 */
	/* The sender sets actual checksum coverage length via this option.
	 * The case coverage > packet length is handled by send module. */
	case UDPLITE_SEND_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		WRITE_ONCE(up->pcslen, val);
		udp_set_bit(UDPLITE_SEND_CC, sk);
		break;

	/* The receiver specifies a minimum checksum coverage value. To make
	 * sense, this should be set to at least 8 (as done below). If zero is
	 * used, this again means full checksum coverage.                     */
	case UDPLITE_RECV_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Avoid silly minimal values.       */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		WRITE_ONCE(up->pcrlen, val);
		udp_set_bit(UDPLITE_RECV_CC, sk);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
EXPORT_IPV6_MOD(udp_lib_setsockopt);
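
/* Userspace sketch (illustrative): UDP_SEGMENT enables UDP GSO, letting
 * one sendmsg() carry a buffer the stack slices into gso_size-byte
 * datagrams:
 *
 *	int gso_size = 1400;
 *	setsockopt(fd, SOL_UDP, UDP_SEGMENT, &gso_size, sizeof(gso_size));
 *	send(fd, buf, 1400 * 8, 0);	// transmitted as 8 datagrams
 *
 * Setting the value back to 0 disables segmentation.
 */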

int udp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		   unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET)
		return udp_lib_setsockopt(sk, level, optname,
					  optval, optlen,
					  udp_push_pending_frames);
	return ip_setsockopt(sk, level, optname, optval, optlen);
}

int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case UDP_CORK:
		val = udp_test_bit(CORK, sk);
		break;

	case UDP_ENCAP:
		val = READ_ONCE(up->encap_type);
		break;

	case UDP_NO_CHECK6_TX:
		val = udp_get_no_check6_tx(sk);
		break;

	case UDP_NO_CHECK6_RX:
		val = udp_get_no_check6_rx(sk);
		break;

	case UDP_SEGMENT:
		val = READ_ONCE(up->gso_size);
		break;

	case UDP_GRO:
		val = udp_test_bit(GRO_ENABLED, sk);
		break;

	/* The following two cannot be changed on UDP sockets, the return is
	 * always 0 (which corresponds to the full checksum coverage of UDP). */
	case UDPLITE_SEND_CSCOV:
		val = READ_ONCE(up->pcslen);
		break;

	case UDPLITE_RECV_CSCOV:
		val = READ_ONCE(up->pcrlen);
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
EXPORT_IPV6_MOD(udp_lib_getsockopt);

int udp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ip_getsockopt(sk, level, optname, optval, optlen);
}

/**
 *	udp_poll - wait for a UDP event.
 *	@file: - file struct
 *	@sock: - socket
 *	@wait: - poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets.  If an application is using a blocking fd and a
 *	packet with a checksum error is in the queue, select could indicate
 *	data available while the subsequent read blocks.  Add special case
 *	code to work around these arguably broken applications.
 */
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;

	if (!skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Check for false positives due to checksum errors */
	if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
	    !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
		mask &= ~(EPOLLIN | EPOLLRDNORM);

	/* psock ingress_msg queue should not contain any bad checksum frames */
	if (sk_is_readable(sk))
		mask |= EPOLLIN | EPOLLRDNORM;
	return mask;
}
EXPORT_IPV6_MOD(udp_poll);
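
/* Userspace sketch (illustrative): thanks to the workaround above, a
 * blocking reader can trust POLLIN on UDP sockets:
 *
 *	char buf[2048];
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		recv(fd, buf, sizeof(buf), 0);	// should not block here
 *
 * Without first_packet_length() culling bad-checksum frames, the recv()
 * could still block.
 */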

int udp_abort(struct sock *sk, int err)
{
	if (!has_current_bpf_ctx())
		lock_sock(sk);

	/* udp{v6}_destroy_sock() sets it under the sk lock, avoid racing
	 * with close()
	 */
	if (sock_flag(sk, SOCK_DEAD))
		goto out;

	sk->sk_err = err;
	sk_error_report(sk);
	__udp_disconnect(sk, 0);

out:
	if (!has_current_bpf_ctx())
		release_sock(sk);

	return 0;
}
EXPORT_IPV6_MOD_GPL(udp_abort);

struct proto udp_prot = {
	.name			= "UDP",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udp_pre_connect,
	.connect		= udp_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udp_init_sock,
	.destroy		= udp_destroy_sock,
	.setsockopt		= udp_setsockopt,
	.getsockopt		= udp_getsockopt,
	.sendmsg		= udp_sendmsg,
	.recvmsg		= udp_recvmsg,
	.splice_eof		= udp_splice_eof,
	.release_cb		= ip4_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v4_rehash,
	.get_port		= udp_v4_get_port,
	.put_port		= udp_lib_unhash,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= udp_bpf_update_proto,
#endif
	.memory_allocated	= &udp_memory_allocated,
	.per_cpu_fw_alloc	= &udp_memory_per_cpu_fw_alloc,

	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp_sock),
	.h.udp_table		= NULL,
	.diag_destroy		= udp_abort,
};
EXPORT_SYMBOL(udp_prot);
3248 | |
3249 | /* ------------------------------------------------------------------------ */ |
3250 | #ifdef CONFIG_PROC_FS |
3251 | |
3252 | static unsigned short seq_file_family(const struct seq_file *seq); |
3253 | static bool seq_sk_match(struct seq_file *seq, const struct sock *sk) |
3254 | { |
3255 | unsigned short family = seq_file_family(seq); |
3256 | |
3257 | /* AF_UNSPEC is used as a match all */ |
3258 | return ((family == AF_UNSPEC || family == sk->sk_family) && |
3259 | net_eq(net1: sock_net(sk), net2: seq_file_net(seq))); |
3260 | } |
3261 | |
3262 | #ifdef CONFIG_BPF_SYSCALL |
3263 | static const struct seq_operations bpf_iter_udp_seq_ops; |
3264 | #endif |
3265 | static struct udp_table *udp_get_table_seq(struct seq_file *seq, |
3266 | struct net *net) |
3267 | { |
3268 | const struct udp_seq_afinfo *afinfo; |
3269 | |
3270 | #ifdef CONFIG_BPF_SYSCALL |
3271 | if (seq->op == &bpf_iter_udp_seq_ops) |
3272 | return net->ipv4.udp_table; |
3273 | #endif |
3274 | |
3275 | afinfo = pde_data(inode: file_inode(f: seq->file)); |
3276 | return afinfo->udp_table ? : net->ipv4.udp_table; |
3277 | } |
3278 | |
3279 | static struct sock *udp_get_first(struct seq_file *seq, int start) |
3280 | { |
3281 | struct udp_iter_state *state = seq->private; |
3282 | struct net *net = seq_file_net(seq); |
3283 | struct udp_table *udptable; |
3284 | struct sock *sk; |
3285 | |
3286 | udptable = udp_get_table_seq(seq, net); |
3287 | |
3288 | for (state->bucket = start; state->bucket <= udptable->mask; |
3289 | ++state->bucket) { |
3290 | struct udp_hslot *hslot = &udptable->hash[state->bucket]; |
3291 | |
3292 | if (hlist_empty(h: &hslot->head)) |
3293 | continue; |
3294 | |
3295 | spin_lock_bh(lock: &hslot->lock); |
3296 | sk_for_each(sk, &hslot->head) { |
3297 | if (seq_sk_match(seq, sk)) |
3298 | goto found; |
3299 | } |
3300 | spin_unlock_bh(lock: &hslot->lock); |
3301 | } |
3302 | sk = NULL; |
3303 | found: |
3304 | return sk; |
3305 | } |
3306 | |
3307 | static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) |
3308 | { |
3309 | struct udp_iter_state *state = seq->private; |
3310 | struct net *net = seq_file_net(seq); |
3311 | struct udp_table *udptable; |
3312 | |
3313 | do { |
3314 | sk = sk_next(sk); |
3315 | } while (sk && !seq_sk_match(seq, sk)); |
3316 | |
3317 | if (!sk) { |
3318 | udptable = udp_get_table_seq(seq, net); |
3319 | |
3320 | if (state->bucket <= udptable->mask) |
3321 | spin_unlock_bh(lock: &udptable->hash[state->bucket].lock); |
3322 | |
3323 | return udp_get_first(seq, start: state->bucket + 1); |
3324 | } |
3325 | return sk; |
3326 | } |
3327 | |
3328 | static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos) |
3329 | { |
3330 | struct sock *sk = udp_get_first(seq, start: 0); |
3331 | |
3332 | if (sk) |
3333 | while (pos && (sk = udp_get_next(seq, sk)) != NULL) |
3334 | --pos; |
3335 | return pos ? NULL : sk; |
3336 | } |

void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct udp_iter_state *state = seq->private;

	state->bucket = MAX_UDP_PORTS;

	return *pos ? udp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
EXPORT_IPV6_MOD(udp_seq_start);

void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}
EXPORT_IPV6_MOD(udp_seq_next);

void udp_seq_stop(struct seq_file *seq, void *v)
{
	struct udp_iter_state *state = seq->private;
	struct udp_table *udptable;

	udptable = udp_get_table_seq(seq, seq_file_net(seq));

	if (state->bucket <= udptable->mask)
		spin_unlock_bh(&udptable->hash[state->bucket].lock);
}
EXPORT_IPV6_MOD(udp_seq_stop);

/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
			     int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);

	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
		   " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u",
		   bucket, src, srcp, dest, destp, sp->sk_state,
		   sk_wmem_alloc_get(sp),
		   udp_rqueue_get(sp),
		   0, 0L, 0,
		   from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
		   0, sock_i_ino(sp),
		   refcount_read(&sp->sk_refcnt), sp,
		   atomic_read(&sp->sk_drops));
}

int udp4_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 127);
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "   sl  local_address rem_address   st tx_queue "
			 "rx_queue tr tm->when retrnsmt   uid  timeout "
			 "inode ref pointer drops");
	else {
		struct udp_iter_state *state = seq->private;

		udp4_format_sock(v, seq, state->bucket);
	}
	seq_pad(seq, '\n');
	return 0;
}
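
/* For reference, a line of /proc/net/udp produced by the above (values
 * illustrative): an unconnected socket bound to 0.0.0.0:68 might show as
 *
 *  1768: 00000000:0044 00000000:0000 07 00000000:00000000 00:00000000 00000000     0        0 2973 2 0000000000000000 0
 *
 * Addresses and ports are hexadecimal; "st" is the socket state, which for
 * UDP is TCP_CLOSE (07) unless the socket is connected.
 */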

#ifdef CONFIG_BPF_SYSCALL
struct bpf_iter__udp {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct udp_sock *, udp_sk);
	uid_t uid __aligned(8);
	int bucket __aligned(8);
};

union bpf_udp_iter_batch_item {
	struct sock *sk;
	__u64 cookie;
};

struct bpf_udp_iter_state {
	struct udp_iter_state state;
	unsigned int cur_sk;
	unsigned int end_sk;
	unsigned int max_sk;
	union bpf_udp_iter_batch_item *batch;
};
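
/* While a batch is being filled, each item holds a referenced struct sock;
 * when the iterator is stopped mid-walk, the unseen entries are converted
 * in place to socket cookies (see bpf_iter_udp_put_batch()) so that no
 * references are kept across reads and the walk can still resume.
 */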

static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
				      unsigned int new_batch_sz, gfp_t flags);
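/* Return the first (in cookie order) socket from the previous batch that is
 * still present in the chain beginning at first_sk, or NULL if none remain.
 * Sockets may have been unhashed or freed between reads, which is why the
 * batch remembers cookies rather than raw pointers.
 */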
static struct sock *bpf_iter_udp_resume(struct sock *first_sk,
					union bpf_udp_iter_batch_item *cookies,
					int n_cookies)
{
	struct sock *sk = NULL;
	int i;

	for (i = 0; i < n_cookies; i++) {
		sk = first_sk;
		udp_portaddr_for_each_entry_from(sk)
			if (cookies[i].cookie == atomic64_read(&sk->sk_cookie))
				goto done;
	}
done:
	return sk;
}

static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
{
	struct bpf_udp_iter_state *iter = seq->private;
	struct udp_iter_state *state = &iter->state;
	unsigned int find_cookie, end_cookie;
	struct net *net = seq_file_net(seq);
	struct udp_table *udptable;
	unsigned int batch_sks = 0;
	int resume_bucket;
	int resizes = 0;
	struct sock *sk;
	int err = 0;

	resume_bucket = state->bucket;

	/* The current batch is done, so advance the bucket. */
	if (iter->cur_sk == iter->end_sk)
		state->bucket++;

	udptable = udp_get_table_seq(seq, net);

again:
	/* New batch for the next bucket.
	 * Iterate over the hash table to find a bucket with sockets matching
	 * the iterator attributes, and return the first matching socket from
	 * the bucket. The remaining matched sockets from the bucket are batched
	 * before releasing the bucket lock. This allows BPF programs that are
	 * called in seq_show to acquire the bucket lock if needed.
	 */
	find_cookie = iter->cur_sk;
	end_cookie = iter->end_sk;
	iter->cur_sk = 0;
	iter->end_sk = 0;
	batch_sks = 0;

	for (; state->bucket <= udptable->mask; state->bucket++) {
		struct udp_hslot *hslot2 = &udptable->hash2[state->bucket].hslot;

		if (hlist_empty(&hslot2->head))
			goto next_bucket;

		spin_lock_bh(&hslot2->lock);
		sk = hlist_entry_safe(hslot2->head.first, struct sock,
				      __sk_common.skc_portaddr_node);
		/* Resume from the first (in iteration order) unseen socket from
		 * the last batch that still exists in resume_bucket. Most of
		 * the time this will just be where the last iteration left off
		 * in resume_bucket unless that socket disappeared between
		 * reads.
		 */
		if (state->bucket == resume_bucket)
			sk = bpf_iter_udp_resume(sk, &iter->batch[find_cookie],
						 end_cookie - find_cookie);
fill_batch:
		udp_portaddr_for_each_entry_from(sk) {
			if (seq_sk_match(seq, sk)) {
				if (iter->end_sk < iter->max_sk) {
					sock_hold(sk);
					iter->batch[iter->end_sk++].sk = sk;
				}
				batch_sks++;
			}
		}

		/* Allocate a larger batch and try again. */
		if (unlikely(resizes <= 1 && iter->end_sk &&
			     iter->end_sk != batch_sks)) {
			resizes++;

			/* First, try with GFP_USER to maximize the chances of
			 * grabbing more memory.
			 */
			if (resizes == 1) {
				spin_unlock_bh(&hslot2->lock);
				err = bpf_iter_udp_realloc_batch(iter,
								 batch_sks * 3 / 2,
								 GFP_USER);
				if (err)
					return ERR_PTR(err);
				/* Start over. */
				goto again;
			}

			/* Next, hold onto the lock, so the bucket doesn't
			 * change while we get the rest of the sockets.
			 */
			err = bpf_iter_udp_realloc_batch(iter, batch_sks,
							 GFP_NOWAIT);
			if (err) {
				spin_unlock_bh(&hslot2->lock);
				return ERR_PTR(err);
			}

			/* Pick up where we left off. */
			sk = iter->batch[iter->end_sk - 1].sk;
			sk = hlist_entry_safe(sk->__sk_common.skc_portaddr_node.next,
					      struct sock,
					      __sk_common.skc_portaddr_node);
			batch_sks = iter->end_sk;
			goto fill_batch;
		}

		spin_unlock_bh(&hslot2->lock);

		if (iter->end_sk)
			break;
next_bucket:
		resizes = 0;
	}

	WARN_ON_ONCE(iter->end_sk != batch_sks);
	return iter->end_sk ? iter->batch[0].sk : NULL;
}

static void *bpf_iter_udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_udp_iter_state *iter = seq->private;
	struct sock *sk;

	/* Whenever seq_next() is called, the iter->cur_sk is
	 * done with seq_show(), so unref the iter->cur_sk.
	 */
	if (iter->cur_sk < iter->end_sk)
		sock_put(iter->batch[iter->cur_sk++].sk);

	/* After updating iter->cur_sk, check if there are more sockets
	 * available in the current bucket batch.
	 */
	if (iter->cur_sk < iter->end_sk)
		sk = iter->batch[iter->cur_sk].sk;
	else
		/* Prepare a new batch. */
		sk = bpf_iter_udp_batch(seq);

	++*pos;
	return sk;
}

static void *bpf_iter_udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	/* bpf iter does not support lseek, so it always
	 * continues from where it was stop()-ped.
	 */
	if (*pos)
		return bpf_iter_udp_batch(seq);

	return SEQ_START_TOKEN;
}

static int udp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
			     struct udp_sock *udp_sk, uid_t uid, int bucket)
{
	struct bpf_iter__udp ctx;

	meta->seq_num--;  /* skip SEQ_START_TOKEN */
	ctx.meta = meta;
	ctx.udp_sk = udp_sk;
	ctx.uid = uid;
	ctx.bucket = bucket;
	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_iter_udp_seq_show(struct seq_file *seq, void *v)
{
	struct udp_iter_state *state = seq->private;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	struct sock *sk = v;
	uid_t uid;
	int ret;

	if (v == SEQ_START_TOKEN)
		return 0;

	lock_sock(sk);

	if (unlikely(sk_unhashed(sk))) {
		ret = SEQ_SKIP;
		goto unlock;
	}

	uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, false);
	ret = udp_prog_seq_show(prog, &meta, v, uid, state->bucket);

unlock:
	release_sock(sk);
	return ret;
}

static void bpf_iter_udp_put_batch(struct bpf_udp_iter_state *iter)
{
	union bpf_udp_iter_batch_item *item;
	unsigned int cur_sk = iter->cur_sk;
	__u64 cookie;

	/* Remember the cookies of the sockets we haven't seen yet, so we can
	 * pick up where we left off next time around.
	 */
	while (cur_sk < iter->end_sk) {
		item = &iter->batch[cur_sk++];
		cookie = sock_gen_cookie(item->sk);
		sock_put(item->sk);
		item->cookie = cookie;
	}
}

static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_udp_iter_state *iter = seq->private;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	if (!v) {
		meta.seq = seq;
		prog = bpf_iter_get_info(&meta, true);
		if (prog)
			(void)udp_prog_seq_show(prog, &meta, v, 0, 0);
	}

	if (iter->cur_sk < iter->end_sk)
		bpf_iter_udp_put_batch(iter);
}

static const struct seq_operations bpf_iter_udp_seq_ops = {
	.start		= bpf_iter_udp_seq_start,
	.next		= bpf_iter_udp_seq_next,
	.stop		= bpf_iter_udp_seq_stop,
	.show		= bpf_iter_udp_seq_show,
};
#endif
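
/* For context, a minimal userspace sketch (illustrative, not part of this
 * file) of driving these seq_ops through a BPF iterator with libbpf; it
 * assumes a loaded skeleton "skel" containing a SEC("iter/udp") program
 * named dump_udp:
 *
 *	struct bpf_link *link;
 *	char buf[256];
 *	ssize_t n;
 *	int iter_fd;
 *
 *	link = bpf_program__attach_iter(skel->progs.dump_udp, NULL);
 *	iter_fd = bpf_iter_create(bpf_link__fd(link));
 *	while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
 *		write(STDOUT_FILENO, buf, n);	// text from bpf_seq_printf()
 *	close(iter_fd);
 *	bpf_link__destroy(link);
 */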

static unsigned short seq_file_family(const struct seq_file *seq)
{
	const struct udp_seq_afinfo *afinfo;

#ifdef CONFIG_BPF_SYSCALL
	/* BPF iterator: bpf programs to filter sockets. */
	if (seq->op == &bpf_iter_udp_seq_ops)
		return AF_UNSPEC;
#endif

	/* Proc fs iterator */
	afinfo = pde_data(file_inode(seq->file));
	return afinfo->family;
}

const struct seq_operations udp_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp4_seq_show,
};
EXPORT_IPV6_MOD(udp_seq_ops);

static struct udp_seq_afinfo udp4_seq_afinfo = {
	.family		= AF_INET,
	.udp_table	= NULL,
};

static int __net_init udp4_proc_init_net(struct net *net)
{
	if (!proc_create_net_data("udp", 0444, net->proc_net, &udp_seq_ops,
			sizeof(struct udp_iter_state), &udp4_seq_afinfo))
		return -ENOMEM;
	return 0;
}

static void __net_exit udp4_proc_exit_net(struct net *net)
{
	remove_proc_entry("udp", net->proc_net);
}

static struct pernet_operations udp4_net_ops = {
	.init = udp4_proc_init_net,
	.exit = udp4_proc_exit_net,
};

int __init udp4_proc_init(void)
{
	return register_pernet_subsys(&udp4_net_ops);
}

void udp4_proc_exit(void)
{
	unregister_pernet_subsys(&udp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &uhash_entries);
	if (ret)
		return 0;

	if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
		uhash_entries = UDP_HTABLE_SIZE_MIN;
	return 1;
}
__setup("uhash_entries=", set_uhash_entries);

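/* The table size can thus be overridden on the kernel command line, e.g.
 * "uhash_entries=65536"; a non-zero value below UDP_HTABLE_SIZE_MIN is
 * rounded up to it, and 0 keeps the automatic sizing performed by
 * alloc_large_system_hash() below.
 */

/* The table provides two hash views over the same sockets: hash[] chains
 * them by local port only, while hash2[] chains them by (local port,
 * local address) so that lookups stay cheap when many sockets share one
 * port.
 */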
void __init udp_table_init(struct udp_table *table, const char *name)
{
	unsigned int i, slot_size;

	slot_size = sizeof(struct udp_hslot) + sizeof(struct udp_hslot_main) +
		    udp_hash4_slot_size();
	table->hash = alloc_large_system_hash(name,
					      slot_size,
					      uhash_entries,
					      21, /* one slot per 2 MB */
					      0,
					      &table->log,
					      &table->mask,
					      UDP_HTABLE_SIZE_MIN,
					      UDP_HTABLE_SIZE_MAX);

	table->hash2 = (void *)(table->hash + (table->mask + 1));
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash[i].head);
		table->hash[i].count = 0;
		spin_lock_init(&table->hash[i].lock);
	}
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash2[i].hslot.head);
		table->hash2[i].hslot.count = 0;
		spin_lock_init(&table->hash2[i].hslot.lock);
	}
	udp_table_hash4_init(table);
}

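/* Per-boot random seed for hashing UDP flows; for instance, the tunnel
 * helper udp_flow_src_port() mixes it in when deriving a source port
 * from the inner flow hash.
 */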
u32 udp_flow_hashrnd(void)
{
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	return hashrnd;
}
EXPORT_SYMBOL(udp_flow_hashrnd);

static void __net_init udp_sysctl_init(struct net *net)
{
	net->ipv4.sysctl_udp_rmem_min = PAGE_SIZE;
	net->ipv4.sysctl_udp_wmem_min = PAGE_SIZE;

#ifdef CONFIG_NET_L3_MASTER_DEV
	net->ipv4.sysctl_udp_l3mdev_accept = 0;
#endif
}

static struct udp_table __net_init *udp_pernet_table_alloc(unsigned int hash_entries)
{
	struct udp_table *udptable;
	unsigned int slot_size;
	int i;

	udptable = kmalloc(sizeof(*udptable), GFP_KERNEL);
	if (!udptable)
		goto out;

	slot_size = sizeof(struct udp_hslot) + sizeof(struct udp_hslot_main) +
		    udp_hash4_slot_size();
	udptable->hash = vmalloc_huge(hash_entries * slot_size,
				      GFP_KERNEL_ACCOUNT);
	if (!udptable->hash)
		goto free_table;

	udptable->hash2 = (void *)(udptable->hash + hash_entries);
	udptable->mask = hash_entries - 1;
	udptable->log = ilog2(hash_entries);

	for (i = 0; i < hash_entries; i++) {
		INIT_HLIST_HEAD(&udptable->hash[i].head);
		udptable->hash[i].count = 0;
		spin_lock_init(&udptable->hash[i].lock);

		INIT_HLIST_HEAD(&udptable->hash2[i].hslot.head);
		udptable->hash2[i].hslot.count = 0;
		spin_lock_init(&udptable->hash2[i].hslot.lock);
	}
	udp_table_hash4_init(udptable);

	return udptable;

free_table:
	kfree(udptable);
out:
	return NULL;
}

static void __net_exit udp_pernet_table_free(struct net *net)
{
	struct udp_table *udptable = net->ipv4.udp_table;

	if (udptable == &udp_table)
		return;

	kvfree(udptable->hash);
	kfree(udptable);
}

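/* A child netns can get its own, smaller hash table, sized by the
 * net.ipv4.udp_child_hash_entries sysctl as read in the *creating*
 * namespace, e.g. (illustrative shell session):
 *
 *	sysctl -w net.ipv4.udp_child_hash_entries=4096
 *	unshare -n ...	# the new netns allocates a 4096-entry table
 *
 * With the default of 0, child namespaces keep sharing the global table.
 */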
3861 | { |
3862 | struct udp_table *udptable; |
3863 | unsigned int hash_entries; |
3864 | struct net *old_net; |
3865 | |
3866 | if (net_eq(net1: net, net2: &init_net)) |
3867 | goto fallback; |
3868 | |
3869 | old_net = current->nsproxy->net_ns; |
3870 | hash_entries = READ_ONCE(old_net->ipv4.sysctl_udp_child_hash_entries); |
3871 | if (!hash_entries) |
3872 | goto fallback; |
3873 | |
3874 | /* Set min to keep the bitmap on stack in udp_lib_get_port() */ |
3875 | if (hash_entries < UDP_HTABLE_SIZE_MIN_PERNET) |
3876 | hash_entries = UDP_HTABLE_SIZE_MIN_PERNET; |
3877 | else |
3878 | hash_entries = roundup_pow_of_two(hash_entries); |
3879 | |
3880 | udptable = udp_pernet_table_alloc(hash_entries); |
3881 | if (udptable) { |
3882 | net->ipv4.udp_table = udptable; |
3883 | } else { |
3884 | pr_warn("Failed to allocate UDP hash table (entries: %u) " |
3885 | "for a netns, fallback to the global one\n", |
3886 | hash_entries); |
3887 | fallback: |
3888 | net->ipv4.udp_table = &udp_table; |
3889 | } |
3890 | } |

static int __net_init udp_pernet_init(struct net *net)
{
#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)
	int i;

	/* No tunnel is configured */
	for (i = 0; i < ARRAY_SIZE(net->ipv4.udp_tunnel_gro); ++i) {
		INIT_HLIST_HEAD(&net->ipv4.udp_tunnel_gro[i].list);
		RCU_INIT_POINTER(net->ipv4.udp_tunnel_gro[i].sk, NULL);
	}
#endif
	udp_sysctl_init(net);
	udp_set_table(net);

	return 0;
}

static void __net_exit udp_pernet_exit(struct net *net)
{
	udp_pernet_table_free(net);
}

static struct pernet_operations __net_initdata udp_sysctl_ops = {
	.init	= udp_pernet_init,
	.exit	= udp_pernet_exit,
};

#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta,
		     struct udp_sock *udp_sk, uid_t uid, int bucket)

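/* Grow the batch array to new_batch_sz entries. With GFP_USER the caller
 * has dropped the bucket lock, so the old entries are first converted to
 * cookies (releasing their socket references) before being copied over;
 * with GFP_NOWAIT the bucket lock is still held and the held references
 * are carried into the new array as-is.
 */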
static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
				      unsigned int new_batch_sz, gfp_t flags)
{
	union bpf_udp_iter_batch_item *new_batch;

	new_batch = kvmalloc_array(new_batch_sz, sizeof(*new_batch),
				   flags | __GFP_NOWARN);
	if (!new_batch)
		return -ENOMEM;

	if (flags != GFP_NOWAIT)
		bpf_iter_udp_put_batch(iter);

	memcpy(new_batch, iter->batch, sizeof(*iter->batch) * iter->end_sk);
	kvfree(iter->batch);
	iter->batch = new_batch;
	iter->max_sk = new_batch_sz;

	return 0;
}

#define INIT_BATCH_SZ 16

static int bpf_iter_init_udp(void *priv_data, struct bpf_iter_aux_info *aux)
{
	struct bpf_udp_iter_state *iter = priv_data;
	int ret;

	ret = bpf_iter_init_seq_net(priv_data, aux);
	if (ret)
		return ret;

	ret = bpf_iter_udp_realloc_batch(iter, INIT_BATCH_SZ, GFP_USER);
	if (ret)
		bpf_iter_fini_seq_net(priv_data);

	iter->state.bucket = -1;

	return ret;
}

static void bpf_iter_fini_udp(void *priv_data)
{
	struct bpf_udp_iter_state *iter = priv_data;

	bpf_iter_fini_seq_net(priv_data);
	kvfree(iter->batch);
}

static const struct bpf_iter_seq_info udp_seq_info = {
	.seq_ops		= &bpf_iter_udp_seq_ops,
	.init_seq_private	= bpf_iter_init_udp,
	.fini_seq_private	= bpf_iter_fini_udp,
	.seq_priv_size		= sizeof(struct bpf_udp_iter_state),
};

static struct bpf_iter_reg udp_reg_info = {
	.target			= "udp",
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__udp, udp_sk),
		  PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
	},
	.seq_info		= &udp_seq_info,
};

static void __init bpf_iter_register(void)
{
	udp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UDP];
	if (bpf_iter_reg_target(&udp_reg_info))
		pr_warn("Warning: could not register bpf iterator udp\n");
}
#endif

void __init udp_init(void)
{
	unsigned long limit;
	unsigned int i;

	udp_table_init(&udp_table, "UDP");
	limit = nr_free_buffer_pages() / 8;
	limit = max(limit, 128UL);
	sysctl_udp_mem[0] = limit / 4 * 3;
	sysctl_udp_mem[1] = limit;
	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;
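
	/* udp_mem is {min, pressure, max} measured in pages: memory
	 * accounting starts applying pressure at "limit" pages and fails
	 * allocations past 3/2 * limit. On a machine with ~4 GiB of buffer
	 * pages that works out to roughly 384 MB / 512 MB / 768 MB
	 * (illustrative figures).
	 */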

	/* 16 spinlocks per cpu */
	udp_busylocks_log = ilog2(nr_cpu_ids) + 4;
	udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log,
				GFP_KERNEL);
	if (!udp_busylocks)
		panic("UDP: failed to alloc udp_busylocks\n");
	for (i = 0; i < (1U << udp_busylocks_log); i++)
		spin_lock_init(udp_busylocks + i);

	if (register_pernet_subsys(&udp_sysctl_ops))
		panic("UDP: failed to init sysctl parameters.\n");

#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
	bpf_iter_register();
#endif
}