1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * INET An implementation of the TCP/IP protocol suite for the LINUX |
4 | * operating system. INET is implemented using the BSD Socket |
5 | * interface as the means of communication with the user level. |
6 | * |
7 | * Implementation of the Transmission Control Protocol(TCP). |
8 | * |
9 | * Authors: Ross Biro |
10 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
11 | * Mark Evans, <evansmp@uhura.aston.ac.uk> |
12 | * Corey Minyard <wf-rch!minyard@relay.EU.net> |
13 | * Florian La Roche, <flla@stud.uni-sb.de> |
14 | * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> |
15 | * Linus Torvalds, <torvalds@cs.helsinki.fi> |
16 | * Alan Cox, <gw4pts@gw4pts.ampr.org> |
17 | * Matthew Dillon, <dillon@apollo.west.oic.com> |
18 | * Arnt Gulbrandsen, <agulbra@nvg.unit.no> |
19 | * Jorge Cwik, <jorge@laser.satlink.net> |
20 | * |
21 | * Fixes: |
22 | * Alan Cox : Numerous verify_area() calls |
23 | * Alan Cox : Set the ACK bit on a reset |
24 | * Alan Cox : Stopped it crashing if it closed while |
25 | * sk->inuse=1 and was trying to connect |
26 | * (tcp_err()). |
27 | * Alan Cox : All icmp error handling was broken |
28 | * pointers passed where wrong and the |
29 | * socket was looked up backwards. Nobody |
30 | * tested any icmp error code obviously. |
31 | * Alan Cox : tcp_err() now handled properly. It |
32 | * wakes people on errors. poll |
33 | * behaves and the icmp error race |
34 | * has gone by moving it into sock.c |
35 | * Alan Cox : tcp_send_reset() fixed to work for |
36 | * everything not just packets for |
37 | * unknown sockets. |
38 | * Alan Cox : tcp option processing. |
39 | * Alan Cox : Reset tweaked (still not 100%) [Had |
40 | * syn rule wrong] |
41 | * Herp Rosmanith : More reset fixes |
42 | * Alan Cox : No longer acks invalid rst frames. |
43 | * Acking any kind of RST is right out. |
44 | * Alan Cox : Sets an ignore me flag on an rst |
45 | * receive otherwise odd bits of prattle |
46 | * escape still |
47 | * Alan Cox : Fixed another acking RST frame bug. |
48 | * Should stop LAN workplace lockups. |
49 | * Alan Cox : Some tidyups using the new skb list |
50 | * facilities |
51 | * Alan Cox : sk->keepopen now seems to work |
52 | * Alan Cox : Pulls options out correctly on accepts |
53 | * Alan Cox : Fixed assorted sk->rqueue->next errors |
54 | * Alan Cox : PSH doesn't end a TCP read. Switched a |
55 | * bit to skb ops. |
56 | * Alan Cox : Tidied tcp_data to avoid a potential |
57 | * nasty. |
58 | * Alan Cox : Added some better commenting, as the |
59 | * tcp is hard to follow |
60 | * Alan Cox : Removed incorrect check for 20 * psh |
61 | * Michael O'Reilly : ack < copied bug fix. |
62 | * Johannes Stille : Misc tcp fixes (not all in yet). |
63 | * Alan Cox : FIN with no memory -> CRASH |
64 | * Alan Cox : Added socket option proto entries. |
65 | * Also added awareness of them to accept. |
66 | * Alan Cox : Added TCP options (SOL_TCP) |
67 | * Alan Cox : Switched wakeup calls to callbacks, |
68 | * so the kernel can layer network |
69 | * sockets. |
70 | * Alan Cox : Use ip_tos/ip_ttl settings. |
71 | * Alan Cox : Handle FIN (more) properly (we hope). |
72 | * Alan Cox : RST frames sent on unsynchronised |
73 | * state ack error. |
74 | * Alan Cox : Put in missing check for SYN bit. |
75 | * Alan Cox : Added tcp_select_window() aka NET2E |
76 | * window non shrink trick. |
77 | * Alan Cox : Added a couple of small NET2E timer |
78 | * fixes |
79 | * Charles Hedrick : TCP fixes |
80 | * Toomas Tamm : TCP window fixes |
81 | * Alan Cox : Small URG fix to rlogin ^C ack fight |
82 | * Charles Hedrick : Rewrote most of it to actually work |
83 | * Linus : Rewrote tcp_read() and URG handling |
84 | * completely |
85 | * Gerhard Koerting: Fixed some missing timer handling |
86 | * Matthew Dillon : Reworked TCP machine states as per RFC |
87 | * Gerhard Koerting: PC/TCP workarounds |
88 | * Adam Caldwell : Assorted timer/timing errors |
89 | * Matthew Dillon : Fixed another RST bug |
90 | * Alan Cox : Move to kernel side addressing changes. |
91 | * Alan Cox : Beginning work on TCP fastpathing |
92 | * (not yet usable) |
93 | * Arnt Gulbrandsen: Turbocharged tcp_check() routine. |
94 | * Alan Cox : TCP fast path debugging |
95 | * Alan Cox : Window clamping |
96 | * Michael Riepe : Bug in tcp_check() |
97 | * Matt Dillon : More TCP improvements and RST bug fixes |
 *		Matt Dillon	:	Yet more small nasties removed from the
99 | * TCP code (Be very nice to this man if |
100 | * tcp finally works 100%) 8) |
101 | * Alan Cox : BSD accept semantics. |
102 | * Alan Cox : Reset on closedown bug. |
103 | * Peter De Schrijver : ENOTCONN check missing in tcp_sendto(). |
104 | * Michael Pall : Handle poll() after URG properly in |
105 | * all cases. |
106 | * Michael Pall : Undo the last fix in tcp_read_urg() |
107 | * (multi URG PUSH broke rlogin). |
108 | * Michael Pall : Fix the multi URG PUSH problem in |
109 | * tcp_readable(), poll() after URG |
110 | * works now. |
111 | * Michael Pall : recv(...,MSG_OOB) never blocks in the |
112 | * BSD api. |
113 | * Alan Cox : Changed the semantics of sk->socket to |
114 | * fix a race and a signal problem with |
115 | * accept() and async I/O. |
116 | * Alan Cox : Relaxed the rules on tcp_sendto(). |
117 | * Yury Shevchuk : Really fixed accept() blocking problem. |
118 | * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for |
119 | * clients/servers which listen in on |
120 | * fixed ports. |
121 | * Alan Cox : Cleaned the above up and shrank it to |
122 | * a sensible code size. |
123 | * Alan Cox : Self connect lockup fix. |
124 | * Alan Cox : No connect to multicast. |
125 | * Ross Biro : Close unaccepted children on master |
126 | * socket close. |
127 | * Alan Cox : Reset tracing code. |
128 | * Alan Cox : Spurious resets on shutdown. |
129 | * Alan Cox : Giant 15 minute/60 second timer error |
130 | * Alan Cox : Small whoops in polling before an |
131 | * accept. |
132 | * Alan Cox : Kept the state trace facility since |
133 | * it's handy for debugging. |
134 | * Alan Cox : More reset handler fixes. |
135 | * Alan Cox : Started rewriting the code based on |
136 | * the RFC's for other useful protocol |
137 | * references see: Comer, KA9Q NOS, and |
138 | * for a reference on the difference |
139 | * between specifications and how BSD |
140 | * works see the 4.4lite source. |
141 | * A.N.Kuznetsov : Don't time wait on completion of tidy |
142 | * close. |
143 | * Linus Torvalds : Fin/Shutdown & copied_seq changes. |
144 | * Linus Torvalds : Fixed BSD port reuse to work first syn |
145 | * Alan Cox : Reimplemented timers as per the RFC |
146 | * and using multiple timers for sanity. |
147 | * Alan Cox : Small bug fixes, and a lot of new |
148 | * comments. |
149 | * Alan Cox : Fixed dual reader crash by locking |
150 | * the buffers (much like datagram.c) |
151 | * Alan Cox : Fixed stuck sockets in probe. A probe |
152 | * now gets fed up of retrying without |
153 | * (even a no space) answer. |
154 | * Alan Cox : Extracted closing code better |
155 | * Alan Cox : Fixed the closing state machine to |
156 | * resemble the RFC. |
157 | * Alan Cox : More 'per spec' fixes. |
158 | * Jorge Cwik : Even faster checksumming. |
159 | * Alan Cox : tcp_data() doesn't ack illegal PSH |
160 | * only frames. At least one pc tcp stack |
161 | * generates them. |
162 | * Alan Cox : Cache last socket. |
163 | * Alan Cox : Per route irtt. |
164 | * Matt Day : poll()->select() match BSD precisely on error |
165 | * Alan Cox : New buffers |
166 | * Marc Tamsky : Various sk->prot->retransmits and |
167 | * sk->retransmits misupdating fixed. |
168 | * Fixed tcp_write_timeout: stuck close, |
169 | * and TCP syn retries gets used now. |
170 | * Mark Yarvis : In tcp_read_wakeup(), don't send an |
171 | * ack if state is TCP_CLOSED. |
172 | * Alan Cox : Look up device on a retransmit - routes may |
173 | * change. Doesn't yet cope with MSS shrink right |
174 | * but it's a start! |
175 | * Marc Tamsky : Closing in closing fixes. |
176 | * Mike Shaver : RFC1122 verifications. |
177 | * Alan Cox : rcv_saddr errors. |
178 | * Alan Cox : Block double connect(). |
179 | * Alan Cox : Small hooks for enSKIP. |
180 | * Alexey Kuznetsov: Path MTU discovery. |
181 | * Alan Cox : Support soft errors. |
182 | * Alan Cox : Fix MTU discovery pathological case |
183 | * when the remote claims no mtu! |
184 | * Marc Tamsky : TCP_CLOSE fix. |
185 | * Colin (G3TNE) : Send a reset on syn ack replies in |
186 | * window but wrong (fixes NT lpd problems) |
187 | * Pedro Roque : Better TCP window handling, delayed ack. |
188 | * Joerg Reuter : No modification of locked buffers in |
189 | * tcp_do_retransmit() |
190 | * Eric Schenk : Changed receiver side silly window |
191 | * avoidance algorithm to BSD style |
192 | * algorithm. This doubles throughput |
193 | * against machines running Solaris, |
194 | * and seems to result in general |
195 | * improvement. |
196 | * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD |
197 | * Willy Konynenberg : Transparent proxying support. |
198 | * Mike McLagan : Routing by source |
199 | * Keith Owens : Do proper merging with partial SKB's in |
200 | * tcp_do_sendmsg to avoid burstiness. |
201 | * Eric Schenk : Fix fast close down bug with |
202 | * shutdown() followed by close(). |
203 | * Andi Kleen : Make poll agree with SIGIO |
204 | * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and |
205 | * lingertime == 0 (RFC 793 ABORT Call) |
206 | * Hirokazu Takahashi : Use copy_from_user() instead of |
207 | * csum_and_copy_from_user() if possible. |
208 | * |
209 | * Description of States: |
210 | * |
211 | * TCP_SYN_SENT sent a connection request, waiting for ack |
212 | * |
213 | * TCP_SYN_RECV received a connection request, sent ack, |
214 | * waiting for final ack in three-way handshake. |
215 | * |
216 | * TCP_ESTABLISHED connection established |
217 | * |
218 | * TCP_FIN_WAIT1 our side has shutdown, waiting to complete |
219 | * transmission of remaining buffered data |
220 | * |
221 | * TCP_FIN_WAIT2 all buffered data sent, waiting for remote |
222 | * to shutdown |
223 | * |
224 | * TCP_CLOSING both sides have shutdown but we still have |
225 | * data we have to finish sending |
226 | * |
227 | * TCP_TIME_WAIT timeout to catch resent junk before entering |
228 | * closed, can only be entered from FIN_WAIT2 |
229 | * or CLOSING. Required because the other end |
230 | * may not have gotten our last ACK causing it |
231 | * to retransmit the data packet (which we ignore) |
232 | * |
233 | * TCP_CLOSE_WAIT remote side has shutdown and is waiting for |
234 | * us to finish writing our data and to shutdown |
235 | * (we have to close() to move on to LAST_ACK) |
236 | * |
 *	TCP_LAST_ACK		our side has shutdown after remote has
238 | * shutdown. There may still be data in our |
239 | * buffer that we have to finish sending |
240 | * |
241 | * TCP_CLOSE socket is finished |
242 | */ |
243 | |
244 | #define pr_fmt(fmt) "TCP: " fmt |
245 | |
246 | #include <crypto/hash.h> |
247 | #include <linux/kernel.h> |
248 | #include <linux/module.h> |
249 | #include <linux/types.h> |
250 | #include <linux/fcntl.h> |
251 | #include <linux/poll.h> |
252 | #include <linux/inet_diag.h> |
253 | #include <linux/init.h> |
254 | #include <linux/fs.h> |
255 | #include <linux/skbuff.h> |
256 | #include <linux/scatterlist.h> |
257 | #include <linux/splice.h> |
258 | #include <linux/net.h> |
259 | #include <linux/socket.h> |
260 | #include <linux/random.h> |
261 | #include <linux/memblock.h> |
262 | #include <linux/highmem.h> |
263 | #include <linux/cache.h> |
264 | #include <linux/err.h> |
265 | #include <linux/time.h> |
266 | #include <linux/slab.h> |
267 | #include <linux/errqueue.h> |
268 | #include <linux/static_key.h> |
269 | #include <linux/btf.h> |
270 | |
271 | #include <net/icmp.h> |
272 | #include <net/inet_common.h> |
273 | #include <net/tcp.h> |
274 | #include <net/mptcp.h> |
275 | #include <net/xfrm.h> |
276 | #include <net/ip.h> |
277 | #include <net/sock.h> |
278 | |
279 | #include <linux/uaccess.h> |
280 | #include <asm/ioctls.h> |
281 | #include <net/busy_poll.h> |
282 | #include <net/rps.h> |
283 | |
284 | /* Track pending CMSGs. */ |
285 | enum { |
286 | TCP_CMSG_INQ = 1, |
287 | TCP_CMSG_TS = 2 |
288 | }; |
289 | |
290 | DEFINE_PER_CPU(unsigned int, tcp_orphan_count); |
291 | EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count); |
292 | |
293 | long sysctl_tcp_mem[3] __read_mostly; |
294 | EXPORT_SYMBOL(sysctl_tcp_mem); |
295 | |
296 | atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp; /* Current allocated memory. */ |
297 | EXPORT_SYMBOL(tcp_memory_allocated); |
298 | DEFINE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc); |
299 | EXPORT_PER_CPU_SYMBOL_GPL(tcp_memory_per_cpu_fw_alloc); |
300 | |
301 | #if IS_ENABLED(CONFIG_SMC) |
302 | DEFINE_STATIC_KEY_FALSE(tcp_have_smc); |
303 | EXPORT_SYMBOL(tcp_have_smc); |
304 | #endif |
305 | |
306 | /* |
307 | * Current number of TCP sockets. |
308 | */ |
309 | struct percpu_counter tcp_sockets_allocated ____cacheline_aligned_in_smp; |
310 | EXPORT_SYMBOL(tcp_sockets_allocated); |
311 | |
312 | /* |
313 | * TCP splice context |
314 | */ |
315 | struct tcp_splice_state { |
316 | struct pipe_inode_info *pipe; |
317 | size_t len; |
318 | unsigned int flags; |
319 | }; |
320 | |
321 | /* |
322 | * Pressure flag: try to collapse. |
323 | * Technical note: it is used by multiple contexts non atomically. |
324 | * All the __sk_mem_schedule() is of this nature: accounting |
325 | * is strict, actions are advisory and have some latency. |
326 | */ |
327 | unsigned long tcp_memory_pressure __read_mostly; |
328 | EXPORT_SYMBOL_GPL(tcp_memory_pressure); |
329 | |
330 | void tcp_enter_memory_pressure(struct sock *sk) |
331 | { |
332 | unsigned long val; |
333 | |
334 | if (READ_ONCE(tcp_memory_pressure)) |
335 | return; |
336 | val = jiffies; |
337 | |
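	/* tcp_memory_pressure doubles as a timestamp: it stores the jiffies
	 * value at which pressure started, with 0 meaning "no pressure".
	 * If jiffies happens to be 0 right now, nudge the value so the flag
	 * still reads as set; tcp_leave_memory_pressure() uses it to account
	 * how long the pressure episode lasted.
	 */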
338 | if (!val) |
339 | val--; |
340 | if (!cmpxchg(&tcp_memory_pressure, 0, val)) |
341 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES); |
342 | } |
343 | EXPORT_SYMBOL_GPL(tcp_enter_memory_pressure); |
344 | |
345 | void tcp_leave_memory_pressure(struct sock *sk) |
346 | { |
347 | unsigned long val; |
348 | |
349 | if (!READ_ONCE(tcp_memory_pressure)) |
350 | return; |
351 | val = xchg(&tcp_memory_pressure, 0); |
352 | if (val) |
353 | NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO, |
354 | jiffies_to_msecs(jiffies - val)); |
355 | } |
356 | EXPORT_SYMBOL_GPL(tcp_leave_memory_pressure); |
357 | |
358 | /* Convert seconds to retransmits based on initial and max timeout */ |
359 | static u8 secs_to_retrans(int seconds, int timeout, int rto_max) |
360 | { |
361 | u8 res = 0; |
362 | |
363 | if (seconds > 0) { |
364 | int period = timeout; |
365 | |
366 | res = 1; |
367 | while (seconds > period && res < 255) { |
368 | res++; |
369 | timeout <<= 1; |
370 | if (timeout > rto_max) |
371 | timeout = rto_max; |
372 | period += timeout; |
373 | } |
374 | } |
375 | return res; |
376 | } |
377 | |
378 | /* Convert retransmits to seconds based on initial and max timeout */ |
379 | static int retrans_to_secs(u8 retrans, int timeout, int rto_max) |
380 | { |
381 | int period = 0; |
382 | |
383 | if (retrans > 0) { |
384 | period = timeout; |
385 | while (--retrans) { |
386 | timeout <<= 1; |
387 | if (timeout > rto_max) |
388 | timeout = rto_max; |
389 | period += timeout; |
390 | } |
391 | } |
392 | return period; |
393 | } |
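
/* Worked example for the two helpers above: with an initial timeout of
 * 1 second and a large rto_max, secs_to_retrans(10, 1, 120) accumulates
 * the exponential backoff series 1 + 2 + 4 + 8 = 15 and stops once the
 * accumulated period covers 10 seconds, returning 4 retransmits.
 * retrans_to_secs(4, 1, 120) sums the same series back to 15 seconds,
 * so the two conversions are only approximate inverses of each other.
 */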
394 | |
395 | static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp) |
396 | { |
397 | u32 rate = READ_ONCE(tp->rate_delivered); |
398 | u32 intv = READ_ONCE(tp->rate_interval_us); |
399 | u64 rate64 = 0; |
400 | |
401 | if (rate && intv) { |
402 | rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC; |
403 | do_div(rate64, intv); |
404 | } |
405 | return rate64; |
406 | } |
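
/* The value computed above is simply
 *
 *	rate_delivered * mss_cache / (rate_interval_us / USEC_PER_SEC)
 *
 * i.e. the packets delivered during the last rate sample interval,
 * scaled to bytes per second. For example, 10 delivered packets with a
 * 1448-byte MSS over a 10 ms interval yield 1,448,000 bytes/sec.
 */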
407 | |
408 | /* Address-family independent initialization for a tcp_sock. |
409 | * |
410 | * NOTE: A lot of things set to zero explicitly by call to |
411 | * sk_alloc() so need not be done here. |
412 | */ |
413 | void tcp_init_sock(struct sock *sk) |
414 | { |
415 | struct inet_connection_sock *icsk = inet_csk(sk); |
416 | struct tcp_sock *tp = tcp_sk(sk); |
417 | |
418 | tp->out_of_order_queue = RB_ROOT; |
419 | sk->tcp_rtx_queue = RB_ROOT; |
420 | tcp_init_xmit_timers(sk); |
	INIT_LIST_HEAD(&tp->tsq_node);
	INIT_LIST_HEAD(&tp->tsorted_sent_queue);
423 | |
424 | icsk->icsk_rto = TCP_TIMEOUT_INIT; |
425 | icsk->icsk_rto_min = TCP_RTO_MIN; |
426 | icsk->icsk_delack_max = TCP_DELACK_MAX; |
427 | tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); |
	minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);
429 | |
430 | /* So many TCP implementations out there (incorrectly) count the |
431 | * initial SYN frame in their delayed-ACK and congestion control |
432 | * algorithms that we must have the following bandaid to talk |
433 | * efficiently to them. -DaveM |
434 | */ |
435 | tcp_snd_cwnd_set(tp, TCP_INIT_CWND); |
436 | |
437 | /* There's a bubble in the pipe until at least the first ACK. */ |
438 | tp->app_limited = ~0U; |
439 | tp->rate_app_limited = 1; |
440 | |
441 | /* See draft-stevens-tcpca-spec-01 for discussion of the |
442 | * initialization of these values. |
443 | */ |
444 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; |
445 | tp->snd_cwnd_clamp = ~0; |
446 | tp->mss_cache = TCP_MSS_DEFAULT; |
447 | |
448 | tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering); |
449 | tcp_assign_congestion_control(sk); |
450 | |
451 | tp->tsoffset = 0; |
452 | tp->rack.reo_wnd_steps = 1; |
453 | |
454 | sk->sk_write_space = sk_stream_write_space; |
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
456 | |
457 | icsk->icsk_sync_mss = tcp_sync_mss; |
458 | |
459 | WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1])); |
460 | WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1])); |
461 | tcp_scaling_ratio_init(sk); |
462 | |
	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
464 | sk_sockets_allocated_inc(sk); |
465 | } |
466 | EXPORT_SYMBOL(tcp_init_sock); |
467 | |
468 | static void tcp_tx_timestamp(struct sock *sk, u16 tsflags) |
469 | { |
470 | struct sk_buff *skb = tcp_write_queue_tail(sk); |
471 | |
472 | if (tsflags && skb) { |
473 | struct skb_shared_info *shinfo = skb_shinfo(skb); |
474 | struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); |
475 | |
		sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
477 | if (tsflags & SOF_TIMESTAMPING_TX_ACK) |
478 | tcb->txstamp_ack = 1; |
479 | if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) |
480 | shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1; |
481 | } |
482 | } |
483 | |
484 | static bool tcp_stream_is_readable(struct sock *sk, int target) |
485 | { |
486 | if (tcp_epollin_ready(sk, target)) |
487 | return true; |
488 | return sk_is_readable(sk); |
489 | } |
490 | |
491 | /* |
492 | * Wait for a TCP event. |
493 | * |
494 | * Note that we don't need to lock the socket, as the upper poll layers |
495 | * take care of normal races (between the test and the event) and we don't |
496 | * go look at any of the socket buffers directly. |
497 | */ |
498 | __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait) |
499 | { |
500 | __poll_t mask; |
501 | struct sock *sk = sock->sk; |
502 | const struct tcp_sock *tp = tcp_sk(sk); |
503 | u8 shutdown; |
504 | int state; |
505 | |
	sock_poll_wait(file, sock, wait);
507 | |
508 | state = inet_sk_state_load(sk); |
509 | if (state == TCP_LISTEN) |
510 | return inet_csk_listen_poll(sk); |
511 | |
512 | /* Socket is not locked. We are protected from async events |
513 | * by poll logic and correct handling of state changes |
514 | * made by other threads is impossible in any case. |
515 | */ |
516 | |
517 | mask = 0; |
518 | |
519 | /* |
520 | * EPOLLHUP is certainly not done right. But poll() doesn't |
521 | * have a notion of HUP in just one direction, and for a |
522 | * socket the read side is more interesting. |
523 | * |
524 | * Some poll() documentation says that EPOLLHUP is incompatible |
525 | * with the EPOLLOUT/POLLWR flags, so somebody should check this |
526 | * all. But careful, it tends to be safer to return too many |
527 | * bits than too few, and you can easily break real applications |
528 | * if you don't tell them that something has hung up! |
529 | * |
530 | * Check-me. |
531 | * |
532 | * Check number 1. EPOLLHUP is _UNMASKABLE_ event (see UNIX98 and |
533 | * our fs/select.c). It means that after we received EOF, |
534 | * poll always returns immediately, making impossible poll() on write() |
535 | * in state CLOSE_WAIT. One solution is evident --- to set EPOLLHUP |
536 | * if and only if shutdown has been made in both directions. |
537 | * Actually, it is interesting to look how Solaris and DUX |
538 | * solve this dilemma. I would prefer, if EPOLLHUP were maskable, |
539 | * then we could set it on SND_SHUTDOWN. BTW examples given |
540 | * in Stevens' books assume exactly this behaviour, it explains |
541 | * why EPOLLHUP is incompatible with EPOLLOUT. --ANK |
542 | * |
543 | * NOTE. Check for TCP_CLOSE is added. The goal is to prevent |
544 | * blocking on fresh not-connected or disconnected socket. --ANK |
545 | */ |
546 | shutdown = READ_ONCE(sk->sk_shutdown); |
547 | if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE) |
548 | mask |= EPOLLHUP; |
549 | if (shutdown & RCV_SHUTDOWN) |
550 | mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; |
551 | |
552 | /* Connected or passive Fast Open socket? */ |
553 | if (state != TCP_SYN_SENT && |
554 | (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) { |
		int target = sock_rcvlowat(sk, 0, INT_MAX);
556 | u16 urg_data = READ_ONCE(tp->urg_data); |
557 | |
558 | if (unlikely(urg_data) && |
559 | READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) && |
		    !sock_flag(sk, SOCK_URGINLINE))
561 | target++; |
562 | |
563 | if (tcp_stream_is_readable(sk, target)) |
564 | mask |= EPOLLIN | EPOLLRDNORM; |
565 | |
566 | if (!(shutdown & SEND_SHUTDOWN)) { |
			if (__sk_stream_is_writeable(sk, 1)) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {  /* send SIGIO later */
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
572 | |
573 | /* Race breaker. If space is freed after |
574 | * wspace test but before the flags are set, |
575 | * IO signal will be lost. Memory barrier |
576 | * pairs with the input side. |
577 | */ |
578 | smp_mb__after_atomic(); |
				if (__sk_stream_is_writeable(sk, 1))
580 | mask |= EPOLLOUT | EPOLLWRNORM; |
581 | } |
582 | } else |
583 | mask |= EPOLLOUT | EPOLLWRNORM; |
584 | |
585 | if (urg_data & TCP_URG_VALID) |
586 | mask |= EPOLLPRI; |
587 | } else if (state == TCP_SYN_SENT && |
588 | inet_test_bit(DEFER_CONNECT, sk)) { |
589 | /* Active TCP fastopen socket with defer_connect |
590 | * Return EPOLLOUT so application can call write() |
591 | * in order for kernel to generate SYN+data |
592 | */ |
593 | mask |= EPOLLOUT | EPOLLWRNORM; |
594 | } |
595 | /* This barrier is coupled with smp_wmb() in tcp_reset() */ |
596 | smp_rmb(); |
597 | if (READ_ONCE(sk->sk_err) || |
	    !skb_queue_empty_lockless(&sk->sk_error_queue))
599 | mask |= EPOLLERR; |
600 | |
601 | return mask; |
602 | } |
603 | EXPORT_SYMBOL(tcp_poll); |
604 | |
605 | int tcp_ioctl(struct sock *sk, int cmd, int *karg) |
606 | { |
607 | struct tcp_sock *tp = tcp_sk(sk); |
608 | int answ; |
609 | bool slow; |
610 | |
611 | switch (cmd) { |
612 | case SIOCINQ: |
613 | if (sk->sk_state == TCP_LISTEN) |
614 | return -EINVAL; |
615 | |
616 | slow = lock_sock_fast(sk); |
617 | answ = tcp_inq(sk); |
618 | unlock_sock_fast(sk, slow); |
619 | break; |
620 | case SIOCATMARK: |
621 | answ = READ_ONCE(tp->urg_data) && |
622 | READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq); |
623 | break; |
624 | case SIOCOUTQ: |
625 | if (sk->sk_state == TCP_LISTEN) |
626 | return -EINVAL; |
627 | |
628 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) |
629 | answ = 0; |
630 | else |
631 | answ = READ_ONCE(tp->write_seq) - tp->snd_una; |
632 | break; |
633 | case SIOCOUTQNSD: |
634 | if (sk->sk_state == TCP_LISTEN) |
635 | return -EINVAL; |
636 | |
637 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) |
638 | answ = 0; |
639 | else |
640 | answ = READ_ONCE(tp->write_seq) - |
641 | READ_ONCE(tp->snd_nxt); |
642 | break; |
643 | default: |
644 | return -ENOIOCTLCMD; |
645 | } |
646 | |
647 | *karg = answ; |
648 | return 0; |
649 | } |
650 | EXPORT_SYMBOL(tcp_ioctl); |
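
/* Illustrative (userspace) view of the ioctls handled above:
 *
 *	int unread, unacked, unsent;
 *
 *	ioctl(fd, SIOCINQ, &unread);	// bytes ready to be read
 *	ioctl(fd, SIOCOUTQ, &unacked);	// queued bytes not yet ACKed
 *	ioctl(fd, SIOCOUTQNSD, &unsent);// queued bytes not yet sent
 *
 * SIOCOUTQ measures from snd_una (so it includes in-flight data), while
 * SIOCOUTQNSD measures from snd_nxt (data never transmitted yet).
 */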
651 | |
652 | void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) |
653 | { |
654 | TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; |
655 | tp->pushed_seq = tp->write_seq; |
656 | } |
657 | |
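/* Force a push once more than half of the largest window ever advertised
 * by the peer has been queued since the last pushed byte, so that corked
 * data (MSG_MORE) cannot sit in the write queue indefinitely.
 */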
658 | static inline bool forced_push(const struct tcp_sock *tp) |
659 | { |
660 | return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); |
661 | } |
662 | |
663 | void tcp_skb_entail(struct sock *sk, struct sk_buff *skb) |
664 | { |
665 | struct tcp_sock *tp = tcp_sk(sk); |
666 | struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); |
667 | |
668 | tcb->seq = tcb->end_seq = tp->write_seq; |
669 | tcb->tcp_flags = TCPHDR_ACK; |
670 | __skb_header_release(skb); |
671 | tcp_add_write_queue_tail(sk, skb); |
	sk_wmem_queued_add(sk, skb->truesize);
	sk_mem_charge(sk, skb->truesize);
674 | if (tp->nonagle & TCP_NAGLE_PUSH) |
675 | tp->nonagle &= ~TCP_NAGLE_PUSH; |
676 | |
677 | tcp_slow_start_after_idle_check(sk); |
678 | } |
679 | |
680 | static inline void tcp_mark_urg(struct tcp_sock *tp, int flags) |
681 | { |
682 | if (flags & MSG_OOB) |
683 | tp->snd_up = tp->write_seq; |
684 | } |
685 | |
686 | /* If a not yet filled skb is pushed, do not send it if |
687 | * we have data packets in Qdisc or NIC queues : |
688 | * Because TX completion will happen shortly, it gives a chance |
689 | * to coalesce future sendmsg() payload into this skb, without |
690 | * need for a timer, and with no latency trade off. |
691 | * As packets containing data payload have a bigger truesize |
692 | * than pure acks (dataless) packets, the last checks prevent |
693 | * autocorking if we only have an ACK in Qdisc/NIC queues, |
694 | * or if TX completion was delayed after we processed ACK packet. |
695 | */ |
696 | static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb, |
697 | int size_goal) |
698 | { |
699 | return skb->len < size_goal && |
700 | READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) && |
701 | !tcp_rtx_queue_empty(sk) && |
	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize &&
703 | tcp_skb_can_collapse_to(skb); |
704 | } |
705 | |
706 | void tcp_push(struct sock *sk, int flags, int mss_now, |
707 | int nonagle, int size_goal) |
708 | { |
709 | struct tcp_sock *tp = tcp_sk(sk); |
710 | struct sk_buff *skb; |
711 | |
712 | skb = tcp_write_queue_tail(sk); |
713 | if (!skb) |
714 | return; |
715 | if (!(flags & MSG_MORE) || forced_push(tp)) |
716 | tcp_mark_push(tp, skb); |
717 | |
718 | tcp_mark_urg(tp, flags); |
719 | |
720 | if (tcp_should_autocork(sk, skb, size_goal)) { |
721 | |
722 | /* avoid atomic op if TSQ_THROTTLED bit is already set */ |
723 | if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) { |
724 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING); |
			set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
726 | smp_mb__after_atomic(); |
727 | } |
728 | /* It is possible TX completion already happened |
729 | * before we set TSQ_THROTTLED. |
730 | */ |
		if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
732 | return; |
733 | } |
734 | |
735 | if (flags & MSG_MORE) |
736 | nonagle = TCP_NAGLE_CORK; |
737 | |
	__tcp_push_pending_frames(sk, mss_now, nonagle);
739 | } |
740 | |
741 | static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, |
742 | unsigned int offset, size_t len) |
743 | { |
744 | struct tcp_splice_state *tss = rd_desc->arg.data; |
745 | int ret; |
746 | |
	ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
			      min(rd_desc->count, len), tss->flags);
749 | if (ret > 0) |
750 | rd_desc->count -= ret; |
751 | return ret; |
752 | } |
753 | |
754 | static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss) |
755 | { |
756 | /* Store TCP splice context information in read_descriptor_t. */ |
757 | read_descriptor_t rd_desc = { |
758 | .arg.data = tss, |
759 | .count = tss->len, |
760 | }; |
761 | |
	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
763 | } |
764 | |
765 | /** |
766 | * tcp_splice_read - splice data from TCP socket to a pipe |
767 | * @sock: socket to splice from |
768 | * @ppos: position (not valid) |
769 | * @pipe: pipe to splice to |
770 | * @len: number of bytes to splice |
771 | * @flags: splice modifier flags |
772 | * |
773 | * Description: |
774 | * Will read pages from given socket and fill them into a pipe. |
775 | * |
776 | **/ |
777 | ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos, |
778 | struct pipe_inode_info *pipe, size_t len, |
779 | unsigned int flags) |
780 | { |
781 | struct sock *sk = sock->sk; |
782 | struct tcp_splice_state tss = { |
783 | .pipe = pipe, |
784 | .len = len, |
785 | .flags = flags, |
786 | }; |
787 | long timeo; |
788 | ssize_t spliced; |
789 | int ret; |
790 | |
791 | sock_rps_record_flow(sk); |
792 | /* |
793 | * We can't seek on a socket input |
794 | */ |
795 | if (unlikely(*ppos)) |
796 | return -ESPIPE; |
797 | |
798 | ret = spliced = 0; |
799 | |
800 | lock_sock(sk); |
801 | |
	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
805 | if (ret < 0) |
806 | break; |
807 | else if (!ret) { |
808 | if (spliced) |
809 | break; |
			if (sock_flag(sk, SOCK_DONE))
811 | break; |
812 | if (sk->sk_err) { |
813 | ret = sock_error(sk); |
814 | break; |
815 | } |
816 | if (sk->sk_shutdown & RCV_SHUTDOWN) |
817 | break; |
818 | if (sk->sk_state == TCP_CLOSE) { |
819 | /* |
820 | * This occurs when user tries to read |
821 | * from never connected socket. |
822 | */ |
823 | ret = -ENOTCONN; |
824 | break; |
825 | } |
826 | if (!timeo) { |
827 | ret = -EAGAIN; |
828 | break; |
829 | } |
830 | /* if __tcp_splice_read() got nothing while we have |
831 | * an skb in receive queue, we do not want to loop. |
832 | * This might happen with URG data. |
833 | */ |
			if (!skb_queue_empty(&sk->sk_receive_queue))
				break;
			ret = sk_wait_data(sk, &timeo, NULL);
837 | if (ret < 0) |
838 | break; |
839 | if (signal_pending(current)) { |
840 | ret = sock_intr_errno(timeo); |
841 | break; |
842 | } |
843 | continue; |
844 | } |
845 | tss.len -= ret; |
846 | spliced += ret; |
847 | |
848 | if (!tss.len || !timeo) |
849 | break; |
850 | release_sock(sk); |
851 | lock_sock(sk); |
852 | |
853 | if (sk->sk_err || sk->sk_state == TCP_CLOSE || |
854 | (sk->sk_shutdown & RCV_SHUTDOWN) || |
855 | signal_pending(current)) |
856 | break; |
857 | } |
858 | |
859 | release_sock(sk); |
860 | |
861 | if (spliced) |
862 | return spliced; |
863 | |
864 | return ret; |
865 | } |
866 | EXPORT_SYMBOL(tcp_splice_read); |
867 | |
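/* Allocate an fclone skb for the write queue, reserving room for the
 * largest possible TCP header. With @force_schedule the memory is
 * accounted unconditionally (used for the first skb when both queues are
 * empty); otherwise the allocation backs off under memory pressure and
 * the send buffer is moderated.
 */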
868 | struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp, |
869 | bool force_schedule) |
870 | { |
871 | struct sk_buff *skb; |
872 | |
	skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp);
874 | if (likely(skb)) { |
875 | bool mem_scheduled; |
876 | |
877 | skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); |
878 | if (force_schedule) { |
879 | mem_scheduled = true; |
			sk_forced_mem_schedule(sk, skb->truesize);
		} else {
			mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
883 | } |
884 | if (likely(mem_scheduled)) { |
885 | skb_reserve(skb, MAX_TCP_HEADER); |
886 | skb->ip_summed = CHECKSUM_PARTIAL; |
			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
888 | return skb; |
889 | } |
890 | __kfree_skb(skb); |
891 | } else { |
892 | sk->sk_prot->enter_memory_pressure(sk); |
893 | sk_stream_moderate_sndbuf(sk); |
894 | } |
895 | return NULL; |
896 | } |
897 | |
898 | static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, |
899 | int large_allowed) |
900 | { |
901 | struct tcp_sock *tp = tcp_sk(sk); |
902 | u32 new_size_goal, size_goal; |
903 | |
904 | if (!large_allowed) |
905 | return mss_now; |
906 | |
907 | /* Note : tcp_tso_autosize() will eventually split this later */ |
	new_size_goal = tcp_bound_to_half_wnd(tp, sk->sk_gso_max_size);
909 | |
910 | /* We try hard to avoid divides here */ |
911 | size_goal = tp->gso_segs * mss_now; |
912 | if (unlikely(new_size_goal < size_goal || |
913 | new_size_goal >= size_goal + mss_now)) { |
914 | tp->gso_segs = min_t(u16, new_size_goal / mss_now, |
915 | sk->sk_gso_max_segs); |
916 | size_goal = tp->gso_segs * mss_now; |
917 | } |
918 | |
919 | return max(size_goal, mss_now); |
920 | } |
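
/* Example: with mss_now = 1448 and a GSO limit of 64KB (and a receive
 * window that does not clamp it further), new_size_goal is rounded down
 * to a whole number of segments, so size_goal becomes 45 * 1448 = 65160
 * bytes; it is never allowed to drop below a single MSS.
 */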
921 | |
922 | int tcp_send_mss(struct sock *sk, int *size_goal, int flags) |
923 | { |
924 | int mss_now; |
925 | |
926 | mss_now = tcp_current_mss(sk); |
	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
928 | |
929 | return mss_now; |
930 | } |
931 | |
932 | /* In some cases, sendmsg() could have added an skb to the write queue, |
933 | * but failed adding payload on it. We need to remove it to consume less |
934 | * memory, but more importantly be able to generate EPOLLOUT for Edge Trigger |
935 | * epoll() users. Another reason is that tcp_write_xmit() does not like |
936 | * finding an empty skb in the write queue. |
937 | */ |
938 | void tcp_remove_empty_skb(struct sock *sk) |
939 | { |
940 | struct sk_buff *skb = tcp_write_queue_tail(sk); |
941 | |
942 | if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { |
943 | tcp_unlink_write_queue(skb, sk); |
944 | if (tcp_write_queue_empty(sk)) |
			tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
946 | tcp_wmem_free_skb(sk, skb); |
947 | } |
948 | } |
949 | |
950 | /* skb changing from pure zc to mixed, must charge zc */ |
951 | static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb) |
952 | { |
953 | if (unlikely(skb_zcopy_pure(skb))) { |
		u32 extra = skb->truesize -
			    SKB_TRUESIZE(skb_end_offset(skb));

		if (!sk_wmem_schedule(sk, extra))
			return -ENOMEM;

		sk_mem_charge(sk, extra);
961 | skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY; |
962 | } |
963 | return 0; |
964 | } |
965 | |
966 | |
967 | int tcp_wmem_schedule(struct sock *sk, int copy) |
968 | { |
969 | int left; |
970 | |
971 | if (likely(sk_wmem_schedule(sk, copy))) |
972 | return copy; |
973 | |
974 | /* We could be in trouble if we have nothing queued. |
975 | * Use whatever is left in sk->sk_forward_alloc and tcp_wmem[0] |
976 | * to guarantee some progress. |
977 | */ |
978 | left = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[0]) - sk->sk_wmem_queued; |
979 | if (left > 0) |
980 | sk_forced_mem_schedule(sk, min(left, copy)); |
981 | return min(copy, sk->sk_forward_alloc); |
982 | } |
983 | |
984 | void tcp_free_fastopen_req(struct tcp_sock *tp) |
985 | { |
986 | if (tp->fastopen_req) { |
		kfree(tp->fastopen_req);
988 | tp->fastopen_req = NULL; |
989 | } |
990 | } |
991 | |
992 | int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied, |
993 | size_t size, struct ubuf_info *uarg) |
994 | { |
995 | struct tcp_sock *tp = tcp_sk(sk); |
996 | struct inet_sock *inet = inet_sk(sk); |
997 | struct sockaddr *uaddr = msg->msg_name; |
998 | int err, flags; |
999 | |
1000 | if (!(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & |
1001 | TFO_CLIENT_ENABLE) || |
1002 | (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) && |
1003 | uaddr->sa_family == AF_UNSPEC)) |
1004 | return -EOPNOTSUPP; |
1005 | if (tp->fastopen_req) |
1006 | return -EALREADY; /* Another Fast Open is in progress */ |
1007 | |
	tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
				   sk->sk_allocation);
1010 | if (unlikely(!tp->fastopen_req)) |
1011 | return -ENOBUFS; |
1012 | tp->fastopen_req->data = msg; |
1013 | tp->fastopen_req->size = size; |
1014 | tp->fastopen_req->uarg = uarg; |
1015 | |
1016 | if (inet_test_bit(DEFER_CONNECT, sk)) { |
1017 | err = tcp_connect(sk); |
1018 | /* Same failure procedure as in tcp_v4/6_connect */ |
1019 | if (err) { |
			tcp_set_state(sk, TCP_CLOSE);
1021 | inet->inet_dport = 0; |
1022 | sk->sk_route_caps = 0; |
1023 | } |
1024 | } |
1025 | flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0; |
	err = __inet_stream_connect(sk->sk_socket, uaddr,
				    msg->msg_namelen, flags, 1);
1028 | /* fastopen_req could already be freed in __inet_stream_connect |
1029 | * if the connection times out or gets rst |
1030 | */ |
1031 | if (tp->fastopen_req) { |
1032 | *copied = tp->fastopen_req->copied; |
1033 | tcp_free_fastopen_req(tp); |
1034 | inet_clear_bit(DEFER_CONNECT, sk); |
1035 | } |
1036 | return err; |
1037 | } |
1038 | |
1039 | int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) |
1040 | { |
1041 | struct tcp_sock *tp = tcp_sk(sk); |
1042 | struct ubuf_info *uarg = NULL; |
1043 | struct sk_buff *skb; |
1044 | struct sockcm_cookie sockc; |
1045 | int flags, err, copied = 0; |
1046 | int mss_now = 0, size_goal, copied_syn = 0; |
1047 | int process_backlog = 0; |
1048 | int zc = 0; |
1049 | long timeo; |
1050 | |
1051 | flags = msg->msg_flags; |
1052 | |
1053 | if ((flags & MSG_ZEROCOPY) && size) { |
1054 | if (msg->msg_ubuf) { |
1055 | uarg = msg->msg_ubuf; |
1056 | if (sk->sk_route_caps & NETIF_F_SG) |
1057 | zc = MSG_ZEROCOPY; |
		} else if (sock_flag(sk, SOCK_ZEROCOPY)) {
			skb = tcp_write_queue_tail(sk);
			uarg = msg_zerocopy_realloc(sk, size, skb_zcopy(skb));
1061 | if (!uarg) { |
1062 | err = -ENOBUFS; |
1063 | goto out_err; |
1064 | } |
1065 | if (sk->sk_route_caps & NETIF_F_SG) |
1066 | zc = MSG_ZEROCOPY; |
1067 | else |
1068 | uarg_to_msgzc(uarg)->zerocopy = 0; |
1069 | } |
1070 | } else if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES) && size) { |
1071 | if (sk->sk_route_caps & NETIF_F_SG) |
1072 | zc = MSG_SPLICE_PAGES; |
1073 | } |
1074 | |
1075 | if (unlikely(flags & MSG_FASTOPEN || |
1076 | inet_test_bit(DEFER_CONNECT, sk)) && |
1077 | !tp->repair) { |
		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size, uarg);
1079 | if (err == -EINPROGRESS && copied_syn > 0) |
1080 | goto out; |
1081 | else if (err) |
1082 | goto out_err; |
1083 | } |
1084 | |
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1086 | |
1087 | tcp_rate_check_app_limited(sk); /* is sending application-limited? */ |
1088 | |
1089 | /* Wait for a connection to finish. One exception is TCP Fast Open |
1090 | * (passive side) where data is allowed to be sent before a connection |
1091 | * is fully established. |
1092 | */ |
1093 | if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && |
1094 | !tcp_passive_fastopen(sk)) { |
		err = sk_stream_wait_connect(sk, &timeo);
1096 | if (err != 0) |
1097 | goto do_error; |
1098 | } |
1099 | |
1100 | if (unlikely(tp->repair)) { |
1101 | if (tp->repair_queue == TCP_RECV_QUEUE) { |
1102 | copied = tcp_send_rcvq(sk, msg, size); |
1103 | goto out_nopush; |
1104 | } |
1105 | |
1106 | err = -EINVAL; |
1107 | if (tp->repair_queue == TCP_NO_QUEUE) |
1108 | goto out_err; |
1109 | |
1110 | /* 'common' sending to sendq */ |
1111 | } |
1112 | |
	sockcm_init(&sockc, sk);
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
1116 | if (unlikely(err)) { |
1117 | err = -EINVAL; |
1118 | goto out_err; |
1119 | } |
1120 | } |
1121 | |
1122 | /* This should be in poll */ |
1123 | sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
1124 | |
1125 | /* Ok commence sending. */ |
1126 | copied = 0; |
1127 | |
1128 | restart: |
	mss_now = tcp_send_mss(sk, &size_goal, flags);
1130 | |
1131 | err = -EPIPE; |
1132 | if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) |
1133 | goto do_error; |
1134 | |
1135 | while (msg_data_left(msg)) { |
1136 | ssize_t copy = 0; |
1137 | |
1138 | skb = tcp_write_queue_tail(sk); |
1139 | if (skb) |
1140 | copy = size_goal - skb->len; |
1141 | |
1142 | if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) { |
1143 | bool first_skb; |
1144 | |
1145 | new_segment: |
1146 | if (!sk_stream_memory_free(sk)) |
1147 | goto wait_for_space; |
1148 | |
1149 | if (unlikely(process_backlog >= 16)) { |
1150 | process_backlog = 0; |
1151 | if (sk_flush_backlog(sk)) |
1152 | goto restart; |
1153 | } |
1154 | first_skb = tcp_rtx_and_write_queues_empty(sk); |
			skb = tcp_stream_alloc_skb(sk, sk->sk_allocation,
						   first_skb);
1157 | if (!skb) |
1158 | goto wait_for_space; |
1159 | |
1160 | process_backlog++; |
1161 | |
1162 | tcp_skb_entail(sk, skb); |
1163 | copy = size_goal; |
1164 | |
1165 | /* All packets are restored as if they have |
1166 | * already been sent. skb_mstamp_ns isn't set to |
1167 | * avoid wrong rtt estimation. |
1168 | */ |
1169 | if (tp->repair) |
1170 | TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED; |
1171 | } |
1172 | |
1173 | /* Try to append data to the end of skb. */ |
1174 | if (copy > msg_data_left(msg)) |
1175 | copy = msg_data_left(msg); |
1176 | |
1177 | if (zc == 0) { |
1178 | bool merge = true; |
1179 | int i = skb_shinfo(skb)->nr_frags; |
1180 | struct page_frag *pfrag = sk_page_frag(sk); |
1181 | |
1182 | if (!sk_page_frag_refill(sk, pfrag)) |
1183 | goto wait_for_space; |
1184 | |
			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
1187 | if (i >= READ_ONCE(sysctl_max_skb_frags)) { |
1188 | tcp_mark_push(tp, skb); |
1189 | goto new_segment; |
1190 | } |
1191 | merge = false; |
1192 | } |
1193 | |
1194 | copy = min_t(int, copy, pfrag->size - pfrag->offset); |
1195 | |
1196 | if (unlikely(skb_zcopy_pure(skb) || skb_zcopy_managed(skb))) { |
1197 | if (tcp_downgrade_zcopy_pure(sk, skb)) |
1198 | goto wait_for_space; |
1199 | skb_zcopy_downgrade_managed(skb); |
1200 | } |
1201 | |
1202 | copy = tcp_wmem_schedule(sk, copy); |
1203 | if (!copy) |
1204 | goto wait_for_space; |
1205 | |
			err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
						       pfrag->page,
						       pfrag->offset,
						       copy);
1210 | if (err) |
1211 | goto do_error; |
1212 | |
1213 | /* Update the skb. */ |
1214 | if (merge) { |
				skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1216 | } else { |
				skb_fill_page_desc(skb, i, pfrag->page,
						   pfrag->offset, copy);
				page_ref_inc(pfrag->page);
1220 | } |
1221 | pfrag->offset += copy; |
1222 | } else if (zc == MSG_ZEROCOPY) { |
1223 | /* First append to a fragless skb builds initial |
1224 | * pure zerocopy skb |
1225 | */ |
1226 | if (!skb->len) |
1227 | skb_shinfo(skb)->flags |= SKBFL_PURE_ZEROCOPY; |
1228 | |
1229 | if (!skb_zcopy_pure(skb)) { |
1230 | copy = tcp_wmem_schedule(sk, copy); |
1231 | if (!copy) |
1232 | goto wait_for_space; |
1233 | } |
1234 | |
			err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
1236 | if (err == -EMSGSIZE || err == -EEXIST) { |
1237 | tcp_mark_push(tp, skb); |
1238 | goto new_segment; |
1239 | } |
1240 | if (err < 0) |
1241 | goto do_error; |
1242 | copy = err; |
1243 | } else if (zc == MSG_SPLICE_PAGES) { |
1244 | /* Splice in data if we can; copy if we can't. */ |
1245 | if (tcp_downgrade_zcopy_pure(sk, skb)) |
1246 | goto wait_for_space; |
1247 | copy = tcp_wmem_schedule(sk, copy); |
1248 | if (!copy) |
1249 | goto wait_for_space; |
1250 | |
			err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
						   sk->sk_allocation);
1253 | if (err < 0) { |
1254 | if (err == -EMSGSIZE) { |
1255 | tcp_mark_push(tp, skb); |
1256 | goto new_segment; |
1257 | } |
1258 | goto do_error; |
1259 | } |
1260 | copy = err; |
1261 | |
1262 | if (!(flags & MSG_NO_SHARED_FRAGS)) |
1263 | skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG; |
1264 | |
			sk_wmem_queued_add(sk, copy);
			sk_mem_charge(sk, copy);
1267 | } |
1268 | |
1269 | if (!copied) |
1270 | TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; |
1271 | |
1272 | WRITE_ONCE(tp->write_seq, tp->write_seq + copy); |
1273 | TCP_SKB_CB(skb)->end_seq += copy; |
		tcp_skb_pcount_set(skb, 0);
1275 | |
1276 | copied += copy; |
1277 | if (!msg_data_left(msg)) { |
1278 | if (unlikely(flags & MSG_EOR)) |
1279 | TCP_SKB_CB(skb)->eor = 1; |
1280 | goto out; |
1281 | } |
1282 | |
1283 | if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair)) |
1284 | continue; |
1285 | |
1286 | if (forced_push(tp)) { |
1287 | tcp_mark_push(tp, skb); |
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
1289 | } else if (skb == tcp_send_head(sk)) |
1290 | tcp_push_one(sk, mss_now); |
1291 | continue; |
1292 | |
1293 | wait_for_space: |
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		tcp_remove_empty_skb(sk);
		if (copied)
			tcp_push(sk, flags & ~MSG_MORE, mss_now,
1298 | TCP_NAGLE_PUSH, size_goal); |
1299 | |
		err = sk_stream_wait_memory(sk, &timeo);
		if (err != 0)
			goto do_error;

		mss_now = tcp_send_mss(sk, &size_goal, flags);
1305 | } |
1306 | |
1307 | out: |
1308 | if (copied) { |
		tcp_tx_timestamp(sk, sockc.tsflags);
		tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
1311 | } |
1312 | out_nopush: |
1313 | /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */ |
1314 | if (uarg && !msg->msg_ubuf) |
1315 | net_zcopy_put(uarg); |
1316 | return copied + copied_syn; |
1317 | |
1318 | do_error: |
1319 | tcp_remove_empty_skb(sk); |
1320 | |
1321 | if (copied + copied_syn) |
1322 | goto out; |
1323 | out_err: |
1324 | /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */ |
1325 | if (uarg && !msg->msg_ubuf) |
		net_zcopy_put_abort(uarg, true);
	err = sk_stream_error(sk, flags, err);
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) {
		sk->sk_write_space(sk);
		tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
1332 | } |
1333 | return err; |
1334 | } |
1335 | EXPORT_SYMBOL_GPL(tcp_sendmsg_locked); |
1336 | |
1337 | int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) |
1338 | { |
1339 | int ret; |
1340 | |
1341 | lock_sock(sk); |
1342 | ret = tcp_sendmsg_locked(sk, msg, size); |
1343 | release_sock(sk); |
1344 | |
1345 | return ret; |
1346 | } |
1347 | EXPORT_SYMBOL(tcp_sendmsg); |
1348 | |
1349 | void tcp_splice_eof(struct socket *sock) |
1350 | { |
1351 | struct sock *sk = sock->sk; |
1352 | struct tcp_sock *tp = tcp_sk(sk); |
1353 | int mss_now, size_goal; |
1354 | |
1355 | if (!tcp_write_queue_tail(sk)) |
1356 | return; |
1357 | |
1358 | lock_sock(sk); |
	mss_now = tcp_send_mss(sk, &size_goal, 0);
	tcp_push(sk, 0, mss_now, tp->nonagle, size_goal);
1361 | release_sock(sk); |
1362 | } |
1363 | EXPORT_SYMBOL_GPL(tcp_splice_eof); |
1364 | |
1365 | /* |
1366 | * Handle reading urgent data. BSD has very simple semantics for |
1367 | * this, no blocking and very strange errors 8) |
1368 | */ |
1369 | |
1370 | static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags) |
1371 | { |
1372 | struct tcp_sock *tp = tcp_sk(sk); |
1373 | |
1374 | /* No URG data to read. */ |
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
1380 | return -ENOTCONN; |
1381 | |
1382 | if (tp->urg_data & TCP_URG_VALID) { |
1383 | int err = 0; |
1384 | char c = tp->urg_data; |
1385 | |
1386 | if (!(flags & MSG_PEEK)) |
1387 | WRITE_ONCE(tp->urg_data, TCP_URG_READ); |
1388 | |
1389 | /* Read urgent data. */ |
1390 | msg->msg_flags |= MSG_OOB; |
1391 | |
1392 | if (len > 0) { |
1393 | if (!(flags & MSG_TRUNC)) |
				err = memcpy_to_msg(msg, &c, 1);
1395 | len = 1; |
1396 | } else |
1397 | msg->msg_flags |= MSG_TRUNC; |
1398 | |
1399 | return err ? -EFAULT : len; |
1400 | } |
1401 | |
1402 | if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) |
1403 | return 0; |
1404 | |
1405 | /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and |
1406 | * the available implementations agree in this case: |
1407 | * this call should never block, independent of the |
1408 | * blocking state of the socket. |
1409 | * Mike <pall@rz.uni-karlsruhe.de> |
1410 | */ |
1411 | return -EAGAIN; |
1412 | } |
1413 | |
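/* Peek at data sitting in the retransmit and write queues without
 * consuming it; used for MSG_PEEK on the send queue in repair mode.
 */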
1414 | static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) |
1415 | { |
1416 | struct sk_buff *skb; |
1417 | int copied = 0, err = 0; |
1418 | |
1419 | /* XXX -- need to support SO_PEEK_OFF */ |
1420 | |
1421 | skb_rbtree_walk(skb, &sk->tcp_rtx_queue) { |
		err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
1423 | if (err) |
1424 | return err; |
1425 | copied += skb->len; |
1426 | } |
1427 | |
1428 | skb_queue_walk(&sk->sk_write_queue, skb) { |
		err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
1430 | if (err) |
1431 | break; |
1432 | |
1433 | copied += skb->len; |
1434 | } |
1435 | |
1436 | return err ?: copied; |
1437 | } |
1438 | |
1439 | /* Clean up the receive buffer for full frames taken by the user, |
1440 | * then send an ACK if necessary. COPIED is the number of bytes |
1441 | * tcp_recvmsg has given to the user so far, it speeds up the |
1442 | * calculation of whether or not we must ACK for the sake of |
1443 | * a window update. |
1444 | */ |
1445 | void __tcp_cleanup_rbuf(struct sock *sk, int copied) |
1446 | { |
1447 | struct tcp_sock *tp = tcp_sk(sk); |
1448 | bool time_to_ack = false; |
1449 | |
1450 | if (inet_csk_ack_scheduled(sk)) { |
1451 | const struct inet_connection_sock *icsk = inet_csk(sk); |
1452 | |
1453 | if (/* Once-per-two-segments ACK was not sent by tcp_input.c */ |
1454 | tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || |
1455 | /* |
1456 | * If this read emptied read buffer, we send ACK, if |
1457 | * connection is not bidirectional, user drained |
1458 | * receive buffer and there was a small segment |
1459 | * in queue. |
1460 | */ |
1461 | (copied > 0 && |
1462 | ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) || |
1463 | ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && |
1464 | !inet_csk_in_pingpong_mode(sk))) && |
		     !atomic_read(&sk->sk_rmem_alloc)))
1466 | time_to_ack = true; |
1467 | } |
1468 | |
1469 | /* We send an ACK if we can now advertise a non-zero window |
1470 | * which has been raised "significantly". |
1471 | * |
1472 | * Even if window raised up to infinity, do not send window open ACK |
1473 | * in states, where we will not receive more. It is useless. |
1474 | */ |
1475 | if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) { |
1476 | __u32 rcv_window_now = tcp_receive_window(tp); |
1477 | |
1478 | /* Optimize, __tcp_select_window() is not cheap. */ |
1479 | if (2*rcv_window_now <= tp->window_clamp) { |
1480 | __u32 new_window = __tcp_select_window(sk); |
1481 | |
1482 | /* Send ACK now, if this read freed lots of space |
1483 | * in our buffer. Certainly, new_window is new window. |
1484 | * We can advertise it now, if it is not less than current one. |
1485 | * "Lots" means "at least twice" here. |
1486 | */ |
1487 | if (new_window && new_window >= 2 * rcv_window_now) |
1488 | time_to_ack = true; |
1489 | } |
1490 | } |
1491 | if (time_to_ack) |
1492 | tcp_send_ack(sk); |
1493 | } |
1494 | |
1495 | void tcp_cleanup_rbuf(struct sock *sk, int copied) |
1496 | { |
	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
	struct tcp_sock *tp = tcp_sk(sk);

	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
	     "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1502 | tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); |
1503 | __tcp_cleanup_rbuf(sk, copied); |
1504 | } |
1505 | |
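/* Unlink @skb from the receive queue and free it, preferring
 * skb_attempt_defer_free() when the skb is still charged to the socket
 * through sock_rfree().
 */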
1506 | static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb) |
1507 | { |
	__skb_unlink(skb, &sk->sk_receive_queue);
1509 | if (likely(skb->destructor == sock_rfree)) { |
1510 | sock_rfree(skb); |
1511 | skb->destructor = NULL; |
1512 | skb->sk = NULL; |
1513 | return skb_attempt_defer_free(skb); |
1514 | } |
1515 | __kfree_skb(skb); |
1516 | } |
1517 | |
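/* Find the skb in the receive queue covering sequence @seq and return it
 * together with the offset of @seq within it. Fully consumed skbs found
 * on the way are unlinked and freed; an skb carrying a FIN is returned
 * even if @seq points past its payload.
 */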
1518 | struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) |
1519 | { |
1520 | struct sk_buff *skb; |
1521 | u32 offset; |
1522 | |
	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
			pr_err_once("%s: found a SYN, please report !\n", __func__);
1527 | offset--; |
1528 | } |
1529 | if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) { |
1530 | *off = offset; |
1531 | return skb; |
1532 | } |
1533 | /* This looks weird, but this can happen if TCP collapsing |
		 * split a fat GRO packet, while we released socket lock
1535 | * in skb_splice_bits() |
1536 | */ |
1537 | tcp_eat_recv_skb(sk, skb); |
1538 | } |
1539 | return NULL; |
1540 | } |
1541 | EXPORT_SYMBOL(tcp_recv_skb); |
1542 | |
1543 | /* |
1544 | * This routine provides an alternative to tcp_recvmsg() for routines |
1545 | * that would like to handle copying from skbuffs directly in 'sendfile' |
1546 | * fashion. |
1547 | * Note: |
1548 | * - It is assumed that the socket was locked by the caller. |
1549 | * - The routine does not block. |
1550 | * - At present, there is no support for reading OOB data |
1551 | * or for 'peeking' the socket using this routine |
1552 | * (although both would be easy to implement). |
1553 | */ |
1554 | int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, |
1555 | sk_read_actor_t recv_actor) |
1556 | { |
1557 | struct sk_buff *skb; |
1558 | struct tcp_sock *tp = tcp_sk(sk); |
1559 | u32 seq = tp->copied_seq; |
1560 | u32 offset; |
1561 | int copied = 0; |
1562 | |
1563 | if (sk->sk_state == TCP_LISTEN) |
1564 | return -ENOTCONN; |
1565 | while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { |
1566 | if (offset < skb->len) { |
1567 | int used; |
1568 | size_t len; |
1569 | |
1570 | len = skb->len - offset; |
1571 | /* Stop reading if we hit a patch of urgent data */ |
1572 | if (unlikely(tp->urg_data)) { |
1573 | u32 urg_offset = tp->urg_seq - seq; |
1574 | if (urg_offset < len) |
1575 | len = urg_offset; |
1576 | if (!len) |
1577 | break; |
1578 | } |
1579 | used = recv_actor(desc, skb, offset, len); |
1580 | if (used <= 0) { |
1581 | if (!copied) |
1582 | copied = used; |
1583 | break; |
1584 | } |
1585 | if (WARN_ON_ONCE(used > len)) |
1586 | used = len; |
1587 | seq += used; |
1588 | copied += used; |
1589 | offset += used; |
1590 | |
1591 | /* If recv_actor drops the lock (e.g. TCP splice |
1592 | * receive) the skb pointer might be invalid when |
1593 | * getting here: tcp_collapse might have deleted it |
1594 | * while aggregating skbs from the socket queue. |
1595 | */ |
1596 | skb = tcp_recv_skb(sk, seq - 1, &offset); |
1597 | if (!skb) |
1598 | break; |
1599 | /* TCP coalescing might have appended data to the skb. |
1600 | * Try to splice more frags |
1601 | */ |
1602 | if (offset + 1 != skb->len) |
1603 | continue; |
1604 | } |
1605 | if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { |
1606 | tcp_eat_recv_skb(sk, skb); |
1607 | ++seq; |
1608 | break; |
1609 | } |
1610 | tcp_eat_recv_skb(sk, skb); |
1611 | if (!desc->count) |
1612 | break; |
1613 | WRITE_ONCE(tp->copied_seq, seq); |
1614 | } |
1615 | WRITE_ONCE(tp->copied_seq, seq); |
1616 | |
1617 | tcp_rcv_space_adjust(sk); |
1618 | |
1619 | /* Clean up data we have read: This will do ACK frames. */ |
1620 | if (copied > 0) { |
1621 | tcp_recv_skb(sk, seq, &offset); |
1622 | tcp_cleanup_rbuf(sk, copied); |
1623 | } |
1624 | return copied; |
1625 | } |
1626 | EXPORT_SYMBOL(tcp_read_sock); |
1627 | |
1628 | int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor) |
1629 | { |
1630 | struct sk_buff *skb; |
1631 | int copied = 0; |
1632 | |
1633 | if (sk->sk_state == TCP_LISTEN) |
1634 | return -ENOTCONN; |
1635 | |
	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
1637 | u8 tcp_flags; |
1638 | int used; |
1639 | |
		__skb_unlink(skb, &sk->sk_receive_queue);
1641 | WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk)); |
1642 | tcp_flags = TCP_SKB_CB(skb)->tcp_flags; |
1643 | used = recv_actor(sk, skb); |
1644 | if (used < 0) { |
1645 | if (!copied) |
1646 | copied = used; |
1647 | break; |
1648 | } |
1649 | copied += used; |
1650 | |
1651 | if (tcp_flags & TCPHDR_FIN) |
1652 | break; |
1653 | } |
1654 | return copied; |
1655 | } |
1656 | EXPORT_SYMBOL(tcp_read_skb); |
1657 | |
1658 | void tcp_read_done(struct sock *sk, size_t len) |
1659 | { |
1660 | struct tcp_sock *tp = tcp_sk(sk); |
1661 | u32 seq = tp->copied_seq; |
1662 | struct sk_buff *skb; |
1663 | size_t left; |
1664 | u32 offset; |
1665 | |
1666 | if (sk->sk_state == TCP_LISTEN) |
1667 | return; |
1668 | |
1669 | left = len; |
1670 | while (left && (skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { |
1671 | int used; |
1672 | |
1673 | used = min_t(size_t, skb->len - offset, left); |
1674 | seq += used; |
1675 | left -= used; |
1676 | |
1677 | if (skb->len > offset + used) |
1678 | break; |
1679 | |
1680 | if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { |
1681 | tcp_eat_recv_skb(sk, skb); |
1682 | ++seq; |
1683 | break; |
1684 | } |
1685 | tcp_eat_recv_skb(sk, skb); |
1686 | } |
1687 | WRITE_ONCE(tp->copied_seq, seq); |
1688 | |
1689 | tcp_rcv_space_adjust(sk); |
1690 | |
1691 | /* Clean up data we have read: This will do ACK frames. */ |
1692 | if (left != len) |
		tcp_cleanup_rbuf(sk, len - left);
1694 | } |
1695 | EXPORT_SYMBOL(tcp_read_done); |
1696 | |
1697 | int tcp_peek_len(struct socket *sock) |
1698 | { |
	return tcp_inq(sock->sk);
1700 | } |
1701 | EXPORT_SYMBOL(tcp_peek_len); |
1702 | |
1703 | /* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */ |
1704 | int tcp_set_rcvlowat(struct sock *sk, int val) |
1705 | { |
1706 | int space, cap; |
1707 | |
1708 | if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) |
1709 | cap = sk->sk_rcvbuf >> 1; |
1710 | else |
1711 | cap = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1; |
1712 | val = min(val, cap); |
1713 | WRITE_ONCE(sk->sk_rcvlowat, val ? : 1); |
1714 | |
1715 | /* Check if we need to signal EPOLLIN right now */ |
1716 | tcp_data_ready(sk); |
1717 | |
1718 | if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) |
1719 | return 0; |
1720 | |
	space = tcp_space_from_win(sk, val);
1722 | if (space > sk->sk_rcvbuf) { |
1723 | WRITE_ONCE(sk->sk_rcvbuf, space); |
1724 | tcp_sk(sk)->window_clamp = val; |
1725 | } |
1726 | return 0; |
1727 | } |
1728 | EXPORT_SYMBOL(tcp_set_rcvlowat); |
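
/* User space reaches this through the standard SO_RCVLOWAT socket option;
 * a minimal sketch (illustrative only):
 *
 *	int lowat = 64 * 1024;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat));
 *
 * poll()/epoll then only signal EPOLLIN once at least that many bytes are
 * queued (subject to the capping done above).
 */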
1729 | |
1730 | void tcp_update_recv_tstamps(struct sk_buff *skb, |
1731 | struct scm_timestamping_internal *tss) |
1732 | { |
1733 | if (skb->tstamp) |
1734 | tss->ts[0] = ktime_to_timespec64(skb->tstamp); |
1735 | else |
1736 | tss->ts[0] = (struct timespec64) {0}; |
1737 | |
1738 | if (skb_hwtstamps(skb)->hwtstamp) |
1739 | tss->ts[2] = ktime_to_timespec64(skb_hwtstamps(skb)->hwtstamp); |
1740 | else |
1741 | tss->ts[2] = (struct timespec64) {0}; |
1742 | } |
1743 | |
1744 | #ifdef CONFIG_MMU |
1745 | static const struct vm_operations_struct tcp_vm_ops = { |
1746 | }; |
1747 | |
1748 | int tcp_mmap(struct file *file, struct socket *sock, |
1749 | struct vm_area_struct *vma) |
1750 | { |
1751 | if (vma->vm_flags & (VM_WRITE | VM_EXEC)) |
1752 | return -EPERM; |
1753 | vm_flags_clear(vma, VM_MAYWRITE | VM_MAYEXEC); |
1754 | |
1755 | /* Instruct vm_insert_page() to not mmap_read_lock(mm) */ |
1756 | vm_flags_set(vma, VM_MIXEDMAP); |
1757 | |
1758 | vma->vm_ops = &tcp_vm_ops; |
1759 | return 0; |
1760 | } |
1761 | EXPORT_SYMBOL(tcp_mmap); |
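
/* User space maps a read-only region backed by the TCP socket and later
 * populates it with getsockopt(TCP_ZEROCOPY_RECEIVE) (see
 * tcp_zerocopy_receive() below). A minimal sketch of the mmap() side,
 * illustrative only:
 *
 *	addr = mmap(NULL, chunk, PROT_READ, MAP_SHARED, fd, 0);
 *
 * The mapping must stay read-only, which is why VM_WRITE/VM_EXEC are
 * rejected above.
 */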
1762 | |
1763 | static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb, |
1764 | u32 *offset_frag) |
1765 | { |
1766 | skb_frag_t *frag; |
1767 | |
1768 | if (unlikely(offset_skb >= skb->len)) |
1769 | return NULL; |
1770 | |
1771 | offset_skb -= skb_headlen(skb); |
1772 | if ((int)offset_skb < 0 || skb_has_frag_list(skb)) |
1773 | return NULL; |
1774 | |
1775 | frag = skb_shinfo(skb)->frags; |
1776 | while (offset_skb) { |
1777 | if (skb_frag_size(frag) > offset_skb) { |
1778 | *offset_frag = offset_skb; |
1779 | return frag; |
1780 | } |
1781 | offset_skb -= skb_frag_size(frag); |
1782 | ++frag; |
1783 | } |
1784 | *offset_frag = 0; |
1785 | return frag; |
1786 | } |
1787 | |
1788 | static bool can_map_frag(const skb_frag_t *frag) |
1789 | { |
1790 | struct page *page; |
1791 | |
1792 | if (skb_frag_size(frag) != PAGE_SIZE || skb_frag_off(frag)) |
1793 | return false; |
1794 | |
1795 | page = skb_frag_page(frag); |
1796 | |
1797 | if (PageCompound(page) || page->mapping) |
1798 | return false; |
1799 | |
1800 | return true; |
1801 | } |
1802 | |
1803 | static int find_next_mappable_frag(const skb_frag_t *frag, |
1804 | int remaining_in_skb) |
1805 | { |
1806 | int offset = 0; |
1807 | |
1808 | if (likely(can_map_frag(frag))) |
1809 | return 0; |
1810 | |
1811 | while (offset < remaining_in_skb && !can_map_frag(frag)) { |
1812 | offset += skb_frag_size(frag); |
1813 | ++frag; |
1814 | } |
1815 | return offset; |
1816 | } |
1817 | |
1818 | static void tcp_zerocopy_set_hint_for_skb(struct sock *sk, |
1819 | struct tcp_zerocopy_receive *zc, |
1820 | struct sk_buff *skb, u32 offset) |
1821 | { |
1822 | u32 frag_offset, partial_frag_remainder = 0; |
1823 | int mappable_offset; |
1824 | skb_frag_t *frag; |
1825 | |
1826 | /* worst case: skip to next skb. try to improve on this case below */ |
1827 | zc->recv_skip_hint = skb->len - offset; |
1828 | |
1829 | /* Find the frag containing this offset (and how far into that frag) */ |
	frag = skb_advance_to_frag(skb, offset, &frag_offset);
1831 | if (!frag) |
1832 | return; |
1833 | |
1834 | if (frag_offset) { |
1835 | struct skb_shared_info *info = skb_shinfo(skb); |
1836 | |
1837 | /* We read part of the last frag, must recvmsg() rest of skb. */ |
1838 | if (frag == &info->frags[info->nr_frags - 1]) |
1839 | return; |
1840 | |
1841 | /* Else, we must at least read the remainder in this frag. */ |
1842 | partial_frag_remainder = skb_frag_size(frag) - frag_offset; |
1843 | zc->recv_skip_hint -= partial_frag_remainder; |
1844 | ++frag; |
1845 | } |
1846 | |
1847 | /* partial_frag_remainder: If part way through a frag, must read rest. |
1848 | * mappable_offset: Bytes till next mappable frag, *not* counting bytes |
1849 | * in partial_frag_remainder. |
1850 | */ |
	mappable_offset = find_next_mappable_frag(frag, zc->recv_skip_hint);
1852 | zc->recv_skip_hint = mappable_offset + partial_frag_remainder; |
1853 | } |
1854 | |
1855 | static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, |
1856 | int flags, struct scm_timestamping_internal *tss, |
1857 | int *cmsg_flags); |
1858 | static int receive_fallback_to_copy(struct sock *sk, |
1859 | struct tcp_zerocopy_receive *zc, int inq, |
1860 | struct scm_timestamping_internal *tss) |
1861 | { |
1862 | unsigned long copy_address = (unsigned long)zc->copybuf_address; |
1863 | struct msghdr msg = {}; |
1864 | int err; |
1865 | |
1866 | zc->length = 0; |
1867 | zc->recv_skip_hint = 0; |
1868 | |
1869 | if (copy_address != zc->copybuf_address) |
1870 | return -EINVAL; |
1871 | |
	err = import_ubuf(ITER_DEST, (void __user *)copy_address, inq,
			  &msg.msg_iter);
1874 | if (err) |
1875 | return err; |
1876 | |
	err = tcp_recvmsg_locked(sk, &msg, inq, MSG_DONTWAIT,
				 tss, &zc->msg_flags);
1879 | if (err < 0) |
1880 | return err; |
1881 | |
1882 | zc->copybuf_len = err; |
1883 | if (likely(zc->copybuf_len)) { |
1884 | struct sk_buff *skb; |
1885 | u32 offset; |
1886 | |
1887 | skb = tcp_recv_skb(sk, tcp_sk(sk)->copied_seq, &offset); |
1888 | if (skb) |
1889 | tcp_zerocopy_set_hint_for_skb(sk, zc, skb, offset); |
1890 | } |
1891 | return 0; |
1892 | } |
1893 | |
1894 | static int tcp_copy_straggler_data(struct tcp_zerocopy_receive *zc, |
1895 | struct sk_buff *skb, u32 copylen, |
1896 | u32 *offset, u32 *seq) |
1897 | { |
1898 | unsigned long copy_address = (unsigned long)zc->copybuf_address; |
1899 | struct msghdr msg = {}; |
1900 | int err; |
1901 | |
1902 | if (copy_address != zc->copybuf_address) |
1903 | return -EINVAL; |
1904 | |
	err = import_ubuf(ITER_DEST, (void __user *)copy_address, copylen,
			  &msg.msg_iter);
1907 | if (err) |
1908 | return err; |
	err = skb_copy_datagram_msg(skb, *offset, &msg, copylen);
1910 | if (err) |
1911 | return err; |
1912 | zc->recv_skip_hint -= copylen; |
1913 | *offset += copylen; |
1914 | *seq += copylen; |
1915 | return (__s32)copylen; |
1916 | } |
1917 | |
1918 | static int tcp_zc_handle_leftover(struct tcp_zerocopy_receive *zc, |
1919 | struct sock *sk, |
1920 | struct sk_buff *skb, |
1921 | u32 *seq, |
1922 | s32 copybuf_len, |
1923 | struct scm_timestamping_internal *tss) |
1924 | { |
1925 | u32 offset, copylen = min_t(u32, copybuf_len, zc->recv_skip_hint); |
1926 | |
1927 | if (!copylen) |
1928 | return 0; |
1929 | /* skb is null if inq < PAGE_SIZE. */ |
1930 | if (skb) { |
1931 | offset = *seq - TCP_SKB_CB(skb)->seq; |
1932 | } else { |
1933 | skb = tcp_recv_skb(sk, *seq, &offset); |
1934 | if (TCP_SKB_CB(skb)->has_rxtstamp) { |
1935 | tcp_update_recv_tstamps(skb, tss); |
1936 | zc->msg_flags |= TCP_CMSG_TS; |
1937 | } |
1938 | } |
1939 | |
	zc->copybuf_len = tcp_copy_straggler_data(zc, skb, copylen, &offset,
						  seq);
1942 | return zc->copybuf_len < 0 ? 0 : copylen; |
1943 | } |
1944 | |
1945 | static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma, |
1946 | struct page **pending_pages, |
1947 | unsigned long pages_remaining, |
1948 | unsigned long *address, |
1949 | u32 *length, |
1950 | u32 *seq, |
1951 | struct tcp_zerocopy_receive *zc, |
1952 | u32 total_bytes_to_map, |
1953 | int err) |
1954 | { |
1955 | /* At least one page did not map. Try zapping if we skipped earlier. */ |
1956 | if (err == -EBUSY && |
1957 | zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT) { |
1958 | u32 maybe_zap_len; |
1959 | |
1960 | maybe_zap_len = total_bytes_to_map - /* All bytes to map */ |
1961 | *length + /* Mapped or pending */ |
1962 | (pages_remaining * PAGE_SIZE); /* Failed map. */ |
		zap_page_range_single(vma, *address, maybe_zap_len, NULL);
1964 | err = 0; |
1965 | } |
1966 | |
1967 | if (!err) { |
1968 | unsigned long leftover_pages = pages_remaining; |
1969 | int bytes_mapped; |
1970 | |
1971 | /* We called zap_page_range_single, try to reinsert. */ |
		err = vm_insert_pages(vma, *address,
				      pending_pages,
				      &pages_remaining);
1975 | bytes_mapped = PAGE_SIZE * (leftover_pages - pages_remaining); |
1976 | *seq += bytes_mapped; |
1977 | *address += bytes_mapped; |
1978 | } |
1979 | if (err) { |
1980 | /* Either we were unable to zap, OR we zapped, retried an |
		 * insert, and still had an issue. Either way, pages_remaining
1982 | * is the number of pages we were unable to map, and we unroll |
1983 | * some state we speculatively touched before. |
1984 | */ |
1985 | const int bytes_not_mapped = PAGE_SIZE * pages_remaining; |
1986 | |
1987 | *length -= bytes_not_mapped; |
1988 | zc->recv_skip_hint += bytes_not_mapped; |
1989 | } |
1990 | return err; |
1991 | } |
1992 | |
1993 | static int tcp_zerocopy_vm_insert_batch(struct vm_area_struct *vma, |
1994 | struct page **pages, |
1995 | unsigned int pages_to_map, |
1996 | unsigned long *address, |
1997 | u32 *length, |
1998 | u32 *seq, |
1999 | struct tcp_zerocopy_receive *zc, |
2000 | u32 total_bytes_to_map) |
2001 | { |
2002 | unsigned long pages_remaining = pages_to_map; |
2003 | unsigned int pages_mapped; |
2004 | unsigned int bytes_mapped; |
2005 | int err; |
2006 | |
	err = vm_insert_pages(vma, *address, pages, &pages_remaining);
2008 | pages_mapped = pages_to_map - (unsigned int)pages_remaining; |
2009 | bytes_mapped = PAGE_SIZE * pages_mapped; |
2010 | /* Even if vm_insert_pages fails, it may have partially succeeded in |
2011 | * mapping (some but not all of the pages). |
2012 | */ |
2013 | *seq += bytes_mapped; |
2014 | *address += bytes_mapped; |
2015 | |
2016 | if (likely(!err)) |
2017 | return 0; |
2018 | |
2019 | /* Error: maybe zap and retry + rollback state for failed inserts. */ |
	return tcp_zerocopy_vm_insert_batch_error(vma, pages + pages_mapped,
		pages_remaining, address, length, seq, zc, total_bytes_to_map,
		err);
2023 | } |
2024 | |
2025 | #define TCP_VALID_ZC_MSG_FLAGS (TCP_CMSG_TS) |
2026 | static void tcp_zc_finalize_rx_tstamp(struct sock *sk, |
2027 | struct tcp_zerocopy_receive *zc, |
2028 | struct scm_timestamping_internal *tss) |
2029 | { |
2030 | unsigned long msg_control_addr; |
2031 | struct msghdr cmsg_dummy; |
2032 | |
2033 | msg_control_addr = (unsigned long)zc->msg_control; |
2034 | cmsg_dummy.msg_control_user = (void __user *)msg_control_addr; |
2035 | cmsg_dummy.msg_controllen = |
2036 | (__kernel_size_t)zc->msg_controllen; |
2037 | cmsg_dummy.msg_flags = in_compat_syscall() |
2038 | ? MSG_CMSG_COMPAT : 0; |
2039 | cmsg_dummy.msg_control_is_user = true; |
2040 | zc->msg_flags = 0; |
2041 | if (zc->msg_control == msg_control_addr && |
2042 | zc->msg_controllen == cmsg_dummy.msg_controllen) { |
		tcp_recv_timestamp(&cmsg_dummy, sk, tss);
2044 | zc->msg_control = (__u64) |
2045 | ((uintptr_t)cmsg_dummy.msg_control_user); |
2046 | zc->msg_controllen = |
2047 | (__u64)cmsg_dummy.msg_controllen; |
2048 | zc->msg_flags = (__u32)cmsg_dummy.msg_flags; |
2049 | } |
2050 | } |
2051 | |
2052 | static struct vm_area_struct *find_tcp_vma(struct mm_struct *mm, |
2053 | unsigned long address, |
2054 | bool *mmap_locked) |
2055 | { |
2056 | struct vm_area_struct *vma = lock_vma_under_rcu(mm, address); |
2057 | |
2058 | if (vma) { |
2059 | if (vma->vm_ops != &tcp_vm_ops) { |
2060 | vma_end_read(vma); |
2061 | return NULL; |
2062 | } |
2063 | *mmap_locked = false; |
2064 | return vma; |
2065 | } |
2066 | |
2067 | mmap_read_lock(mm); |
	vma = vma_lookup(mm, address);
2069 | if (!vma || vma->vm_ops != &tcp_vm_ops) { |
2070 | mmap_read_unlock(mm); |
2071 | return NULL; |
2072 | } |
2073 | *mmap_locked = true; |
2074 | return vma; |
2075 | } |
2076 | |
2077 | #define TCP_ZEROCOPY_PAGE_BATCH_SIZE 32 |
2078 | static int tcp_zerocopy_receive(struct sock *sk, |
2079 | struct tcp_zerocopy_receive *zc, |
2080 | struct scm_timestamping_internal *tss) |
2081 | { |
2082 | u32 length = 0, offset, vma_len, avail_len, copylen = 0; |
2083 | unsigned long address = (unsigned long)zc->address; |
2084 | struct page *pages[TCP_ZEROCOPY_PAGE_BATCH_SIZE]; |
2085 | s32 copybuf_len = zc->copybuf_len; |
2086 | struct tcp_sock *tp = tcp_sk(sk); |
2087 | const skb_frag_t *frags = NULL; |
2088 | unsigned int pages_to_map = 0; |
2089 | struct vm_area_struct *vma; |
2090 | struct sk_buff *skb = NULL; |
2091 | u32 seq = tp->copied_seq; |
2092 | u32 total_bytes_to_map; |
2093 | int inq = tcp_inq(sk); |
2094 | bool mmap_locked; |
2095 | int ret; |
2096 | |
2097 | zc->copybuf_len = 0; |
2098 | zc->msg_flags = 0; |
2099 | |
2100 | if (address & (PAGE_SIZE - 1) || address != zc->address) |
2101 | return -EINVAL; |
2102 | |
2103 | if (sk->sk_state == TCP_LISTEN) |
2104 | return -ENOTCONN; |
2105 | |
2106 | sock_rps_record_flow(sk); |
2107 | |
2108 | if (inq && inq <= copybuf_len) |
2109 | return receive_fallback_to_copy(sk, zc, inq, tss); |
2110 | |
2111 | if (inq < PAGE_SIZE) { |
2112 | zc->length = 0; |
2113 | zc->recv_skip_hint = inq; |
		if (!inq && sock_flag(sk, SOCK_DONE))
2115 | return -EIO; |
2116 | return 0; |
2117 | } |
2118 | |
	vma = find_tcp_vma(current->mm, address, &mmap_locked);
2120 | if (!vma) |
2121 | return -EINVAL; |
2122 | |
2123 | vma_len = min_t(unsigned long, zc->length, vma->vm_end - address); |
2124 | avail_len = min_t(u32, vma_len, inq); |
2125 | total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1); |
2126 | if (total_bytes_to_map) { |
2127 | if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT)) |
			zap_page_range_single(vma, address, total_bytes_to_map,
					      NULL);
2130 | zc->length = total_bytes_to_map; |
2131 | zc->recv_skip_hint = 0; |
2132 | } else { |
2133 | zc->length = avail_len; |
2134 | zc->recv_skip_hint = avail_len; |
2135 | } |
2136 | ret = 0; |
2137 | while (length + PAGE_SIZE <= zc->length) { |
2138 | int mappable_offset; |
2139 | struct page *page; |
2140 | |
2141 | if (zc->recv_skip_hint < PAGE_SIZE) { |
2142 | u32 offset_frag; |
2143 | |
2144 | if (skb) { |
2145 | if (zc->recv_skip_hint > 0) |
2146 | break; |
2147 | skb = skb->next; |
2148 | offset = seq - TCP_SKB_CB(skb)->seq; |
2149 | } else { |
2150 | skb = tcp_recv_skb(sk, seq, &offset); |
2151 | } |
2152 | |
2153 | if (TCP_SKB_CB(skb)->has_rxtstamp) { |
2154 | tcp_update_recv_tstamps(skb, tss); |
2155 | zc->msg_flags |= TCP_CMSG_TS; |
2156 | } |
2157 | zc->recv_skip_hint = skb->len - offset; |
			frags = skb_advance_to_frag(skb, offset, &offset_frag);
2159 | if (!frags || offset_frag) |
2160 | break; |
2161 | } |
2162 | |
		mappable_offset = find_next_mappable_frag(frags,
							  zc->recv_skip_hint);
2165 | if (mappable_offset) { |
2166 | zc->recv_skip_hint = mappable_offset; |
2167 | break; |
2168 | } |
		page = skb_frag_page(frags);
		prefetchw(page);
2171 | pages[pages_to_map++] = page; |
2172 | length += PAGE_SIZE; |
2173 | zc->recv_skip_hint -= PAGE_SIZE; |
2174 | frags++; |
2175 | if (pages_to_map == TCP_ZEROCOPY_PAGE_BATCH_SIZE || |
2176 | zc->recv_skip_hint < PAGE_SIZE) { |
2177 | /* Either full batch, or we're about to go to next skb |
2178 | * (and we cannot unroll failed ops across skbs). |
2179 | */ |
			ret = tcp_zerocopy_vm_insert_batch(vma, pages,
							   pages_to_map,
							   &address, &length,
							   &seq, zc,
							   total_bytes_to_map);
2185 | if (ret) |
2186 | goto out; |
2187 | pages_to_map = 0; |
2188 | } |
2189 | } |
2190 | if (pages_to_map) { |
		ret = tcp_zerocopy_vm_insert_batch(vma, pages, pages_to_map,
						   &address, &length, &seq,
						   zc, total_bytes_to_map);
2194 | } |
2195 | out: |
2196 | if (mmap_locked) |
2197 | mmap_read_unlock(current->mm); |
2198 | else |
2199 | vma_end_read(vma); |
2200 | /* Try to copy straggler data. */ |
2201 | if (!ret) |
		copylen = tcp_zc_handle_leftover(zc, sk, skb, &seq, copybuf_len, tss);
2203 | |
2204 | if (length + copylen) { |
2205 | WRITE_ONCE(tp->copied_seq, seq); |
2206 | tcp_rcv_space_adjust(sk); |
2207 | |
2208 | /* Clean up data we have read: This will do ACK frames. */ |
2209 | tcp_recv_skb(sk, seq, &offset); |
		tcp_cleanup_rbuf(sk, length + copylen);
2211 | ret = 0; |
2212 | if (length == zc->length) |
2213 | zc->recv_skip_hint = 0; |
2214 | } else { |
		if (!zc->recv_skip_hint && sock_flag(sk, SOCK_DONE))
2216 | ret = -EIO; |
2217 | } |
2218 | zc->length = length; |
2219 | return ret; |
2220 | } |
2221 | #endif |
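
/* A hedged user-space sketch of driving the zerocopy receive path above via
 * getsockopt(TCP_ZEROCOPY_RECEIVE); field names are from the uapi struct,
 * error handling is omitted, and "addr"/"chunk" refer to the mmap() sketch
 * near tcp_mmap():
 *
 *	struct tcp_zerocopy_receive zc = {
 *		.address = (__u64)(unsigned long)addr,
 *		.length	 = chunk,
 *	};
 *	socklen_t zc_len = sizeof(zc);
 *
 *	getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, &zc, &zc_len);
 *
 * On return, zc.length bytes are mapped at addr and zc.recv_skip_hint bytes
 * (if any) should be read with recv() instead.
 */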
2222 | |
2223 | /* Similar to __sock_recv_timestamp, but does not require an skb */ |
2224 | void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk, |
2225 | struct scm_timestamping_internal *tss) |
2226 | { |
	int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW);
2228 | bool has_timestamping = false; |
2229 | |
2230 | if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) { |
		if (sock_flag(sk, SOCK_RCVTSTAMP)) {
			if (sock_flag(sk, SOCK_RCVTSTAMPNS)) {
2233 | if (new_tstamp) { |
2234 | struct __kernel_timespec kts = { |
2235 | .tv_sec = tss->ts[0].tv_sec, |
2236 | .tv_nsec = tss->ts[0].tv_nsec, |
2237 | }; |
					put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_NEW,
						 sizeof(kts), &kts);
2240 | } else { |
2241 | struct __kernel_old_timespec ts_old = { |
2242 | .tv_sec = tss->ts[0].tv_sec, |
2243 | .tv_nsec = tss->ts[0].tv_nsec, |
2244 | }; |
					put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_OLD,
						 sizeof(ts_old), &ts_old);
2247 | } |
2248 | } else { |
2249 | if (new_tstamp) { |
2250 | struct __kernel_sock_timeval stv = { |
2251 | .tv_sec = tss->ts[0].tv_sec, |
2252 | .tv_usec = tss->ts[0].tv_nsec / 1000, |
2253 | }; |
					put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW,
						 sizeof(stv), &stv);
2256 | } else { |
2257 | struct __kernel_old_timeval tv = { |
2258 | .tv_sec = tss->ts[0].tv_sec, |
2259 | .tv_usec = tss->ts[0].tv_nsec / 1000, |
2260 | }; |
					put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD,
						 sizeof(tv), &tv);
2263 | } |
2264 | } |
2265 | } |
2266 | |
2267 | if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_SOFTWARE) |
2268 | has_timestamping = true; |
2269 | else |
2270 | tss->ts[0] = (struct timespec64) {0}; |
2271 | } |
2272 | |
2273 | if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) { |
2274 | if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_RAW_HARDWARE) |
2275 | has_timestamping = true; |
2276 | else |
2277 | tss->ts[2] = (struct timespec64) {0}; |
2278 | } |
2279 | |
2280 | if (has_timestamping) { |
2281 | tss->ts[1] = (struct timespec64) {0}; |
2282 | if (sock_flag(sk, SOCK_TSTAMP_NEW)) |
2283 | put_cmsg_scm_timestamping64(msg, tss); |
2284 | else |
2285 | put_cmsg_scm_timestamping(msg, tss); |
2286 | } |
2287 | } |
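
/* A hedged user-space sketch of requesting the receive timestamps handled
 * above (illustrative only; msghdr setup and error handling omitted):
 *
 *	int flags = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));
 *	recvmsg(fd, &msg, 0);
 *
 * The application then walks CMSG_FIRSTHDR()/CMSG_NXTHDR() looking for
 * cmsg_level == SOL_SOCKET, cmsg_type == SCM_TIMESTAMPING, whose payload
 * mirrors the three tss->ts[] slots filled in here.
 */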
2288 | |
2289 | static int tcp_inq_hint(struct sock *sk) |
2290 | { |
2291 | const struct tcp_sock *tp = tcp_sk(sk); |
2292 | u32 copied_seq = READ_ONCE(tp->copied_seq); |
2293 | u32 rcv_nxt = READ_ONCE(tp->rcv_nxt); |
2294 | int inq; |
2295 | |
2296 | inq = rcv_nxt - copied_seq; |
2297 | if (unlikely(inq < 0 || copied_seq != READ_ONCE(tp->copied_seq))) { |
2298 | lock_sock(sk); |
2299 | inq = tp->rcv_nxt - tp->copied_seq; |
2300 | release_sock(sk); |
2301 | } |
2302 | /* After receiving a FIN, tell the user-space to continue reading |
2303 | * by returning a non-zero inq. |
2304 | */ |
	if (inq == 0 && sock_flag(sk, SOCK_DONE))
2306 | inq = 1; |
2307 | return inq; |
2308 | } |
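
/* The hint above backs the TCP_INQ option; a hedged user-space sketch
 * (illustrative only) of receiving the in-queue byte count as a cmsg:
 *
 *	int one = 1;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_INQ, &one, sizeof(one));
 *	recvmsg(fd, &msg, 0);
 *
 * A control message with cmsg_level == SOL_TCP and cmsg_type == TCP_CM_INQ
 * then carries an int with the bytes still waiting in the receive queue.
 */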
2309 | |
2310 | /* |
2311 | * This routine copies from a sock struct into the user buffer. |
2312 | * |
 * Technical note: in 2.3 we work on a _locked_ socket, so that
2314 | * tricks with *seq access order and skb->users are not required. |
2315 | * Probably, code can be easily improved even more. |
2316 | */ |
2317 | |
2318 | static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, |
2319 | int flags, struct scm_timestamping_internal *tss, |
2320 | int *cmsg_flags) |
2321 | { |
2322 | struct tcp_sock *tp = tcp_sk(sk); |
2323 | int copied = 0; |
2324 | u32 peek_seq; |
2325 | u32 *seq; |
2326 | unsigned long used; |
2327 | int err; |
2328 | int target; /* Read at least this many bytes */ |
2329 | long timeo; |
2330 | struct sk_buff *skb, *last; |
2331 | u32 urg_hole = 0; |
2332 | |
2333 | err = -ENOTCONN; |
2334 | if (sk->sk_state == TCP_LISTEN) |
2335 | goto out; |
2336 | |
2337 | if (tp->recvmsg_inq) { |
2338 | *cmsg_flags = TCP_CMSG_INQ; |
2339 | msg->msg_get_inq = 1; |
2340 | } |
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2342 | |
2343 | /* Urgent data needs to be handled specially. */ |
2344 | if (flags & MSG_OOB) |
2345 | goto recv_urg; |
2346 | |
2347 | if (unlikely(tp->repair)) { |
2348 | err = -EPERM; |
2349 | if (!(flags & MSG_PEEK)) |
2350 | goto out; |
2351 | |
2352 | if (tp->repair_queue == TCP_SEND_QUEUE) |
2353 | goto recv_sndq; |
2354 | |
2355 | err = -EINVAL; |
2356 | if (tp->repair_queue == TCP_NO_QUEUE) |
2357 | goto out; |
2358 | |
2359 | /* 'common' recv queue MSG_PEEK-ing */ |
2360 | } |
2361 | |
2362 | seq = &tp->copied_seq; |
2363 | if (flags & MSG_PEEK) { |
2364 | peek_seq = tp->copied_seq; |
2365 | seq = &peek_seq; |
2366 | } |
2367 | |
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
2369 | |
2370 | do { |
2371 | u32 offset; |
2372 | |
2373 | /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ |
2374 | if (unlikely(tp->urg_data) && tp->urg_seq == *seq) { |
2375 | if (copied) |
2376 | break; |
2377 | if (signal_pending(current)) { |
2378 | copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; |
2379 | break; |
2380 | } |
2381 | } |
2382 | |
2383 | /* Next get a buffer. */ |
2384 | |
		last = skb_peek_tail(&sk->sk_receive_queue);
2386 | skb_queue_walk(&sk->sk_receive_queue, skb) { |
2387 | last = skb; |
2388 | /* Now that we have two receive queues this |
2389 | * shouldn't happen. |
2390 | */ |
2391 | if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), |
2392 | "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n" , |
2393 | *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, |
2394 | flags)) |
2395 | break; |
2396 | |
2397 | offset = *seq - TCP_SKB_CB(skb)->seq; |
2398 | if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { |
2399 | pr_err_once("%s: found a SYN, please report !\n" , __func__); |
2400 | offset--; |
2401 | } |
2402 | if (offset < skb->len) |
2403 | goto found_ok_skb; |
2404 | if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) |
2405 | goto found_fin_ok; |
2406 | WARN(!(flags & MSG_PEEK), |
2407 | "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n" , |
2408 | *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); |
2409 | } |
2410 | |
		/* Well, if we have backlog, try to process it now. */
2412 | |
2413 | if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) |
2414 | break; |
2415 | |
2416 | if (copied) { |
2417 | if (!timeo || |
2418 | sk->sk_err || |
2419 | sk->sk_state == TCP_CLOSE || |
2420 | (sk->sk_shutdown & RCV_SHUTDOWN) || |
2421 | signal_pending(current)) |
2422 | break; |
2423 | } else { |
			if (sock_flag(sk, SOCK_DONE))
2425 | break; |
2426 | |
2427 | if (sk->sk_err) { |
2428 | copied = sock_error(sk); |
2429 | break; |
2430 | } |
2431 | |
2432 | if (sk->sk_shutdown & RCV_SHUTDOWN) |
2433 | break; |
2434 | |
2435 | if (sk->sk_state == TCP_CLOSE) { |
				/* This occurs when the user tries to read
				 * from a never-connected socket.
2438 | */ |
2439 | copied = -ENOTCONN; |
2440 | break; |
2441 | } |
2442 | |
2443 | if (!timeo) { |
2444 | copied = -EAGAIN; |
2445 | break; |
2446 | } |
2447 | |
2448 | if (signal_pending(current)) { |
2449 | copied = sock_intr_errno(timeo); |
2450 | break; |
2451 | } |
2452 | } |
2453 | |
2454 | if (copied >= target) { |
2455 | /* Do not sleep, just process backlog. */ |
2456 | __sk_flush_backlog(sk); |
2457 | } else { |
2458 | tcp_cleanup_rbuf(sk, copied); |
			err = sk_wait_data(sk, &timeo, last);
2460 | if (err < 0) { |
2461 | err = copied ? : err; |
2462 | goto out; |
2463 | } |
2464 | } |
2465 | |
2466 | if ((flags & MSG_PEEK) && |
2467 | (peek_seq - copied - urg_hole != tp->copied_seq)) { |
2468 | net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n" , |
2469 | current->comm, |
2470 | task_pid_nr(current)); |
2471 | peek_seq = tp->copied_seq; |
2472 | } |
2473 | continue; |
2474 | |
2475 | found_ok_skb: |
2476 | /* Ok so how much can we use? */ |
2477 | used = skb->len - offset; |
2478 | if (len < used) |
2479 | used = len; |
2480 | |
2481 | /* Do we have urgent data here? */ |
2482 | if (unlikely(tp->urg_data)) { |
2483 | u32 urg_offset = tp->urg_seq - *seq; |
2484 | if (urg_offset < used) { |
2485 | if (!urg_offset) { |
					if (!sock_flag(sk, SOCK_URGINLINE)) {
2487 | WRITE_ONCE(*seq, *seq + 1); |
2488 | urg_hole++; |
2489 | offset++; |
2490 | used--; |
2491 | if (!used) |
2492 | goto skip_copy; |
2493 | } |
2494 | } else |
2495 | used = urg_offset; |
2496 | } |
2497 | } |
2498 | |
2499 | if (!(flags & MSG_TRUNC)) { |
			err = skb_copy_datagram_msg(skb, offset, msg, used);
2501 | if (err) { |
2502 | /* Exception. Bailout! */ |
2503 | if (!copied) |
2504 | copied = -EFAULT; |
2505 | break; |
2506 | } |
2507 | } |
2508 | |
2509 | WRITE_ONCE(*seq, *seq + used); |
2510 | copied += used; |
2511 | len -= used; |
2512 | |
2513 | tcp_rcv_space_adjust(sk); |
2514 | |
2515 | skip_copy: |
2516 | if (unlikely(tp->urg_data) && after(tp->copied_seq, tp->urg_seq)) { |
2517 | WRITE_ONCE(tp->urg_data, 0); |
2518 | tcp_fast_path_check(sk); |
2519 | } |
2520 | |
2521 | if (TCP_SKB_CB(skb)->has_rxtstamp) { |
2522 | tcp_update_recv_tstamps(skb, tss); |
2523 | *cmsg_flags |= TCP_CMSG_TS; |
2524 | } |
2525 | |
2526 | if (used + offset < skb->len) |
2527 | continue; |
2528 | |
2529 | if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) |
2530 | goto found_fin_ok; |
2531 | if (!(flags & MSG_PEEK)) |
2532 | tcp_eat_recv_skb(sk, skb); |
2533 | continue; |
2534 | |
2535 | found_fin_ok: |
2536 | /* Process the FIN. */ |
2537 | WRITE_ONCE(*seq, *seq + 1); |
2538 | if (!(flags & MSG_PEEK)) |
2539 | tcp_eat_recv_skb(sk, skb); |
2540 | break; |
2541 | } while (len > 0); |
2542 | |
2543 | /* According to UNIX98, msg_name/msg_namelen are ignored |
	 * on a connected socket. I was just happy when I found this 8) --ANK
2545 | */ |
2546 | |
2547 | /* Clean up data we have read: This will do ACK frames. */ |
2548 | tcp_cleanup_rbuf(sk, copied); |
2549 | return copied; |
2550 | |
2551 | out: |
2552 | return err; |
2553 | |
2554 | recv_urg: |
2555 | err = tcp_recv_urg(sk, msg, len, flags); |
2556 | goto out; |
2557 | |
2558 | recv_sndq: |
2559 | err = tcp_peek_sndq(sk, msg, len); |
2560 | goto out; |
2561 | } |
2562 | |
2563 | int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, |
2564 | int *addr_len) |
2565 | { |
2566 | int cmsg_flags = 0, ret; |
2567 | struct scm_timestamping_internal tss; |
2568 | |
2569 | if (unlikely(flags & MSG_ERRQUEUE)) |
2570 | return inet_recv_error(sk, msg, len, addr_len); |
2571 | |
2572 | if (sk_can_busy_loop(sk) && |
	    skb_queue_empty_lockless(&sk->sk_receive_queue) &&
	    sk->sk_state == TCP_ESTABLISHED)
		sk_busy_loop(sk, flags & MSG_DONTWAIT);
2576 | |
2577 | lock_sock(sk); |
	ret = tcp_recvmsg_locked(sk, msg, len, flags, &tss, &cmsg_flags);
2579 | release_sock(sk); |
2580 | |
2581 | if ((cmsg_flags || msg->msg_get_inq) && ret >= 0) { |
2582 | if (cmsg_flags & TCP_CMSG_TS) |
			tcp_recv_timestamp(msg, sk, &tss);
		if (msg->msg_get_inq) {
			msg->msg_inq = tcp_inq_hint(sk);
			if (cmsg_flags & TCP_CMSG_INQ)
				put_cmsg(msg, SOL_TCP, TCP_CM_INQ,
					 sizeof(msg->msg_inq), &msg->msg_inq);
2589 | } |
2590 | } |
2591 | return ret; |
2592 | } |
2593 | EXPORT_SYMBOL(tcp_recvmsg); |
2594 | |
2595 | void tcp_set_state(struct sock *sk, int state) |
2596 | { |
2597 | int oldstate = sk->sk_state; |
2598 | |
2599 | /* We defined a new enum for TCP states that are exported in BPF |
	 * so as not to force the internal TCP states to be frozen. The
2601 | * following checks will detect if an internal state value ever |
2602 | * differs from the BPF value. If this ever happens, then we will |
2603 | * need to remap the internal value to the BPF value before calling |
2604 | * tcp_call_bpf_2arg. |
2605 | */ |
2606 | BUILD_BUG_ON((int)BPF_TCP_ESTABLISHED != (int)TCP_ESTABLISHED); |
2607 | BUILD_BUG_ON((int)BPF_TCP_SYN_SENT != (int)TCP_SYN_SENT); |
2608 | BUILD_BUG_ON((int)BPF_TCP_SYN_RECV != (int)TCP_SYN_RECV); |
2609 | BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT1 != (int)TCP_FIN_WAIT1); |
2610 | BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT2 != (int)TCP_FIN_WAIT2); |
2611 | BUILD_BUG_ON((int)BPF_TCP_TIME_WAIT != (int)TCP_TIME_WAIT); |
2612 | BUILD_BUG_ON((int)BPF_TCP_CLOSE != (int)TCP_CLOSE); |
2613 | BUILD_BUG_ON((int)BPF_TCP_CLOSE_WAIT != (int)TCP_CLOSE_WAIT); |
2614 | BUILD_BUG_ON((int)BPF_TCP_LAST_ACK != (int)TCP_LAST_ACK); |
2615 | BUILD_BUG_ON((int)BPF_TCP_LISTEN != (int)TCP_LISTEN); |
2616 | BUILD_BUG_ON((int)BPF_TCP_CLOSING != (int)TCP_CLOSING); |
2617 | BUILD_BUG_ON((int)BPF_TCP_NEW_SYN_RECV != (int)TCP_NEW_SYN_RECV); |
2618 | BUILD_BUG_ON((int)BPF_TCP_BOUND_INACTIVE != (int)TCP_BOUND_INACTIVE); |
2619 | BUILD_BUG_ON((int)BPF_TCP_MAX_STATES != (int)TCP_MAX_STATES); |
2620 | |
2621 | /* bpf uapi header bpf.h defines an anonymous enum with values |
2622 | * BPF_TCP_* used by bpf programs. Currently gcc built vmlinux |
2623 | * is able to emit this enum in DWARF due to the above BUILD_BUG_ON. |
2624 | * But clang built vmlinux does not have this enum in DWARF |
2625 | * since clang removes the above code before generating IR/debuginfo. |
2626 | * Let us explicitly emit the type debuginfo to ensure the |
2627 | * above-mentioned anonymous enum in the vmlinux DWARF and hence BTF |
2628 | * regardless of which compiler is used. |
2629 | */ |
2630 | BTF_TYPE_EMIT_ENUM(BPF_TCP_ESTABLISHED); |
2631 | |
2632 | if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG)) |
		tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state);
2634 | |
2635 | switch (state) { |
2636 | case TCP_ESTABLISHED: |
2637 | if (oldstate != TCP_ESTABLISHED) |
2638 | TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); |
2639 | break; |
2640 | |
2641 | case TCP_CLOSE: |
2642 | if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED) |
2643 | TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); |
2644 | |
2645 | sk->sk_prot->unhash(sk); |
2646 | if (inet_csk(sk)->icsk_bind_hash && |
2647 | !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) |
2648 | inet_put_port(sk); |
2649 | fallthrough; |
2650 | default: |
2651 | if (oldstate == TCP_ESTABLISHED) |
2652 | TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); |
2653 | } |
2654 | |
2655 | /* Change state AFTER socket is unhashed to avoid closed |
2656 | * socket sitting in hash tables. |
2657 | */ |
	inet_sk_state_store(sk, state);
2659 | } |
2660 | EXPORT_SYMBOL_GPL(tcp_set_state); |
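
/* A hedged sketch of observing these transitions from a BPF sockops program
 * (illustrative only; the program must first enable the callback with
 * bpf_sock_ops_cb_flags_set(skops, BPF_SOCK_OPS_STATE_CB_FLAG)):
 *
 *	SEC("sockops")
 *	int log_tcp_state(struct bpf_sock_ops *skops)
 *	{
 *		if (skops->op == BPF_SOCK_OPS_STATE_CB)
 *			bpf_printk("tcp state %d -> %d",
 *				   skops->args[0], skops->args[1]);
 *		return 1;
 *	}
 */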
2661 | |
2662 | /* |
2663 | * State processing on a close. This implements the state shift for |
2664 | * sending our FIN frame. Note that we only send a FIN for some |
2665 | * states. A shutdown() may have already sent the FIN, or we may be |
2666 | * closed. |
2667 | */ |
2668 | |
2669 | static const unsigned char new_state[16] = { |
2670 | /* current state: new state: action: */ |
2671 | [0 /* (Invalid) */] = TCP_CLOSE, |
2672 | [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, |
2673 | [TCP_SYN_SENT] = TCP_CLOSE, |
2674 | [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, |
2675 | [TCP_FIN_WAIT1] = TCP_FIN_WAIT1, |
2676 | [TCP_FIN_WAIT2] = TCP_FIN_WAIT2, |
2677 | [TCP_TIME_WAIT] = TCP_CLOSE, |
2678 | [TCP_CLOSE] = TCP_CLOSE, |
2679 | [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN, |
2680 | [TCP_LAST_ACK] = TCP_LAST_ACK, |
2681 | [TCP_LISTEN] = TCP_CLOSE, |
2682 | [TCP_CLOSING] = TCP_CLOSING, |
2683 | [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */ |
2684 | }; |
2685 | |
2686 | static int tcp_close_state(struct sock *sk) |
2687 | { |
2688 | int next = (int)new_state[sk->sk_state]; |
2689 | int ns = next & TCP_STATE_MASK; |
2690 | |
2691 | tcp_set_state(sk, ns); |
2692 | |
2693 | return next & TCP_ACTION_FIN; |
2694 | } |
2695 | |
2696 | /* |
2697 | * Shutdown the sending side of a connection. Much like close except |
2698 | * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD). |
2699 | */ |
2700 | |
2701 | void tcp_shutdown(struct sock *sk, int how) |
2702 | { |
2703 | /* We need to grab some memory, and put together a FIN, |
2704 | * and then put it into the queue to be sent. |
2705 | * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92. |
2706 | */ |
2707 | if (!(how & SEND_SHUTDOWN)) |
2708 | return; |
2709 | |
2710 | /* If we've already sent a FIN, or it's a closed state, skip this. */ |
2711 | if ((1 << sk->sk_state) & |
2712 | (TCPF_ESTABLISHED | TCPF_SYN_SENT | |
2713 | TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) { |
2714 | /* Clear out any half completed packets. FIN if needed. */ |
2715 | if (tcp_close_state(sk)) |
2716 | tcp_send_fin(sk); |
2717 | } |
2718 | } |
2719 | EXPORT_SYMBOL(tcp_shutdown); |
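
/* From user space this is the SHUT_WR half of shutdown(); a minimal sketch
 * (illustrative only) of a half-close that still drains the peer's data:
 *
 *	shutdown(fd, SHUT_WR);			// queue our FIN
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		;				// keep reading until peer's FIN
 */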
2720 | |
2721 | int tcp_orphan_count_sum(void) |
2722 | { |
2723 | int i, total = 0; |
2724 | |
2725 | for_each_possible_cpu(i) |
2726 | total += per_cpu(tcp_orphan_count, i); |
2727 | |
2728 | return max(total, 0); |
2729 | } |
2730 | |
2731 | static int tcp_orphan_cache; |
2732 | static struct timer_list tcp_orphan_timer; |
2733 | #define TCP_ORPHAN_TIMER_PERIOD msecs_to_jiffies(100) |
2734 | |
2735 | static void tcp_orphan_update(struct timer_list *unused) |
2736 | { |
2737 | WRITE_ONCE(tcp_orphan_cache, tcp_orphan_count_sum()); |
	mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);
2739 | } |
2740 | |
2741 | static bool tcp_too_many_orphans(int shift) |
2742 | { |
2743 | return READ_ONCE(tcp_orphan_cache) << shift > |
2744 | READ_ONCE(sysctl_tcp_max_orphans); |
2745 | } |
2746 | |
2747 | bool tcp_check_oom(struct sock *sk, int shift) |
2748 | { |
2749 | bool too_many_orphans, out_of_socket_memory; |
2750 | |
2751 | too_many_orphans = tcp_too_many_orphans(shift); |
2752 | out_of_socket_memory = tcp_out_of_memory(sk); |
2753 | |
2754 | if (too_many_orphans) |
2755 | net_info_ratelimited("too many orphaned sockets\n" ); |
2756 | if (out_of_socket_memory) |
2757 | net_info_ratelimited("out of memory -- consider tuning tcp_mem\n" ); |
2758 | return too_many_orphans || out_of_socket_memory; |
2759 | } |
2760 | |
2761 | void __tcp_close(struct sock *sk, long timeout) |
2762 | { |
2763 | struct sk_buff *skb; |
2764 | int data_was_unread = 0; |
2765 | int state; |
2766 | |
2767 | WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); |
2768 | |
2769 | if (sk->sk_state == TCP_LISTEN) { |
2770 | tcp_set_state(sk, TCP_CLOSE); |
2771 | |
2772 | /* Special case. */ |
2773 | inet_csk_listen_stop(sk); |
2774 | |
2775 | goto adjudge_to_death; |
2776 | } |
2777 | |
2778 | /* We need to flush the recv. buffs. We do this only on the |
2779 | * descriptor close, not protocol-sourced closes, because the |
2780 | * reader process may not have drained the data yet! |
2781 | */ |
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
2783 | u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq; |
2784 | |
2785 | if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) |
2786 | len--; |
2787 | data_was_unread += len; |
2788 | __kfree_skb(skb); |
2789 | } |
2790 | |
2791 | /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */ |
2792 | if (sk->sk_state == TCP_CLOSE) |
2793 | goto adjudge_to_death; |
2794 | |
2795 | /* As outlined in RFC 2525, section 2.17, we send a RST here because |
2796 | * data was lost. To witness the awful effects of the old behavior of |
2797 | * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk |
2798 | * GET in an FTP client, suspend the process, wait for the client to |
2799 | * advertise a zero window, then kill -9 the FTP client, wheee... |
2800 | * Note: timeout is always zero in such a case. |
2801 | */ |
2802 | if (unlikely(tcp_sk(sk)->repair)) { |
2803 | sk->sk_prot->disconnect(sk, 0); |
2804 | } else if (data_was_unread) { |
2805 | /* Unread data was tossed, zap the connection. */ |
2806 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); |
2807 | tcp_set_state(sk, TCP_CLOSE); |
		tcp_send_active_reset(sk, sk->sk_allocation);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
2810 | /* Check zero linger _after_ checking for unread data. */ |
2811 | sk->sk_prot->disconnect(sk, 0); |
2812 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); |
2813 | } else if (tcp_close_state(sk)) { |
2814 | /* We FIN if the application ate all the data before |
2815 | * zapping the connection. |
2816 | */ |
2817 | |
2818 | /* RED-PEN. Formally speaking, we have broken TCP state |
2819 | * machine. State transitions: |
2820 | * |
2821 | * TCP_ESTABLISHED -> TCP_FIN_WAIT1 |
2822 | * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible) |
2823 | * TCP_CLOSE_WAIT -> TCP_LAST_ACK |
2824 | * |
2825 | * are legal only when FIN has been sent (i.e. in window), |
2826 | * rather than queued out of window. Purists blame. |
2827 | * |
2828 | * F.e. "RFC state" is ESTABLISHED, |
2829 | * if Linux state is FIN-WAIT-1, but FIN is still not sent. |
2830 | * |
		 * The visible deviations are that sometimes
2832 | * we enter time-wait state, when it is not required really |
2833 | * (harmless), do not send active resets, when they are |
2834 | * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when |
2835 | * they look as CLOSING or LAST_ACK for Linux) |
2836 | * Probably, I missed some more holelets. |
2837 | * --ANK |
2838 | * XXX (TFO) - To start off we don't support SYN+ACK+FIN |
2839 | * in a single packet! (May consider it later but will |
2840 | * probably need API support or TCP_CORK SYN-ACK until |
2841 | * data is written and socket is closed.) |
2842 | */ |
2843 | tcp_send_fin(sk); |
2844 | } |
2845 | |
	sk_stream_wait_close(sk, timeout);
2847 | |
2848 | adjudge_to_death: |
2849 | state = sk->sk_state; |
2850 | sock_hold(sk); |
2851 | sock_orphan(sk); |
2852 | |
2853 | local_bh_disable(); |
2854 | bh_lock_sock(sk); |
2855 | /* remove backlog if any, without releasing ownership. */ |
2856 | __release_sock(sk); |
2857 | |
2858 | this_cpu_inc(tcp_orphan_count); |
2859 | |
2860 | /* Have we already been destroyed by a softirq or backlog? */ |
2861 | if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) |
2862 | goto out; |
2863 | |
	/* This is a (useful) BSD violation of the RFC. There is a
	 * problem with TCP as specified in that the other end could
	 * keep a socket open forever with no application left at this end.
2867 | * We use a 1 minute timeout (about the same as BSD) then kill |
2868 | * our end. If they send after that then tough - BUT: long enough |
	 * that we won't repeat the old "4*rto = almost no time - whoops,
	 * reset" mistake.
2871 | * |
	 * Nope, it was not a mistake. It is really desired behaviour
2873 | * f.e. on http servers, when such sockets are useless, but |
2874 | * consume significant resources. Let's do it with special |
2875 | * linger2 option. --ANK |
2876 | */ |
2877 | |
2878 | if (sk->sk_state == TCP_FIN_WAIT2) { |
2879 | struct tcp_sock *tp = tcp_sk(sk); |
2880 | if (READ_ONCE(tp->linger2) < 0) { |
2881 | tcp_set_state(sk, TCP_CLOSE); |
2882 | tcp_send_active_reset(sk, GFP_ATOMIC); |
2883 | __NET_INC_STATS(sock_net(sk), |
2884 | LINUX_MIB_TCPABORTONLINGER); |
2885 | } else { |
2886 | const int tmo = tcp_fin_time(sk); |
2887 | |
2888 | if (tmo > TCP_TIMEWAIT_LEN) { |
				inet_csk_reset_keepalive_timer(sk,
						tmo - TCP_TIMEWAIT_LEN);
			} else {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
2893 | goto out; |
2894 | } |
2895 | } |
2896 | } |
2897 | if (sk->sk_state != TCP_CLOSE) { |
		if (tcp_check_oom(sk, 0)) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPABORTONMEMORY);
		} else if (!check_net(sock_net(sk))) {
2904 | /* Not possible to send reset; just close */ |
2905 | tcp_set_state(sk, TCP_CLOSE); |
2906 | } |
2907 | } |
2908 | |
2909 | if (sk->sk_state == TCP_CLOSE) { |
2910 | struct request_sock *req; |
2911 | |
2912 | req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, |
2913 | lockdep_sock_is_held(sk)); |
2914 | /* We could get here with a non-NULL req if the socket is |
2915 | * aborted (e.g., closed with unread data) before 3WHS |
2916 | * finishes. |
2917 | */ |
2918 | if (req) |
			reqsk_fastopen_remove(sk, req, false);
2920 | inet_csk_destroy_sock(sk); |
2921 | } |
2922 | /* Otherwise, socket is reprieved until protocol close. */ |
2923 | |
2924 | out: |
2925 | bh_unlock_sock(sk); |
2926 | local_bh_enable(); |
2927 | } |
2928 | |
2929 | void tcp_close(struct sock *sk, long timeout) |
2930 | { |
2931 | lock_sock(sk); |
2932 | __tcp_close(sk, timeout); |
2933 | release_sock(sk); |
2934 | if (!sk->sk_net_refcnt) |
2935 | inet_csk_clear_xmit_timers_sync(sk); |
2936 | sock_put(sk); |
2937 | } |
2938 | EXPORT_SYMBOL(tcp_close); |
2939 | |
2940 | /* These states need RST on ABORT according to RFC793 */ |
2941 | |
2942 | static inline bool tcp_need_reset(int state) |
2943 | { |
2944 | return (1 << state) & |
2945 | (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | |
2946 | TCPF_FIN_WAIT2 | TCPF_SYN_RECV); |
2947 | } |
2948 | |
2949 | static void tcp_rtx_queue_purge(struct sock *sk) |
2950 | { |
2951 | struct rb_node *p = rb_first(&sk->tcp_rtx_queue); |
2952 | |
2953 | tcp_sk(sk)->highest_sack = NULL; |
2954 | while (p) { |
2955 | struct sk_buff *skb = rb_to_skb(p); |
2956 | |
2957 | p = rb_next(p); |
2958 | /* Since we are deleting whole queue, no need to |
2959 | * list_del(&skb->tcp_tsorted_anchor) |
2960 | */ |
2961 | tcp_rtx_queue_unlink(skb, sk); |
2962 | tcp_wmem_free_skb(sk, skb); |
2963 | } |
2964 | } |
2965 | |
2966 | void tcp_write_queue_purge(struct sock *sk) |
2967 | { |
2968 | struct sk_buff *skb; |
2969 | |
	tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
2972 | tcp_skb_tsorted_anchor_cleanup(skb); |
2973 | tcp_wmem_free_skb(sk, skb); |
2974 | } |
2975 | tcp_rtx_queue_purge(sk); |
	INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
2977 | tcp_clear_all_retrans_hints(tcp_sk(sk)); |
2978 | tcp_sk(sk)->packets_out = 0; |
2979 | inet_csk(sk)->icsk_backoff = 0; |
2980 | } |
2981 | |
2982 | int tcp_disconnect(struct sock *sk, int flags) |
2983 | { |
2984 | struct inet_sock *inet = inet_sk(sk); |
2985 | struct inet_connection_sock *icsk = inet_csk(sk); |
2986 | struct tcp_sock *tp = tcp_sk(sk); |
2987 | int old_state = sk->sk_state; |
2988 | u32 seq; |
2989 | |
2990 | if (old_state != TCP_CLOSE) |
2991 | tcp_set_state(sk, TCP_CLOSE); |
2992 | |
2993 | /* ABORT function of RFC793 */ |
2994 | if (old_state == TCP_LISTEN) { |
2995 | inet_csk_listen_stop(sk); |
2996 | } else if (unlikely(tp->repair)) { |
2997 | WRITE_ONCE(sk->sk_err, ECONNABORTED); |
	} else if (tcp_need_reset(old_state) ||
2999 | (tp->snd_nxt != tp->write_seq && |
3000 | (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { |
3001 | /* The last check adjusts for discrepancy of Linux wrt. RFC |
3002 | * states |
3003 | */ |
		tcp_send_active_reset(sk, gfp_any());
3005 | WRITE_ONCE(sk->sk_err, ECONNRESET); |
3006 | } else if (old_state == TCP_SYN_SENT) |
3007 | WRITE_ONCE(sk->sk_err, ECONNRESET); |
3008 | |
3009 | tcp_clear_xmit_timers(sk); |
	__skb_queue_purge(&sk->sk_receive_queue);
3011 | WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); |
3012 | WRITE_ONCE(tp->urg_data, 0); |
3013 | tcp_write_queue_purge(sk); |
3014 | tcp_fastopen_active_disable_ofo_check(sk); |
	skb_rbtree_purge(&tp->out_of_order_queue);
3016 | |
3017 | inet->inet_dport = 0; |
3018 | |
3019 | inet_bhash2_reset_saddr(sk); |
3020 | |
3021 | WRITE_ONCE(sk->sk_shutdown, 0); |
	sock_reset_flag(sk, SOCK_DONE);
3023 | tp->srtt_us = 0; |
3024 | tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); |
3025 | tp->rcv_rtt_last_tsecr = 0; |
3026 | |
3027 | seq = tp->write_seq + tp->max_window + 2; |
3028 | if (!seq) |
3029 | seq = 1; |
3030 | WRITE_ONCE(tp->write_seq, seq); |
3031 | |
3032 | icsk->icsk_backoff = 0; |
3033 | icsk->icsk_probes_out = 0; |
3034 | icsk->icsk_probes_tstamp = 0; |
3035 | icsk->icsk_rto = TCP_TIMEOUT_INIT; |
3036 | icsk->icsk_rto_min = TCP_RTO_MIN; |
3037 | icsk->icsk_delack_max = TCP_DELACK_MAX; |
3038 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; |
3039 | tcp_snd_cwnd_set(tp, TCP_INIT_CWND); |
3040 | tp->snd_cwnd_cnt = 0; |
3041 | tp->is_cwnd_limited = 0; |
3042 | tp->max_packets_out = 0; |
3043 | tp->window_clamp = 0; |
3044 | tp->delivered = 0; |
3045 | tp->delivered_ce = 0; |
3046 | if (icsk->icsk_ca_ops->release) |
3047 | icsk->icsk_ca_ops->release(sk); |
3048 | memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); |
3049 | icsk->icsk_ca_initialized = 0; |
	tcp_set_ca_state(sk, TCP_CA_Open);
3051 | tp->is_sack_reneg = 0; |
3052 | tcp_clear_retrans(tp); |
3053 | tp->total_retrans = 0; |
3054 | inet_csk_delack_init(sk); |
3055 | /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0 |
3056 | * issue in __tcp_select_window() |
3057 | */ |
3058 | icsk->icsk_ack.rcv_mss = TCP_MIN_MSS; |
3059 | memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); |
3060 | __sk_dst_reset(sk); |
3061 | dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL)); |
3062 | tcp_saved_syn_free(tp); |
3063 | tp->compressed_ack = 0; |
3064 | tp->segs_in = 0; |
3065 | tp->segs_out = 0; |
3066 | tp->bytes_sent = 0; |
3067 | tp->bytes_acked = 0; |
3068 | tp->bytes_received = 0; |
3069 | tp->bytes_retrans = 0; |
3070 | tp->data_segs_in = 0; |
3071 | tp->data_segs_out = 0; |
3072 | tp->duplicate_sack[0].start_seq = 0; |
3073 | tp->duplicate_sack[0].end_seq = 0; |
3074 | tp->dsack_dups = 0; |
3075 | tp->reord_seen = 0; |
3076 | tp->retrans_out = 0; |
3077 | tp->sacked_out = 0; |
3078 | tp->tlp_high_seq = 0; |
3079 | tp->last_oow_ack_time = 0; |
3080 | tp->plb_rehash = 0; |
3081 | /* There's a bubble in the pipe until at least the first ACK. */ |
3082 | tp->app_limited = ~0U; |
3083 | tp->rate_app_limited = 1; |
3084 | tp->rack.mstamp = 0; |
3085 | tp->rack.advanced = 0; |
3086 | tp->rack.reo_wnd_steps = 1; |
3087 | tp->rack.last_delivered = 0; |
3088 | tp->rack.reo_wnd_persist = 0; |
3089 | tp->rack.dsack_seen = 0; |
3090 | tp->syn_data_acked = 0; |
3091 | tp->rx_opt.saw_tstamp = 0; |
3092 | tp->rx_opt.dsack = 0; |
3093 | tp->rx_opt.num_sacks = 0; |
3094 | tp->rcv_ooopack = 0; |
3095 | |
3096 | |
3097 | /* Clean up fastopen related fields */ |
3098 | tcp_free_fastopen_req(tp); |
3099 | inet_clear_bit(DEFER_CONNECT, sk); |
3100 | tp->fastopen_client_fail = 0; |
3101 | |
3102 | WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); |
3103 | |
3104 | if (sk->sk_frag.page) { |
		put_page(sk->sk_frag.page);
3106 | sk->sk_frag.page = NULL; |
3107 | sk->sk_frag.offset = 0; |
3108 | } |
3109 | sk_error_report(sk); |
3110 | return 0; |
3111 | } |
3112 | EXPORT_SYMBOL(tcp_disconnect); |
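
/* User space reaches tcp_disconnect() by connect()ing an established socket
 * to an AF_UNSPEC address; a hedged sketch (illustrative only):
 *
 *	struct sockaddr unspec = { .sa_family = AF_UNSPEC };
 *
 *	connect(fd, &unspec, sizeof(unspec));
 */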
3113 | |
3114 | static inline bool tcp_can_repair_sock(const struct sock *sk) |
3115 | { |
	return sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
3117 | (sk->sk_state != TCP_LISTEN); |
3118 | } |
3119 | |
3120 | static int tcp_repair_set_window(struct tcp_sock *tp, sockptr_t optbuf, int len) |
3121 | { |
3122 | struct tcp_repair_window opt; |
3123 | |
3124 | if (!tp->repair) |
3125 | return -EPERM; |
3126 | |
3127 | if (len != sizeof(opt)) |
3128 | return -EINVAL; |
3129 | |
	if (copy_from_sockptr(&opt, optbuf, sizeof(opt)))
3131 | return -EFAULT; |
3132 | |
3133 | if (opt.max_window < opt.snd_wnd) |
3134 | return -EINVAL; |
3135 | |
3136 | if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd)) |
3137 | return -EINVAL; |
3138 | |
3139 | if (after(opt.rcv_wup, tp->rcv_nxt)) |
3140 | return -EINVAL; |
3141 | |
3142 | tp->snd_wl1 = opt.snd_wl1; |
3143 | tp->snd_wnd = opt.snd_wnd; |
3144 | tp->max_window = opt.max_window; |
3145 | |
3146 | tp->rcv_wnd = opt.rcv_wnd; |
3147 | tp->rcv_wup = opt.rcv_wup; |
3148 | |
3149 | return 0; |
3150 | } |
3151 | |
3152 | static int tcp_repair_options_est(struct sock *sk, sockptr_t optbuf, |
3153 | unsigned int len) |
3154 | { |
3155 | struct tcp_sock *tp = tcp_sk(sk); |
3156 | struct tcp_repair_opt opt; |
3157 | size_t offset = 0; |
3158 | |
3159 | while (len >= sizeof(opt)) { |
		if (copy_from_sockptr_offset(&opt, optbuf, offset, sizeof(opt)))
3161 | return -EFAULT; |
3162 | |
3163 | offset += sizeof(opt); |
3164 | len -= sizeof(opt); |
3165 | |
3166 | switch (opt.opt_code) { |
3167 | case TCPOPT_MSS: |
3168 | tp->rx_opt.mss_clamp = opt.opt_val; |
3169 | tcp_mtup_init(sk); |
3170 | break; |
3171 | case TCPOPT_WINDOW: |
3172 | { |
3173 | u16 snd_wscale = opt.opt_val & 0xFFFF; |
3174 | u16 rcv_wscale = opt.opt_val >> 16; |
3175 | |
3176 | if (snd_wscale > TCP_MAX_WSCALE || rcv_wscale > TCP_MAX_WSCALE) |
3177 | return -EFBIG; |
3178 | |
3179 | tp->rx_opt.snd_wscale = snd_wscale; |
3180 | tp->rx_opt.rcv_wscale = rcv_wscale; |
3181 | tp->rx_opt.wscale_ok = 1; |
3182 | } |
3183 | break; |
3184 | case TCPOPT_SACK_PERM: |
3185 | if (opt.opt_val != 0) |
3186 | return -EINVAL; |
3187 | |
3188 | tp->rx_opt.sack_ok |= TCP_SACK_SEEN; |
3189 | break; |
3190 | case TCPOPT_TIMESTAMP: |
3191 | if (opt.opt_val != 0) |
3192 | return -EINVAL; |
3193 | |
3194 | tp->rx_opt.tstamp_ok = 1; |
3195 | break; |
3196 | } |
3197 | } |
3198 | |
3199 | return 0; |
3200 | } |
3201 | |
3202 | DEFINE_STATIC_KEY_FALSE(tcp_tx_delay_enabled); |
3203 | EXPORT_SYMBOL(tcp_tx_delay_enabled); |
3204 | |
3205 | static void tcp_enable_tx_delay(void) |
3206 | { |
3207 | if (!static_branch_unlikely(&tcp_tx_delay_enabled)) { |
3208 | static int __tcp_tx_delay_enabled = 0; |
3209 | |
3210 | if (cmpxchg(&__tcp_tx_delay_enabled, 0, 1) == 0) { |
3211 | static_branch_enable(&tcp_tx_delay_enabled); |
3212 | pr_info("TCP_TX_DELAY enabled\n" ); |
3213 | } |
3214 | } |
3215 | } |
3216 | |
3217 | /* When set indicates to always queue non-full frames. Later the user clears |
3218 | * this option and we transmit any pending partial frames in the queue. This is |
3219 | * meant to be used alongside sendfile() to get properly filled frames when the |
3220 | * user (for example) must write out headers with a write() call first and then |
3221 | * use sendfile to send out the data parts. |
3222 | * |
3223 | * TCP_CORK can be set together with TCP_NODELAY and it is stronger than |
3224 | * TCP_NODELAY. |
3225 | */ |
3226 | void __tcp_sock_set_cork(struct sock *sk, bool on) |
3227 | { |
3228 | struct tcp_sock *tp = tcp_sk(sk); |
3229 | |
3230 | if (on) { |
3231 | tp->nonagle |= TCP_NAGLE_CORK; |
3232 | } else { |
3233 | tp->nonagle &= ~TCP_NAGLE_CORK; |
3234 | if (tp->nonagle & TCP_NAGLE_OFF) |
3235 | tp->nonagle |= TCP_NAGLE_PUSH; |
3236 | tcp_push_pending_frames(sk); |
3237 | } |
3238 | } |
3239 | |
3240 | void tcp_sock_set_cork(struct sock *sk, bool on) |
3241 | { |
3242 | lock_sock(sk); |
3243 | __tcp_sock_set_cork(sk, on); |
3244 | release_sock(sk); |
3245 | } |
3246 | EXPORT_SYMBOL(tcp_sock_set_cork); |
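
/* The pattern described above, sketched with the TCP_CORK socket option
 * (illustrative only, error handling omitted):
 *
 *	int on = 1, off = 0;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *	write(fd, hdr, hdr_len);			// queued, not pushed yet
 *	sendfile(fd, file_fd, NULL, file_len);		// fills the same frames
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
 */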
3247 | |
/* TCP_NODELAY is weaker than TCP_CORK, so this option set on a corked socket is
3249 | * remembered, but it is not activated until cork is cleared. |
3250 | * |
3251 | * However, when TCP_NODELAY is set we make an explicit push, which overrides |
3252 | * even TCP_CORK for currently queued segments. |
3253 | */ |
3254 | void __tcp_sock_set_nodelay(struct sock *sk, bool on) |
3255 | { |
3256 | if (on) { |
3257 | tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; |
3258 | tcp_push_pending_frames(sk); |
3259 | } else { |
3260 | tcp_sk(sk)->nonagle &= ~TCP_NAGLE_OFF; |
3261 | } |
3262 | } |
3263 | |
3264 | void tcp_sock_set_nodelay(struct sock *sk) |
3265 | { |
3266 | lock_sock(sk); |
	__tcp_sock_set_nodelay(sk, true);
3268 | release_sock(sk); |
3269 | } |
3270 | EXPORT_SYMBOL(tcp_sock_set_nodelay); |
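
/* User-space equivalent (illustrative only): disable Nagle so small writes
 * are pushed out immediately, as latency-sensitive protocols typically do:
 *
 *	int one = 1;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
 */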
3271 | |
3272 | static void __tcp_sock_set_quickack(struct sock *sk, int val) |
3273 | { |
3274 | if (!val) { |
3275 | inet_csk_enter_pingpong_mode(sk); |
3276 | return; |
3277 | } |
3278 | |
3279 | inet_csk_exit_pingpong_mode(sk); |
3280 | if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && |
3281 | inet_csk_ack_scheduled(sk)) { |
3282 | inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_PUSHED; |
		tcp_cleanup_rbuf(sk, 1);
3284 | if (!(val & 1)) |
3285 | inet_csk_enter_pingpong_mode(sk); |
3286 | } |
3287 | } |
3288 | |
3289 | void tcp_sock_set_quickack(struct sock *sk, int val) |
3290 | { |
3291 | lock_sock(sk); |
3292 | __tcp_sock_set_quickack(sk, val); |
3293 | release_sock(sk); |
3294 | } |
3295 | EXPORT_SYMBOL(tcp_sock_set_quickack); |
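
/* User-space equivalent (illustrative only). TCP_QUICKACK is not sticky:
 * the kernel may re-enter delayed-ACK (pingpong) mode later, so applications
 * usually re-arm it around latency-critical reads:
 *
 *	int one = 1;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
 */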
3296 | |
3297 | int tcp_sock_set_syncnt(struct sock *sk, int val) |
3298 | { |
3299 | if (val < 1 || val > MAX_TCP_SYNCNT) |
3300 | return -EINVAL; |
3301 | |
3302 | WRITE_ONCE(inet_csk(sk)->icsk_syn_retries, val); |
3303 | return 0; |
3304 | } |
3305 | EXPORT_SYMBOL(tcp_sock_set_syncnt); |
3306 | |
3307 | int tcp_sock_set_user_timeout(struct sock *sk, int val) |
3308 | { |
3309 | /* Cap the max time in ms TCP will retry or probe the window |
3310 | * before giving up and aborting (ETIMEDOUT) a connection. |
3311 | */ |
3312 | if (val < 0) |
3313 | return -EINVAL; |
3314 | |
3315 | WRITE_ONCE(inet_csk(sk)->icsk_user_timeout, val); |
3316 | return 0; |
3317 | } |
3318 | EXPORT_SYMBOL(tcp_sock_set_user_timeout); |
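
/* User-space equivalent (illustrative only): abort with ETIMEDOUT if sent
 * data stays unacknowledged for roughly 30 seconds:
 *
 *	unsigned int ms = 30000;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &ms, sizeof(ms));
 */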
3319 | |
3320 | int tcp_sock_set_keepidle_locked(struct sock *sk, int val) |
3321 | { |
3322 | struct tcp_sock *tp = tcp_sk(sk); |
3323 | |
3324 | if (val < 1 || val > MAX_TCP_KEEPIDLE) |
3325 | return -EINVAL; |
3326 | |
3327 | /* Paired with WRITE_ONCE() in keepalive_time_when() */ |
3328 | WRITE_ONCE(tp->keepalive_time, val * HZ); |
	if (sock_flag(sk, SOCK_KEEPOPEN) &&
3330 | !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) { |
3331 | u32 elapsed = keepalive_time_elapsed(tp); |
3332 | |
3333 | if (tp->keepalive_time > elapsed) |
3334 | elapsed = tp->keepalive_time - elapsed; |
3335 | else |
3336 | elapsed = 0; |
		inet_csk_reset_keepalive_timer(sk, elapsed);
3338 | } |
3339 | |
3340 | return 0; |
3341 | } |
3342 | |
3343 | int tcp_sock_set_keepidle(struct sock *sk, int val) |
3344 | { |
3345 | int err; |
3346 | |
3347 | lock_sock(sk); |
3348 | err = tcp_sock_set_keepidle_locked(sk, val); |
3349 | release_sock(sk); |
3350 | return err; |
3351 | } |
3352 | EXPORT_SYMBOL(tcp_sock_set_keepidle); |
3353 | |
3354 | int tcp_sock_set_keepintvl(struct sock *sk, int val) |
3355 | { |
3356 | if (val < 1 || val > MAX_TCP_KEEPINTVL) |
3357 | return -EINVAL; |
3358 | |
3359 | WRITE_ONCE(tcp_sk(sk)->keepalive_intvl, val * HZ); |
3360 | return 0; |
3361 | } |
3362 | EXPORT_SYMBOL(tcp_sock_set_keepintvl); |
3363 | |
3364 | int tcp_sock_set_keepcnt(struct sock *sk, int val) |
3365 | { |
3366 | if (val < 1 || val > MAX_TCP_KEEPCNT) |
3367 | return -EINVAL; |
3368 | |
3369 | /* Paired with READ_ONCE() in keepalive_probes() */ |
3370 | WRITE_ONCE(tcp_sk(sk)->keepalive_probes, val); |
3371 | return 0; |
3372 | } |
3373 | EXPORT_SYMBOL(tcp_sock_set_keepcnt); |
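
/* Illustrative keepalive tuning with the helpers above (hypothetical caller;
 * idle and interval values are in seconds):
 *
 *	sock_set_keepalive(sk);			// SO_KEEPALIVE
 *	tcp_sock_set_keepidle(sk, 60);		// first probe after 60s idle
 *	tcp_sock_set_keepintvl(sk, 10);		// then probe every 10s
 *	tcp_sock_set_keepcnt(sk, 5);		// give up after 5 failed probes
 *
 * A dead peer is then detected after roughly 60 + 5 * 10 seconds.
 */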
3374 | |
3375 | int tcp_set_window_clamp(struct sock *sk, int val) |
3376 | { |
3377 | struct tcp_sock *tp = tcp_sk(sk); |
3378 | |
3379 | if (!val) { |
3380 | if (sk->sk_state != TCP_CLOSE) |
3381 | return -EINVAL; |
3382 | tp->window_clamp = 0; |
3383 | } else { |
3384 | u32 new_rcv_ssthresh, old_window_clamp = tp->window_clamp; |
3385 | u32 new_window_clamp = val < SOCK_MIN_RCVBUF / 2 ? |
3386 | SOCK_MIN_RCVBUF / 2 : val; |
3387 | |
3388 | if (new_window_clamp == old_window_clamp) |
3389 | return 0; |
3390 | |
3391 | tp->window_clamp = new_window_clamp; |
3392 | if (new_window_clamp < old_window_clamp) { |
3393 | /* need to apply the reserved mem provisioning only |
3394 | * when shrinking the window clamp |
3395 | */ |
			__tcp_adjust_rcv_ssthresh(sk, tp->window_clamp);
3397 | |
3398 | } else { |
3399 | new_rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp); |
3400 | tp->rcv_ssthresh = max(new_rcv_ssthresh, |
3401 | tp->rcv_ssthresh); |
3402 | } |
3403 | } |
3404 | return 0; |
3405 | } |
3406 | |
3407 | /* |
3408 | * Socket option code for TCP. |
3409 | */ |
3410 | int do_tcp_setsockopt(struct sock *sk, int level, int optname, |
3411 | sockptr_t optval, unsigned int optlen) |
3412 | { |
3413 | struct tcp_sock *tp = tcp_sk(sk); |
3414 | struct inet_connection_sock *icsk = inet_csk(sk); |
3415 | struct net *net = sock_net(sk); |
3416 | int val; |
3417 | int err = 0; |
3418 | |
3419 | /* These are data/string values, all the others are ints */ |
3420 | switch (optname) { |
3421 | case TCP_CONGESTION: { |
3422 | char name[TCP_CA_NAME_MAX]; |
3423 | |
3424 | if (optlen < 1) |
3425 | return -EINVAL; |
3426 | |
		val = strncpy_from_sockptr(name, optval,
3428 | min_t(long, TCP_CA_NAME_MAX-1, optlen)); |
3429 | if (val < 0) |
3430 | return -EFAULT; |
3431 | name[val] = 0; |
3432 | |
3433 | sockopt_lock_sock(sk); |
		err = tcp_set_congestion_control(sk, name, !has_current_bpf_ctx(),
						 sockopt_ns_capable(sock_net(sk)->user_ns,
								    CAP_NET_ADMIN));
3437 | sockopt_release_sock(sk); |
3438 | return err; |
3439 | } |
3440 | case TCP_ULP: { |
3441 | char name[TCP_ULP_NAME_MAX]; |
3442 | |
3443 | if (optlen < 1) |
3444 | return -EINVAL; |
3445 | |
		val = strncpy_from_sockptr(name, optval,
3447 | min_t(long, TCP_ULP_NAME_MAX - 1, |
3448 | optlen)); |
3449 | if (val < 0) |
3450 | return -EFAULT; |
3451 | name[val] = 0; |
3452 | |
3453 | sockopt_lock_sock(sk); |
3454 | err = tcp_set_ulp(sk, name); |
3455 | sockopt_release_sock(sk); |
3456 | return err; |
3457 | } |
3458 | case TCP_FASTOPEN_KEY: { |
3459 | __u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH]; |
3460 | __u8 *backup_key = NULL; |
3461 | |
		/* Allow a backup key as well to facilitate key rotation.
		 * The first key is the active one.
		 */
3465 | if (optlen != TCP_FASTOPEN_KEY_LENGTH && |
3466 | optlen != TCP_FASTOPEN_KEY_BUF_LENGTH) |
3467 | return -EINVAL; |
3468 | |
		if (copy_from_sockptr(key, optval, optlen))
3470 | return -EFAULT; |
3471 | |
3472 | if (optlen == TCP_FASTOPEN_KEY_BUF_LENGTH) |
3473 | backup_key = key + TCP_FASTOPEN_KEY_LENGTH; |
3474 | |
		return tcp_fastopen_reset_cipher(net, sk, key, backup_key);
3476 | } |
3477 | default: |
3478 | /* fallthru */ |
3479 | break; |
3480 | } |
3481 | |
3482 | if (optlen < sizeof(int)) |
3483 | return -EINVAL; |
3484 | |
	if (copy_from_sockptr(&val, optval, sizeof(val)))
3486 | return -EFAULT; |
3487 | |
3488 | /* Handle options that can be set without locking the socket. */ |
3489 | switch (optname) { |
3490 | case TCP_SYNCNT: |
3491 | return tcp_sock_set_syncnt(sk, val); |
3492 | case TCP_USER_TIMEOUT: |
3493 | return tcp_sock_set_user_timeout(sk, val); |
3494 | case TCP_KEEPINTVL: |
3495 | return tcp_sock_set_keepintvl(sk, val); |
3496 | case TCP_KEEPCNT: |
3497 | return tcp_sock_set_keepcnt(sk, val); |
3498 | case TCP_LINGER2: |
3499 | if (val < 0) |
3500 | WRITE_ONCE(tp->linger2, -1); |
3501 | else if (val > TCP_FIN_TIMEOUT_MAX / HZ) |
3502 | WRITE_ONCE(tp->linger2, TCP_FIN_TIMEOUT_MAX); |
3503 | else |
3504 | WRITE_ONCE(tp->linger2, val * HZ); |
3505 | return 0; |
3506 | case TCP_DEFER_ACCEPT: |
3507 | /* Translate value in seconds to number of retransmits */ |
3508 | WRITE_ONCE(icsk->icsk_accept_queue.rskq_defer_accept, |
3509 | secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ, |
3510 | TCP_RTO_MAX / HZ)); |
3511 | return 0; |
3512 | } |
3513 | |
3514 | sockopt_lock_sock(sk); |
3515 | |
3516 | switch (optname) { |
3517 | case TCP_MAXSEG: |
		/* Values greater than the interface MTU won't take effect.
		 * However, at the point when this call is made we typically
		 * don't yet know which interface is going to be used.
		 */
3522 | if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) { |
3523 | err = -EINVAL; |
3524 | break; |
3525 | } |
3526 | tp->rx_opt.user_mss = val; |
3527 | break; |
3528 | |
3529 | case TCP_NODELAY: |
		__tcp_sock_set_nodelay(sk, val);
3531 | break; |
3532 | |
3533 | case TCP_THIN_LINEAR_TIMEOUTS: |
3534 | if (val < 0 || val > 1) |
3535 | err = -EINVAL; |
3536 | else |
3537 | tp->thin_lto = val; |
3538 | break; |
3539 | |
3540 | case TCP_THIN_DUPACK: |
3541 | if (val < 0 || val > 1) |
3542 | err = -EINVAL; |
3543 | break; |
3544 | |
3545 | case TCP_REPAIR: |
3546 | if (!tcp_can_repair_sock(sk)) |
3547 | err = -EPERM; |
3548 | else if (val == TCP_REPAIR_ON) { |
3549 | tp->repair = 1; |
3550 | sk->sk_reuse = SK_FORCE_REUSE; |
3551 | tp->repair_queue = TCP_NO_QUEUE; |
3552 | } else if (val == TCP_REPAIR_OFF) { |
3553 | tp->repair = 0; |
3554 | sk->sk_reuse = SK_NO_REUSE; |
3555 | tcp_send_window_probe(sk); |
3556 | } else if (val == TCP_REPAIR_OFF_NO_WP) { |
3557 | tp->repair = 0; |
3558 | sk->sk_reuse = SK_NO_REUSE; |
3559 | } else |
3560 | err = -EINVAL; |
3561 | |
3562 | break; |
3563 | |
3564 | case TCP_REPAIR_QUEUE: |
3565 | if (!tp->repair) |
3566 | err = -EPERM; |
3567 | else if ((unsigned int)val < TCP_QUEUES_NR) |
3568 | tp->repair_queue = val; |
3569 | else |
3570 | err = -EINVAL; |
3571 | break; |
3572 | |
3573 | case TCP_QUEUE_SEQ: |
3574 | if (sk->sk_state != TCP_CLOSE) { |
3575 | err = -EPERM; |
3576 | } else if (tp->repair_queue == TCP_SEND_QUEUE) { |
3577 | if (!tcp_rtx_queue_empty(sk)) |
3578 | err = -EPERM; |
3579 | else |
3580 | WRITE_ONCE(tp->write_seq, val); |
3581 | } else if (tp->repair_queue == TCP_RECV_QUEUE) { |
3582 | if (tp->rcv_nxt != tp->copied_seq) { |
3583 | err = -EPERM; |
3584 | } else { |
3585 | WRITE_ONCE(tp->rcv_nxt, val); |
3586 | WRITE_ONCE(tp->copied_seq, val); |
3587 | } |
3588 | } else { |
3589 | err = -EINVAL; |
3590 | } |
3591 | break; |
3592 | |
3593 | case TCP_REPAIR_OPTIONS: |
3594 | if (!tp->repair) |
3595 | err = -EINVAL; |
3596 | else if (sk->sk_state == TCP_ESTABLISHED && !tp->bytes_sent) |
			err = tcp_repair_options_est(sk, optval, optlen);
3598 | else |
3599 | err = -EPERM; |
3600 | break; |
3601 | |
3602 | case TCP_CORK: |
		__tcp_sock_set_cork(sk, val);
3604 | break; |
3605 | |
3606 | case TCP_KEEPIDLE: |
3607 | err = tcp_sock_set_keepidle_locked(sk, val); |
3608 | break; |
3609 | case TCP_SAVE_SYN: |
3610 | /* 0: disable, 1: enable, 2: start from ether_header */ |
3611 | if (val < 0 || val > 2) |
3612 | err = -EINVAL; |
3613 | else |
3614 | tp->save_syn = val; |
3615 | break; |
3616 | |
3617 | case TCP_WINDOW_CLAMP: |
3618 | err = tcp_set_window_clamp(sk, val); |
3619 | break; |
3620 | |
3621 | case TCP_QUICKACK: |
3622 | __tcp_sock_set_quickack(sk, val); |
3623 | break; |
3624 | |
3625 | case TCP_AO_REPAIR: |
3626 | if (!tcp_can_repair_sock(sk)) { |
3627 | err = -EPERM; |
3628 | break; |
3629 | } |
3630 | err = tcp_ao_set_repair(sk, optval, optlen); |
3631 | break; |
3632 | #ifdef CONFIG_TCP_AO |
3633 | case TCP_AO_ADD_KEY: |
3634 | case TCP_AO_DEL_KEY: |
3635 | case TCP_AO_INFO: { |
3636 | /* If this is the first TCP-AO setsockopt() on the socket, |
3637 | * sk_state has to be LISTEN or CLOSE. Allow TCP_REPAIR |
3638 | * in any state. |
3639 | */ |
3640 | if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) |
3641 | goto ao_parse; |
3642 | if (rcu_dereference_protected(tcp_sk(sk)->ao_info, |
3643 | lockdep_sock_is_held(sk))) |
3644 | goto ao_parse; |
3645 | if (tp->repair) |
3646 | goto ao_parse; |
3647 | err = -EISCONN; |
3648 | break; |
3649 | ao_parse: |
3650 | err = tp->af_specific->ao_parse(sk, optname, optval, optlen); |
3651 | break; |
3652 | } |
3653 | #endif |
3654 | #ifdef CONFIG_TCP_MD5SIG |
3655 | case TCP_MD5SIG: |
3656 | case TCP_MD5SIG_EXT: |
3657 | err = tp->af_specific->md5_parse(sk, optname, optval, optlen); |
3658 | break; |
3659 | #endif |
3660 | case TCP_FASTOPEN: |
3661 | if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE | |
3662 | TCPF_LISTEN))) { |
3663 | tcp_fastopen_init_key_once(net); |
3664 | |
			fastopen_queue_tune(sk, val);
3666 | } else { |
3667 | err = -EINVAL; |
3668 | } |
3669 | break; |
3670 | case TCP_FASTOPEN_CONNECT: |
3671 | if (val > 1 || val < 0) { |
3672 | err = -EINVAL; |
3673 | } else if (READ_ONCE(net->ipv4.sysctl_tcp_fastopen) & |
3674 | TFO_CLIENT_ENABLE) { |
3675 | if (sk->sk_state == TCP_CLOSE) |
3676 | tp->fastopen_connect = val; |
3677 | else |
3678 | err = -EINVAL; |
3679 | } else { |
3680 | err = -EOPNOTSUPP; |
3681 | } |
3682 | break; |
3683 | case TCP_FASTOPEN_NO_COOKIE: |
3684 | if (val > 1 || val < 0) |
3685 | err = -EINVAL; |
3686 | else if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) |
3687 | err = -EINVAL; |
3688 | else |
3689 | tp->fastopen_no_cookie = val; |
3690 | break; |
3691 | case TCP_TIMESTAMP: |
3692 | if (!tp->repair) { |
3693 | err = -EPERM; |
3694 | break; |
3695 | } |
		/* val is an opaque field; its low-order bit carries the
		 * usec_ts enable bit. This is best effort, and we do not
		 * care if the user makes an error.
		 */
3700 | tp->tcp_usec_ts = val & 1; |
3701 | WRITE_ONCE(tp->tsoffset, val - tcp_clock_ts(tp->tcp_usec_ts)); |
3702 | break; |
3703 | case TCP_REPAIR_WINDOW: |
		err = tcp_repair_set_window(tp, optval, optlen);
3705 | break; |
3706 | case TCP_NOTSENT_LOWAT: |
3707 | WRITE_ONCE(tp->notsent_lowat, val); |
3708 | sk->sk_write_space(sk); |
3709 | break; |
3710 | case TCP_INQ: |
3711 | if (val > 1 || val < 0) |
3712 | err = -EINVAL; |
3713 | else |
3714 | tp->recvmsg_inq = val; |
3715 | break; |
3716 | case TCP_TX_DELAY: |
3717 | if (val) |
3718 | tcp_enable_tx_delay(); |
3719 | WRITE_ONCE(tp->tcp_tx_delay, val); |
3720 | break; |
3721 | default: |
3722 | err = -ENOPROTOOPT; |
3723 | break; |
3724 | } |
3725 | |
3726 | sockopt_release_sock(sk); |
3727 | return err; |
3728 | } |
3729 | |
3730 | int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, |
3731 | unsigned int optlen) |
3732 | { |
3733 | const struct inet_connection_sock *icsk = inet_csk(sk); |
3734 | |
3735 | if (level != SOL_TCP) |
3736 | /* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */ |
3737 | return READ_ONCE(icsk->icsk_af_ops)->setsockopt(sk, level, optname, |
3738 | optval, optlen); |
3739 | return do_tcp_setsockopt(sk, level, optname, optval, optlen); |
3740 | } |
3741 | EXPORT_SYMBOL(tcp_setsockopt); |
3742 | |
3743 | static void tcp_get_info_chrono_stats(const struct tcp_sock *tp, |
3744 | struct tcp_info *info) |
3745 | { |
3746 | u64 stats[__TCP_CHRONO_MAX], total = 0; |
3747 | enum tcp_chrono i; |
3748 | |
3749 | for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) { |
3750 | stats[i] = tp->chrono_stat[i - 1]; |
3751 | if (i == tp->chrono_type) |
3752 | stats[i] += tcp_jiffies32 - tp->chrono_start; |
3753 | stats[i] *= USEC_PER_SEC / HZ; |
3754 | total += stats[i]; |
3755 | } |
3756 | |
3757 | info->tcpi_busy_time = total; |
3758 | info->tcpi_rwnd_limited = stats[TCP_CHRONO_RWND_LIMITED]; |
3759 | info->tcpi_sndbuf_limited = stats[TCP_CHRONO_SNDBUF_LIMITED]; |
3760 | } |
3761 | |
3762 | /* Return information about state of tcp endpoint in API format. */ |
3763 | void tcp_get_info(struct sock *sk, struct tcp_info *info) |
3764 | { |
3765 | const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ |
3766 | const struct inet_connection_sock *icsk = inet_csk(sk); |
3767 | unsigned long rate; |
3768 | u32 now; |
3769 | u64 rate64; |
3770 | bool slow; |
3771 | |
3772 | memset(info, 0, sizeof(*info)); |
3773 | if (sk->sk_type != SOCK_STREAM) |
3774 | return; |
3775 | |
3776 | info->tcpi_state = inet_sk_state_load(sk); |
3777 | |
3778 | /* Report meaningful fields for all TCP states, including listeners */ |
3779 | rate = READ_ONCE(sk->sk_pacing_rate); |
3780 | rate64 = (rate != ~0UL) ? rate : ~0ULL; |
3781 | info->tcpi_pacing_rate = rate64; |
3782 | |
3783 | rate = READ_ONCE(sk->sk_max_pacing_rate); |
3784 | rate64 = (rate != ~0UL) ? rate : ~0ULL; |
3785 | info->tcpi_max_pacing_rate = rate64; |
3786 | |
3787 | info->tcpi_reordering = tp->reordering; |
3788 | info->tcpi_snd_cwnd = tcp_snd_cwnd(tp); |
3789 | |
3790 | if (info->tcpi_state == TCP_LISTEN) { |
3791 | /* listeners aliased fields : |
3792 | * tcpi_unacked -> Number of children ready for accept() |
3793 | * tcpi_sacked -> max backlog |
3794 | */ |
3795 | info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog); |
3796 | info->tcpi_sacked = READ_ONCE(sk->sk_max_ack_backlog); |
3797 | return; |
3798 | } |
3799 | |
3800 | slow = lock_sock_fast(sk); |
3801 | |
3802 | info->tcpi_ca_state = icsk->icsk_ca_state; |
3803 | info->tcpi_retransmits = icsk->icsk_retransmits; |
3804 | info->tcpi_probes = icsk->icsk_probes_out; |
3805 | info->tcpi_backoff = icsk->icsk_backoff; |
3806 | |
3807 | if (tp->rx_opt.tstamp_ok) |
3808 | info->tcpi_options |= TCPI_OPT_TIMESTAMPS; |
3809 | if (tcp_is_sack(tp)) |
3810 | info->tcpi_options |= TCPI_OPT_SACK; |
3811 | if (tp->rx_opt.wscale_ok) { |
3812 | info->tcpi_options |= TCPI_OPT_WSCALE; |
3813 | info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; |
3814 | info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; |
3815 | } |
3816 | |
3817 | if (tp->ecn_flags & TCP_ECN_OK) |
3818 | info->tcpi_options |= TCPI_OPT_ECN; |
3819 | if (tp->ecn_flags & TCP_ECN_SEEN) |
3820 | info->tcpi_options |= TCPI_OPT_ECN_SEEN; |
3821 | if (tp->syn_data_acked) |
3822 | info->tcpi_options |= TCPI_OPT_SYN_DATA; |
3823 | if (tp->tcp_usec_ts) |
3824 | info->tcpi_options |= TCPI_OPT_USEC_TS; |
3825 | |
	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
3827 | info->tcpi_ato = jiffies_to_usecs(min_t(u32, icsk->icsk_ack.ato, |
3828 | tcp_delack_max(sk))); |
3829 | info->tcpi_snd_mss = tp->mss_cache; |
3830 | info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; |
3831 | |
3832 | info->tcpi_unacked = tp->packets_out; |
3833 | info->tcpi_sacked = tp->sacked_out; |
3834 | |
3835 | info->tcpi_lost = tp->lost_out; |
3836 | info->tcpi_retrans = tp->retrans_out; |
3837 | |
3838 | now = tcp_jiffies32; |
	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
3842 | |
3843 | info->tcpi_pmtu = icsk->icsk_pmtu_cookie; |
3844 | info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; |
3845 | info->tcpi_rtt = tp->srtt_us >> 3; |
3846 | info->tcpi_rttvar = tp->mdev_us >> 2; |
3847 | info->tcpi_snd_ssthresh = tp->snd_ssthresh; |
3848 | info->tcpi_advmss = tp->advmss; |
3849 | |
3850 | info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3; |
3851 | info->tcpi_rcv_space = tp->rcvq_space.space; |
3852 | |
3853 | info->tcpi_total_retrans = tp->total_retrans; |
3854 | |
3855 | info->tcpi_bytes_acked = tp->bytes_acked; |
3856 | info->tcpi_bytes_received = tp->bytes_received; |
3857 | info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt); |
3858 | tcp_get_info_chrono_stats(tp, info); |
3859 | |
3860 | info->tcpi_segs_out = tp->segs_out; |
3861 | |
3862 | /* segs_in and data_segs_in can be updated from tcp_segs_in() from BH */ |
3863 | info->tcpi_segs_in = READ_ONCE(tp->segs_in); |
3864 | info->tcpi_data_segs_in = READ_ONCE(tp->data_segs_in); |
3865 | |
3866 | info->tcpi_min_rtt = tcp_min_rtt(tp); |
3867 | info->tcpi_data_segs_out = tp->data_segs_out; |
3868 | |
3869 | info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 1 : 0; |
3870 | rate64 = tcp_compute_delivery_rate(tp); |
3871 | if (rate64) |
3872 | info->tcpi_delivery_rate = rate64; |
3873 | info->tcpi_delivered = tp->delivered; |
3874 | info->tcpi_delivered_ce = tp->delivered_ce; |
3875 | info->tcpi_bytes_sent = tp->bytes_sent; |
3876 | info->tcpi_bytes_retrans = tp->bytes_retrans; |
3877 | info->tcpi_dsack_dups = tp->dsack_dups; |
3878 | info->tcpi_reord_seen = tp->reord_seen; |
3879 | info->tcpi_rcv_ooopack = tp->rcv_ooopack; |
3880 | info->tcpi_snd_wnd = tp->snd_wnd; |
3881 | info->tcpi_rcv_wnd = tp->rcv_wnd; |
3882 | info->tcpi_rehash = tp->plb_rehash + tp->timeout_rehash; |
3883 | info->tcpi_fastopen_client_fail = tp->fastopen_client_fail; |
3884 | |
3885 | info->tcpi_total_rto = tp->total_rto; |
3886 | info->tcpi_total_rto_recoveries = tp->total_rto_recoveries; |
3887 | info->tcpi_total_rto_time = tp->total_rto_time; |
3888 | if (tp->rto_stamp) |
3889 | info->tcpi_total_rto_time += tcp_clock_ms() - tp->rto_stamp; |
3890 | |
3891 | unlock_sock_fast(sk, slow); |
3892 | } |
3893 | EXPORT_SYMBOL_GPL(tcp_get_info); |
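
/* Sketch of the matching userspace consumer (illustrative only; field names
 * are from the UAPI struct tcp_info in <linux/tcp.h>, error handling omitted):
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("rtt %u us, cwnd %u\n", ti.tcpi_rtt, ti.tcpi_snd_cwnd);
 */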
3894 | |
3895 | static size_t tcp_opt_stats_get_size(void) |
3896 | { |
3897 | return |
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BUSY */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_RWND_LIMITED */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_SNDBUF_LIMITED */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DATA_SEGS_OUT */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_TOTAL_RETRANS */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_PACING_RATE */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DELIVERY_RATE */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_CWND */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_REORDERING */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_MIN_RTT */
		nla_total_size(sizeof(u8)) + /* TCP_NLA_RECUR_RETRANS */
		nla_total_size(sizeof(u8)) + /* TCP_NLA_DELIVERY_RATE_APP_LMT */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_SNDQ_SIZE */
		nla_total_size(sizeof(u8)) + /* TCP_NLA_CA_STATE */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_SSTHRESH */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED_CE */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */
		nla_total_size(sizeof(u16)) + /* TCP_NLA_TIMEOUT_REHASH */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_BYTES_NOTSENT */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_EDT */
		nla_total_size(sizeof(u8)) + /* TCP_NLA_TTL */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_REHASH */
3925 | 0; |
3926 | } |
3927 | |
3928 | /* Returns TTL or hop limit of an incoming packet from skb. */ |
3929 | static u8 tcp_skb_ttl_or_hop_limit(const struct sk_buff *skb) |
3930 | { |
3931 | if (skb->protocol == htons(ETH_P_IP)) |
3932 | return ip_hdr(skb)->ttl; |
3933 | else if (skb->protocol == htons(ETH_P_IPV6)) |
3934 | return ipv6_hdr(skb)->hop_limit; |
3935 | else |
3936 | return 0; |
3937 | } |
3938 | |
3939 | struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, |
3940 | const struct sk_buff *orig_skb, |
3941 | const struct sk_buff *ack_skb) |
3942 | { |
3943 | const struct tcp_sock *tp = tcp_sk(sk); |
3944 | struct sk_buff *stats; |
3945 | struct tcp_info info; |
3946 | unsigned long rate; |
3947 | u64 rate64; |
3948 | |
	stats = alloc_skb(tcp_opt_stats_get_size(), GFP_ATOMIC);
3950 | if (!stats) |
3951 | return NULL; |
3952 | |
	tcp_get_info_chrono_stats(tp, &info);
	nla_put_u64_64bit(stats, TCP_NLA_BUSY,
			  info.tcpi_busy_time, TCP_NLA_PAD);
	nla_put_u64_64bit(stats, TCP_NLA_RWND_LIMITED,
			  info.tcpi_rwnd_limited, TCP_NLA_PAD);
	nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED,
			  info.tcpi_sndbuf_limited, TCP_NLA_PAD);
	nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT,
			  tp->data_segs_out, TCP_NLA_PAD);
	nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS,
			  tp->total_retrans, TCP_NLA_PAD);
3964 | |
3965 | rate = READ_ONCE(sk->sk_pacing_rate); |
3966 | rate64 = (rate != ~0UL) ? rate : ~0ULL; |
	nla_put_u64_64bit(stats, TCP_NLA_PACING_RATE, rate64, TCP_NLA_PAD);
3968 | |
3969 | rate64 = tcp_compute_delivery_rate(tp); |
	nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD);
3971 | |
	nla_put_u32(stats, TCP_NLA_SND_CWND, tcp_snd_cwnd(tp));
	nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering);
	nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp));
3975 | |
	nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits);
	nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited);
	nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh);
	nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered);
	nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce);

	nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una);
	nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state);
3984 | |
	nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent,
			  TCP_NLA_PAD);
	nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans,
			  TCP_NLA_PAD);
	nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups);
	nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen);
	nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3);
	nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash);
	nla_put_u32(stats, TCP_NLA_BYTES_NOTSENT,
		    max_t(int, 0, tp->write_seq - tp->snd_nxt));
	nla_put_u64_64bit(stats, TCP_NLA_EDT, orig_skb->skb_mstamp_ns,
			  TCP_NLA_PAD);
	if (ack_skb)
		nla_put_u8(stats, TCP_NLA_TTL,
			   tcp_skb_ttl_or_hop_limit(ack_skb));
4000 | |
	nla_put_u32(stats, TCP_NLA_REHASH, tp->plb_rehash + tp->timeout_rehash);
4002 | return stats; |
4003 | } |
4004 | |
4005 | int do_tcp_getsockopt(struct sock *sk, int level, |
4006 | int optname, sockptr_t optval, sockptr_t optlen) |
4007 | { |
4008 | struct inet_connection_sock *icsk = inet_csk(sk); |
4009 | struct tcp_sock *tp = tcp_sk(sk); |
4010 | struct net *net = sock_net(sk); |
4011 | int val, len; |
4012 | |
	if (copy_from_sockptr(&len, optlen, sizeof(int)))
4014 | return -EFAULT; |
4015 | |
4016 | if (len < 0) |
4017 | return -EINVAL; |
4018 | |
4019 | len = min_t(unsigned int, len, sizeof(int)); |
4020 | |
4021 | switch (optname) { |
4022 | case TCP_MAXSEG: |
4023 | val = tp->mss_cache; |
4024 | if (tp->rx_opt.user_mss && |
4025 | ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) |
4026 | val = tp->rx_opt.user_mss; |
4027 | if (tp->repair) |
4028 | val = tp->rx_opt.mss_clamp; |
4029 | break; |
4030 | case TCP_NODELAY: |
4031 | val = !!(tp->nonagle&TCP_NAGLE_OFF); |
4032 | break; |
4033 | case TCP_CORK: |
4034 | val = !!(tp->nonagle&TCP_NAGLE_CORK); |
4035 | break; |
4036 | case TCP_KEEPIDLE: |
4037 | val = keepalive_time_when(tp) / HZ; |
4038 | break; |
4039 | case TCP_KEEPINTVL: |
4040 | val = keepalive_intvl_when(tp) / HZ; |
4041 | break; |
4042 | case TCP_KEEPCNT: |
4043 | val = keepalive_probes(tp); |
4044 | break; |
4045 | case TCP_SYNCNT: |
4046 | val = READ_ONCE(icsk->icsk_syn_retries) ? : |
4047 | READ_ONCE(net->ipv4.sysctl_tcp_syn_retries); |
4048 | break; |
4049 | case TCP_LINGER2: |
4050 | val = READ_ONCE(tp->linger2); |
4051 | if (val >= 0) |
4052 | val = (val ? : READ_ONCE(net->ipv4.sysctl_tcp_fin_timeout)) / HZ; |
4053 | break; |
4054 | case TCP_DEFER_ACCEPT: |
4055 | val = READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept); |
		val = retrans_to_secs(val, TCP_TIMEOUT_INIT / HZ,
4057 | TCP_RTO_MAX / HZ); |
4058 | break; |
4059 | case TCP_WINDOW_CLAMP: |
4060 | val = tp->window_clamp; |
4061 | break; |
4062 | case TCP_INFO: { |
4063 | struct tcp_info info; |
4064 | |
		if (copy_from_sockptr(&len, optlen, sizeof(int)))
4066 | return -EFAULT; |
4067 | |
4068 | tcp_get_info(sk, &info); |
4069 | |
4070 | len = min_t(unsigned int, len, sizeof(info)); |
		if (copy_to_sockptr(optlen, &len, sizeof(int)))
			return -EFAULT;
		if (copy_to_sockptr(optval, &info, len))
4074 | return -EFAULT; |
4075 | return 0; |
4076 | } |
4077 | case TCP_CC_INFO: { |
4078 | const struct tcp_congestion_ops *ca_ops; |
4079 | union tcp_cc_info info; |
4080 | size_t sz = 0; |
4081 | int attr; |
4082 | |
		if (copy_from_sockptr(&len, optlen, sizeof(int)))
4084 | return -EFAULT; |
4085 | |
4086 | ca_ops = icsk->icsk_ca_ops; |
4087 | if (ca_ops && ca_ops->get_info) |
4088 | sz = ca_ops->get_info(sk, ~0U, &attr, &info); |
4089 | |
4090 | len = min_t(unsigned int, len, sz); |
		if (copy_to_sockptr(optlen, &len, sizeof(int)))
			return -EFAULT;
		if (copy_to_sockptr(optval, &info, len))
4094 | return -EFAULT; |
4095 | return 0; |
4096 | } |
4097 | case TCP_QUICKACK: |
4098 | val = !inet_csk_in_pingpong_mode(sk); |
4099 | break; |
4100 | |
4101 | case TCP_CONGESTION: |
		if (copy_from_sockptr(&len, optlen, sizeof(int)))
4103 | return -EFAULT; |
4104 | len = min_t(unsigned int, len, TCP_CA_NAME_MAX); |
		if (copy_to_sockptr(optlen, &len, sizeof(int)))
			return -EFAULT;
		if (copy_to_sockptr(optval, icsk->icsk_ca_ops->name, len))
4108 | return -EFAULT; |
4109 | return 0; |
4110 | |
4111 | case TCP_ULP: |
		if (copy_from_sockptr(&len, optlen, sizeof(int)))
4113 | return -EFAULT; |
4114 | len = min_t(unsigned int, len, TCP_ULP_NAME_MAX); |
4115 | if (!icsk->icsk_ulp_ops) { |
4116 | len = 0; |
			if (copy_to_sockptr(optlen, &len, sizeof(int)))
4118 | return -EFAULT; |
4119 | return 0; |
4120 | } |
		if (copy_to_sockptr(optlen, &len, sizeof(int)))
			return -EFAULT;
		if (copy_to_sockptr(optval, icsk->icsk_ulp_ops->name, len))
4124 | return -EFAULT; |
4125 | return 0; |
4126 | |
4127 | case TCP_FASTOPEN_KEY: { |
4128 | u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)]; |
4129 | unsigned int key_len; |
4130 | |
		if (copy_from_sockptr(&len, optlen, sizeof(int)))
4132 | return -EFAULT; |
4133 | |
4134 | key_len = tcp_fastopen_get_cipher(net, icsk, key) * |
4135 | TCP_FASTOPEN_KEY_LENGTH; |
4136 | len = min_t(unsigned int, len, key_len); |
		if (copy_to_sockptr(optlen, &len, sizeof(int)))
			return -EFAULT;
		if (copy_to_sockptr(optval, key, len))
4140 | return -EFAULT; |
4141 | return 0; |
4142 | } |
4143 | case TCP_THIN_LINEAR_TIMEOUTS: |
4144 | val = tp->thin_lto; |
4145 | break; |
4146 | |
4147 | case TCP_THIN_DUPACK: |
4148 | val = 0; |
4149 | break; |
4150 | |
4151 | case TCP_REPAIR: |
4152 | val = tp->repair; |
4153 | break; |
4154 | |
4155 | case TCP_REPAIR_QUEUE: |
4156 | if (tp->repair) |
4157 | val = tp->repair_queue; |
4158 | else |
4159 | return -EINVAL; |
4160 | break; |
4161 | |
4162 | case TCP_REPAIR_WINDOW: { |
4163 | struct tcp_repair_window opt; |
4164 | |
		if (copy_from_sockptr(&len, optlen, sizeof(int)))
4166 | return -EFAULT; |
4167 | |
4168 | if (len != sizeof(opt)) |
4169 | return -EINVAL; |
4170 | |
4171 | if (!tp->repair) |
4172 | return -EPERM; |
4173 | |
4174 | opt.snd_wl1 = tp->snd_wl1; |
4175 | opt.snd_wnd = tp->snd_wnd; |
4176 | opt.max_window = tp->max_window; |
4177 | opt.rcv_wnd = tp->rcv_wnd; |
4178 | opt.rcv_wup = tp->rcv_wup; |
4179 | |
		if (copy_to_sockptr(optval, &opt, len))
4181 | return -EFAULT; |
4182 | return 0; |
4183 | } |
4184 | case TCP_QUEUE_SEQ: |
4185 | if (tp->repair_queue == TCP_SEND_QUEUE) |
4186 | val = tp->write_seq; |
4187 | else if (tp->repair_queue == TCP_RECV_QUEUE) |
4188 | val = tp->rcv_nxt; |
4189 | else |
4190 | return -EINVAL; |
4191 | break; |
4192 | |
4193 | case TCP_USER_TIMEOUT: |
4194 | val = READ_ONCE(icsk->icsk_user_timeout); |
4195 | break; |
4196 | |
4197 | case TCP_FASTOPEN: |
4198 | val = READ_ONCE(icsk->icsk_accept_queue.fastopenq.max_qlen); |
4199 | break; |
4200 | |
4201 | case TCP_FASTOPEN_CONNECT: |
4202 | val = tp->fastopen_connect; |
4203 | break; |
4204 | |
4205 | case TCP_FASTOPEN_NO_COOKIE: |
4206 | val = tp->fastopen_no_cookie; |
4207 | break; |
4208 | |
4209 | case TCP_TX_DELAY: |
4210 | val = READ_ONCE(tp->tcp_tx_delay); |
4211 | break; |
4212 | |
4213 | case TCP_TIMESTAMP: |
		val = tcp_clock_ts(tp->tcp_usec_ts) + READ_ONCE(tp->tsoffset);
4215 | if (tp->tcp_usec_ts) |
4216 | val |= 1; |
4217 | else |
4218 | val &= ~1; |
4219 | break; |
4220 | case TCP_NOTSENT_LOWAT: |
4221 | val = READ_ONCE(tp->notsent_lowat); |
4222 | break; |
4223 | case TCP_INQ: |
4224 | val = tp->recvmsg_inq; |
4225 | break; |
4226 | case TCP_SAVE_SYN: |
4227 | val = tp->save_syn; |
4228 | break; |
4229 | case TCP_SAVED_SYN: { |
		if (copy_from_sockptr(&len, optlen, sizeof(int)))
4231 | return -EFAULT; |
4232 | |
4233 | sockopt_lock_sock(sk); |
4234 | if (tp->saved_syn) { |
			if (len < tcp_saved_syn_len(tp->saved_syn)) {
				len = tcp_saved_syn_len(tp->saved_syn);
				if (copy_to_sockptr(optlen, &len, sizeof(int))) {
4238 | sockopt_release_sock(sk); |
4239 | return -EFAULT; |
4240 | } |
4241 | sockopt_release_sock(sk); |
4242 | return -EINVAL; |
4243 | } |
			len = tcp_saved_syn_len(tp->saved_syn);
			if (copy_to_sockptr(optlen, &len, sizeof(int))) {
4246 | sockopt_release_sock(sk); |
4247 | return -EFAULT; |
4248 | } |
			if (copy_to_sockptr(optval, tp->saved_syn->data, len)) {
4250 | sockopt_release_sock(sk); |
4251 | return -EFAULT; |
4252 | } |
4253 | tcp_saved_syn_free(tp); |
4254 | sockopt_release_sock(sk); |
4255 | } else { |
4256 | sockopt_release_sock(sk); |
4257 | len = 0; |
			if (copy_to_sockptr(optlen, &len, sizeof(int)))
4259 | return -EFAULT; |
4260 | } |
4261 | return 0; |
4262 | } |
4263 | #ifdef CONFIG_MMU |
4264 | case TCP_ZEROCOPY_RECEIVE: { |
4265 | struct scm_timestamping_internal tss; |
4266 | struct tcp_zerocopy_receive zc = {}; |
4267 | int err; |
4268 | |
		if (copy_from_sockptr(&len, optlen, sizeof(int)))
4270 | return -EFAULT; |
4271 | if (len < 0 || |
4272 | len < offsetofend(struct tcp_zerocopy_receive, length)) |
4273 | return -EINVAL; |
4274 | if (unlikely(len > sizeof(zc))) { |
			err = check_zeroed_sockptr(optval, sizeof(zc),
						   len - sizeof(zc));
4277 | if (err < 1) |
4278 | return err == 0 ? -EINVAL : err; |
4279 | len = sizeof(zc); |
			if (copy_to_sockptr(optlen, &len, sizeof(int)))
4281 | return -EFAULT; |
4282 | } |
		if (copy_from_sockptr(&zc, optval, len))
4284 | return -EFAULT; |
4285 | if (zc.reserved) |
4286 | return -EINVAL; |
4287 | if (zc.msg_flags & ~(TCP_VALID_ZC_MSG_FLAGS)) |
4288 | return -EINVAL; |
4289 | sockopt_lock_sock(sk); |
		err = tcp_zerocopy_receive(sk, &zc, &tss);
4291 | err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname, |
4292 | &zc, &len, err); |
4293 | sockopt_release_sock(sk); |
4294 | if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags)) |
4295 | goto zerocopy_rcv_cmsg; |
4296 | switch (len) { |
4297 | case offsetofend(struct tcp_zerocopy_receive, msg_flags): |
4298 | goto zerocopy_rcv_cmsg; |
4299 | case offsetofend(struct tcp_zerocopy_receive, msg_controllen): |
4300 | case offsetofend(struct tcp_zerocopy_receive, msg_control): |
4301 | case offsetofend(struct tcp_zerocopy_receive, flags): |
4302 | case offsetofend(struct tcp_zerocopy_receive, copybuf_len): |
4303 | case offsetofend(struct tcp_zerocopy_receive, copybuf_address): |
4304 | case offsetofend(struct tcp_zerocopy_receive, err): |
4305 | goto zerocopy_rcv_sk_err; |
4306 | case offsetofend(struct tcp_zerocopy_receive, inq): |
4307 | goto zerocopy_rcv_inq; |
4308 | case offsetofend(struct tcp_zerocopy_receive, length): |
4309 | default: |
4310 | goto zerocopy_rcv_out; |
4311 | } |
4312 | zerocopy_rcv_cmsg: |
4313 | if (zc.msg_flags & TCP_CMSG_TS) |
			tcp_zc_finalize_rx_tstamp(sk, &zc, &tss);
4315 | else |
4316 | zc.msg_flags = 0; |
4317 | zerocopy_rcv_sk_err: |
4318 | if (!err) |
4319 | zc.err = sock_error(sk); |
4320 | zerocopy_rcv_inq: |
4321 | zc.inq = tcp_inq_hint(sk); |
4322 | zerocopy_rcv_out: |
		if (!err && copy_to_sockptr(optval, &zc, len))
4324 | err = -EFAULT; |
4325 | return err; |
4326 | } |
4327 | #endif |
4328 | case TCP_AO_REPAIR: |
4329 | if (!tcp_can_repair_sock(sk)) |
4330 | return -EPERM; |
4331 | return tcp_ao_get_repair(sk, optval, optlen); |
4332 | case TCP_AO_GET_KEYS: |
4333 | case TCP_AO_INFO: { |
4334 | int err; |
4335 | |
4336 | sockopt_lock_sock(sk); |
4337 | if (optname == TCP_AO_GET_KEYS) |
4338 | err = tcp_ao_get_mkts(sk, optval, optlen); |
4339 | else |
4340 | err = tcp_ao_get_sock_info(sk, optval, optlen); |
4341 | sockopt_release_sock(sk); |
4342 | |
4343 | return err; |
4344 | } |
4345 | default: |
4346 | return -ENOPROTOOPT; |
4347 | } |
4348 | |
	if (copy_to_sockptr(optlen, &len, sizeof(int)))
		return -EFAULT;
	if (copy_to_sockptr(optval, &val, len))
4352 | return -EFAULT; |
4353 | return 0; |
4354 | } |
4355 | |
4356 | bool tcp_bpf_bypass_getsockopt(int level, int optname) |
4357 | { |
4358 | /* TCP do_tcp_getsockopt has optimized getsockopt implementation |
4359 | * to avoid extra socket lock for TCP_ZEROCOPY_RECEIVE. |
4360 | */ |
4361 | if (level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE) |
4362 | return true; |
4363 | |
4364 | return false; |
4365 | } |
4366 | EXPORT_SYMBOL(tcp_bpf_bypass_getsockopt); |
4367 | |
4368 | int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, |
4369 | int __user *optlen) |
4370 | { |
4371 | struct inet_connection_sock *icsk = inet_csk(sk); |
4372 | |
4373 | if (level != SOL_TCP) |
4374 | /* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */ |
4375 | return READ_ONCE(icsk->icsk_af_ops)->getsockopt(sk, level, optname, |
4376 | optval, optlen); |
	return do_tcp_getsockopt(sk, level, optname, USER_SOCKPTR(optval),
				 USER_SOCKPTR(optlen));
4379 | } |
4380 | EXPORT_SYMBOL(tcp_getsockopt); |
4381 | |
4382 | #ifdef CONFIG_TCP_MD5SIG |
4383 | int tcp_md5_sigpool_id = -1; |
4384 | EXPORT_SYMBOL_GPL(tcp_md5_sigpool_id); |
4385 | |
4386 | int tcp_md5_alloc_sigpool(void) |
4387 | { |
4388 | size_t scratch_size; |
4389 | int ret; |
4390 | |
4391 | scratch_size = sizeof(union tcp_md5sum_block) + sizeof(struct tcphdr); |
	ret = tcp_sigpool_alloc_ahash("md5", scratch_size);
4393 | if (ret >= 0) { |
4394 | /* As long as any md5 sigpool was allocated, the return |
4395 | * id would stay the same. Re-write the id only for the case |
4396 | * when previously all MD5 keys were deleted and this call |
4397 | * allocates the first MD5 key, which may return a different |
4398 | * sigpool id than was used previously. |
4399 | */ |
4400 | WRITE_ONCE(tcp_md5_sigpool_id, ret); /* Avoids the compiler potentially being smart here */ |
4401 | return 0; |
4402 | } |
4403 | return ret; |
4404 | } |
4405 | |
4406 | void tcp_md5_release_sigpool(void) |
4407 | { |
4408 | tcp_sigpool_release(READ_ONCE(tcp_md5_sigpool_id)); |
4409 | } |
4410 | |
4411 | void tcp_md5_add_sigpool(void) |
4412 | { |
4413 | tcp_sigpool_get(READ_ONCE(tcp_md5_sigpool_id)); |
4414 | } |
4415 | |
4416 | int tcp_md5_hash_key(struct tcp_sigpool *hp, |
4417 | const struct tcp_md5sig_key *key) |
4418 | { |
4419 | u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */ |
4420 | struct scatterlist sg; |
4421 | |
4422 | sg_init_one(&sg, key->key, keylen); |
	ahash_request_set_crypt(hp->req, &sg, NULL, keylen);
4424 | |
4425 | /* We use data_race() because tcp_md5_do_add() might change |
4426 | * key->key under us |
4427 | */ |
4428 | return data_race(crypto_ahash_update(hp->req)); |
4429 | } |
4430 | EXPORT_SYMBOL(tcp_md5_hash_key); |
4431 | |
4432 | /* Called with rcu_read_lock() */ |
4433 | enum skb_drop_reason |
4434 | tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, |
4435 | const void *saddr, const void *daddr, |
4436 | int family, int l3index, const __u8 *hash_location) |
4437 | { |
	/* This gets called for each TCP segment that has the TCP-MD5 option.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
4444 | const struct tcp_sock *tp = tcp_sk(sk); |
4445 | struct tcp_md5sig_key *key; |
4446 | u8 newhash[16]; |
4447 | int genhash; |
4448 | |
	key = tcp_md5_do_lookup(sk, l3index, saddr, family);
4450 | |
4451 | if (!key && hash_location) { |
4452 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); |
4453 | tcp_hash_fail("Unexpected MD5 Hash found" , family, skb, "" ); |
4454 | return SKB_DROP_REASON_TCP_MD5UNEXPECTED; |
4455 | } |
4456 | |
4457 | /* Check the signature. |
4458 | * To support dual stack listeners, we need to handle |
4459 | * IPv4-mapped case. |
4460 | */ |
4461 | if (family == AF_INET) |
		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
4463 | else |
4464 | genhash = tp->af_specific->calc_md5_hash(newhash, key, |
4465 | NULL, skb); |
	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
4467 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE); |
4468 | if (family == AF_INET) { |
4469 | tcp_hash_fail("MD5 Hash failed" , AF_INET, skb, "%s L3 index %d" , |
4470 | genhash ? "tcp_v4_calc_md5_hash failed" |
4471 | : "" , l3index); |
4472 | } else { |
4473 | if (genhash) { |
4474 | tcp_hash_fail("MD5 Hash failed" , |
4475 | AF_INET6, skb, "L3 index %d" , |
4476 | l3index); |
4477 | } else { |
4478 | tcp_hash_fail("MD5 Hash mismatch" , |
4479 | AF_INET6, skb, "L3 index %d" , |
4480 | l3index); |
4481 | } |
4482 | } |
4483 | return SKB_DROP_REASON_TCP_MD5FAILURE; |
4484 | } |
4485 | return SKB_NOT_DROPPED_YET; |
4486 | } |
4487 | EXPORT_SYMBOL(tcp_inbound_md5_hash); |
4488 | |
4489 | #endif |
4490 | |
4491 | void tcp_done(struct sock *sk) |
4492 | { |
4493 | struct request_sock *req; |
4494 | |
4495 | /* We might be called with a new socket, after |
4496 | * inet_csk_prepare_forced_close() has been called |
4497 | * so we can not use lockdep_sock_is_held(sk) |
4498 | */ |
4499 | req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1); |
4500 | |
4501 | if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) |
4502 | TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS); |
4503 | |
4504 | tcp_set_state(sk, TCP_CLOSE); |
4505 | tcp_clear_xmit_timers(sk); |
4506 | if (req) |
		reqsk_fastopen_remove(sk, req, false);
4508 | |
4509 | WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); |
4510 | |
	if (!sock_flag(sk, SOCK_DEAD))
4512 | sk->sk_state_change(sk); |
4513 | else |
4514 | inet_csk_destroy_sock(sk); |
4515 | } |
4516 | EXPORT_SYMBOL_GPL(tcp_done); |
4517 | |
4518 | int tcp_abort(struct sock *sk, int err) |
4519 | { |
4520 | int state = inet_sk_state_load(sk); |
4521 | |
4522 | if (state == TCP_NEW_SYN_RECV) { |
4523 | struct request_sock *req = inet_reqsk(sk); |
4524 | |
4525 | local_bh_disable(); |
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
4527 | local_bh_enable(); |
4528 | return 0; |
4529 | } |
4530 | if (state == TCP_TIME_WAIT) { |
4531 | struct inet_timewait_sock *tw = inet_twsk(sk); |
4532 | |
		refcount_inc(&tw->tw_refcnt);
4534 | local_bh_disable(); |
4535 | inet_twsk_deschedule_put(tw); |
4536 | local_bh_enable(); |
4537 | return 0; |
4538 | } |
4539 | |
4540 | /* BPF context ensures sock locking. */ |
4541 | if (!has_current_bpf_ctx()) |
4542 | /* Don't race with userspace socket closes such as tcp_close. */ |
4543 | lock_sock(sk); |
4544 | |
4545 | if (sk->sk_state == TCP_LISTEN) { |
4546 | tcp_set_state(sk, TCP_CLOSE); |
4547 | inet_csk_listen_stop(sk); |
4548 | } |
4549 | |
4550 | /* Don't race with BH socket closes such as inet_csk_listen_stop. */ |
4551 | local_bh_disable(); |
4552 | bh_lock_sock(sk); |
4553 | |
	if (!sock_flag(sk, SOCK_DEAD)) {
4555 | WRITE_ONCE(sk->sk_err, err); |
4556 | /* This barrier is coupled with smp_rmb() in tcp_poll() */ |
4557 | smp_wmb(); |
4558 | sk_error_report(sk); |
		if (tcp_need_reset(sk->sk_state))
4560 | tcp_send_active_reset(sk, GFP_ATOMIC); |
4561 | tcp_done(sk); |
4562 | } |
4563 | |
4564 | bh_unlock_sock(sk); |
4565 | local_bh_enable(); |
4566 | tcp_write_queue_purge(sk); |
4567 | if (!has_current_bpf_ctx()) |
4568 | release_sock(sk); |
4569 | return 0; |
4570 | } |
4571 | EXPORT_SYMBOL_GPL(tcp_abort); |
4572 | |
4573 | extern struct tcp_congestion_ops tcp_reno; |
4574 | |
4575 | static __initdata unsigned long thash_entries; |
4576 | static int __init set_thash_entries(char *str) |
4577 | { |
4578 | ssize_t ret; |
4579 | |
4580 | if (!str) |
4581 | return 0; |
4582 | |
	ret = kstrtoul(str, 0, &thash_entries);
4584 | if (ret) |
4585 | return 0; |
4586 | |
4587 | return 1; |
4588 | } |
4589 | __setup("thash_entries=" , set_thash_entries); |
4590 | |
4591 | static void __init tcp_init_mem(void) |
4592 | { |
4593 | unsigned long limit = nr_free_buffer_pages() / 16; |
4594 | |
4595 | limit = max(limit, 128UL); |
4596 | sysctl_tcp_mem[0] = limit / 4 * 3; /* 4.68 % */ |
4597 | sysctl_tcp_mem[1] = limit; /* 6.25 % */ |
4598 | sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2; /* 9.37 % */ |
4599 | } |
4600 | |
4601 | static void __init tcp_struct_check(void) |
4602 | { |
4603 | /* TX read-mostly hotpath cache lines */ |
4604 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, max_window); |
4605 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, rcv_ssthresh); |
4606 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, reordering); |
4607 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, notsent_lowat); |
4608 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, gso_segs); |
4609 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, lost_skb_hint); |
4610 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, retransmit_skb_hint); |
4611 | CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_tx, 40); |
4612 | |
4613 | /* TXRX read-mostly hotpath cache lines */ |
4614 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, tsoffset); |
4615 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, snd_wnd); |
4616 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, mss_cache); |
4617 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, snd_cwnd); |
4618 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, prr_out); |
4619 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, lost_out); |
4620 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, sacked_out); |
4621 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, scaling_ratio); |
4622 | CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_txrx, 32); |
4623 | |
4624 | /* RX read-mostly hotpath cache lines */ |
4625 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, copied_seq); |
4626 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rcv_tstamp); |
4627 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, snd_wl1); |
4628 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, tlp_high_seq); |
4629 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rttvar_us); |
4630 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, retrans_out); |
4631 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, advmss); |
4632 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, urg_data); |
4633 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, lost); |
4634 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rtt_min); |
4635 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, out_of_order_queue); |
4636 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, snd_ssthresh); |
4637 | CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_rx, 69); |
4638 | |
4639 | /* TX read-write hotpath cache lines */ |
4640 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, segs_out); |
4641 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, data_segs_out); |
4642 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, bytes_sent); |
4643 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, snd_sml); |
4644 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, chrono_start); |
4645 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, chrono_stat); |
4646 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, write_seq); |
4647 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, pushed_seq); |
4648 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, lsndtime); |
4649 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, mdev_us); |
4650 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_wstamp_ns); |
4651 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_clock_cache); |
4652 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_mstamp); |
4653 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, rtt_seq); |
4654 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tsorted_sent_queue); |
4655 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, highest_sack); |
4656 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, ecn_flags); |
4657 | CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_tx, 105); |
4658 | |
4659 | /* TXRX read-write hotpath cache lines */ |
4660 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, pred_flags); |
4661 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_nxt); |
4662 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_nxt); |
4663 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_una); |
4664 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, window_clamp); |
4665 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, srtt_us); |
4666 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, packets_out); |
4667 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_up); |
4668 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, delivered); |
4669 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, delivered_ce); |
4670 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, app_limited); |
4671 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_wnd); |
4672 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rx_opt); |
4673 | CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_txrx, 76); |
4674 | |
4675 | /* RX read-write hotpath cache lines */ |
4676 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, bytes_received); |
4677 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, segs_in); |
4678 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, data_segs_in); |
4679 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_wup); |
4680 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, max_packets_out); |
4681 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, cwnd_usage_seq); |
4682 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rate_delivered); |
4683 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rate_interval_us); |
4684 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_rtt_last_tsecr); |
4685 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, first_tx_mstamp); |
4686 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, delivered_mstamp); |
4687 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, bytes_acked); |
4688 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_rtt_est); |
4689 | CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcvq_space); |
4690 | CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_rx, 99); |
4691 | } |
4692 | |
4693 | void __init tcp_init(void) |
4694 | { |
4695 | int max_rshare, max_wshare, cnt; |
4696 | unsigned long limit; |
4697 | unsigned int i; |
4698 | |
4699 | BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE); |
4700 | BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > |
4701 | sizeof_field(struct sk_buff, cb)); |
4702 | |
4703 | tcp_struct_check(); |
4704 | |
4705 | percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL); |
4706 | |
4707 | timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE); |
	mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);
4709 | |
	inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash",
			    thash_entries, 21,  /* one slot per 2 MB*/
			    0, 64 * 1024);
	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				  SLAB_ACCOUNT,
				  NULL);
	tcp_hashinfo.bind2_bucket_cachep =
		kmem_cache_create("tcp_bind2_bucket",
				  sizeof(struct inet_bind2_bucket), 0,
				  SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				  SLAB_ACCOUNT,
				  NULL);
4725 | |
4726 | /* Size and allocate the main established and bind bucket |
4727 | * hash tables. |
4728 | * |
4729 | * The methodology is similar to that of the buffer cache. |
4730 | */ |
	tcp_hashinfo.ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
					17, /* one slot per 128 KB of memory */
					0,
					NULL,
					&tcp_hashinfo.ehash_mask,
					0,
					thash_entries ? 0 : 512 * 1024);
4741 | for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) |
4742 | INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); |
4743 | |
	if (inet_ehash_locks_alloc(&tcp_hashinfo))
		panic("TCP: failed to alloc ehash_locks");
	tcp_hashinfo.bhash =
		alloc_large_system_hash("TCP bind",
					2 * sizeof(struct inet_bind_hashbucket),
					tcp_hashinfo.ehash_mask + 1,
					17, /* one slot per 128 KB of memory */
					0,
					&tcp_hashinfo.bhash_size,
					NULL,
					0,
					64 * 1024);
4756 | tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size; |
4757 | tcp_hashinfo.bhash2 = tcp_hashinfo.bhash + tcp_hashinfo.bhash_size; |
4758 | for (i = 0; i < tcp_hashinfo.bhash_size; i++) { |
4759 | spin_lock_init(&tcp_hashinfo.bhash[i].lock); |
4760 | INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); |
4761 | spin_lock_init(&tcp_hashinfo.bhash2[i].lock); |
4762 | INIT_HLIST_HEAD(&tcp_hashinfo.bhash2[i].chain); |
4763 | } |
4764 | |
4765 | tcp_hashinfo.pernet = false; |
4766 | |
4767 | cnt = tcp_hashinfo.ehash_mask + 1; |
4768 | sysctl_tcp_max_orphans = cnt / 2; |
4769 | |
4770 | tcp_init_mem(); |
4771 | /* Set per-socket limits to no more than 1/128 the pressure threshold */ |
4772 | limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7); |
4773 | max_wshare = min(4UL*1024*1024, limit); |
4774 | max_rshare = min(6UL*1024*1024, limit); |
4775 | |
4776 | init_net.ipv4.sysctl_tcp_wmem[0] = PAGE_SIZE; |
4777 | init_net.ipv4.sysctl_tcp_wmem[1] = 16*1024; |
4778 | init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare); |
4779 | |
4780 | init_net.ipv4.sysctl_tcp_rmem[0] = PAGE_SIZE; |
4781 | init_net.ipv4.sysctl_tcp_rmem[1] = 131072; |
4782 | init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare); |
4783 | |
4784 | pr_info("Hash tables configured (established %u bind %u)\n" , |
4785 | tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); |
4786 | |
4787 | tcp_v4_init(); |
4788 | tcp_metrics_init(); |
4789 | BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0); |
4790 | tcp_tasklet_init(); |
4791 | mptcp_init(); |
4792 | } |
4793 | |