/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP protocol.
 *
 * Version:	@(#)tcp.h	1.0.2	04/28/93
 *
 * Author:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 */
#ifndef _LINUX_TCP_H
#define _LINUX_TCP_H


#include <linux/skbuff.h>
#include <linux/win_minmax.h>
#include <net/sock.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <uapi/linux/tcp.h>

static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
{
	return (struct tcphdr *)skb_transport_header(skb);
}

static inline unsigned int __tcp_hdrlen(const struct tcphdr *th)
{
	return th->doff * 4;
}

static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
{
	return __tcp_hdrlen(tcp_hdr(skb));
}
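
/* Example (illustrative): a TCP header without options has doff == 5, so
 * tcp_hdrlen() returns 5 * 4 == 20 bytes; the maximum doff of 15 yields
 * 60 bytes (20 bytes of base header plus up to 40 bytes of options).
 */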

static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb)
{
	return (struct tcphdr *)skb_inner_transport_header(skb);
}

static inline unsigned int inner_tcp_hdrlen(const struct sk_buff *skb)
{
	return inner_tcp_hdr(skb)->doff * 4;
}

/**
 * skb_tcp_all_headers - Returns size of all headers for a TCP packet
 * @skb: buffer
 *
 * Used in TX path, for a packet known to be a TCP one.
 *
 * if (skb_is_gso(skb)) {
 *	int hlen = skb_tcp_all_headers(skb);
 *	...
 */
static inline int skb_tcp_all_headers(const struct sk_buff *skb)
{
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

/**
 * skb_inner_tcp_all_headers - Returns size of all headers for an encap TCP packet
 * @skb: buffer
 *
 * Used in TX path, for a packet known to be a TCP one.
 *
 * if (skb_is_gso(skb) && skb->encapsulation) {
 *	int hlen = skb_inner_tcp_all_headers(skb);
 *	...
 */
static inline int skb_inner_tcp_all_headers(const struct sk_buff *skb)
{
	return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
}

static inline unsigned int tcp_optlen(const struct sk_buff *skb)
{
	return (tcp_hdr(skb)->doff - 5) * 4;
}
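
/* Example (illustrative): a SYN carrying MSS, SACK-permitted, timestamp
 * and window-scaling options typically has doff == 10, so tcp_optlen()
 * returns (10 - 5) * 4 == 20 bytes of option space.
 */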

/* TCP Fast Open */
#define TCP_FASTOPEN_COOKIE_MIN		4	/* Min Fast Open Cookie size in bytes */
#define TCP_FASTOPEN_COOKIE_MAX		16	/* Max Fast Open Cookie size in bytes */
#define TCP_FASTOPEN_COOKIE_SIZE	8	/* the size employed by this impl. */

/* TCP Fast Open Cookie as stored in memory */
struct tcp_fastopen_cookie {
	__le64	val[DIV_ROUND_UP(TCP_FASTOPEN_COOKIE_MAX, sizeof(u64))];
	s8	len;
	bool	exp;	/* In RFC6994 experimental option format */
};
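
/* Illustrative note (not part of this header's contract): a negative len
 * is commonly used to mean "no cookie yet / cookie requested", e.g.:
 *
 *	struct tcp_fastopen_cookie foc = { .len = -1 };
 */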

/* This defines a selective acknowledgement block. */
struct tcp_sack_block_wire {
	__be32	start_seq;
	__be32	end_seq;
};

struct tcp_sack_block {
	u32	start_seq;
	u32	end_seq;
};
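
/* Minimal sketch of converting a wire block to host order (assuming a
 * validated pointer into the SACK option; illustrative only):
 *
 *	sp->start_seq = get_unaligned_be32(&wire->start_seq);
 *	sp->end_seq   = get_unaligned_be32(&wire->end_seq);
 */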

/* These are used to set the sack_ok field in struct tcp_options_received */
#define TCP_SACK_SEEN	(1 << 0)	/* 1 = peer is SACK capable, */
#define TCP_DSACK_SEEN	(1 << 2)	/* 1 = DSACK was received from peer */

struct tcp_options_received {
/*	PAWS/RTTM data	*/
	int	ts_recent_stamp;/* Time we stored ts_recent (for aging) */
	u32	ts_recent;	/* Time stamp to echo next		*/
	u32	rcv_tsval;	/* Time stamp value			*/
	u32	rcv_tsecr;	/* Time stamp echo reply		*/
	u16	saw_tstamp : 1,	/* Saw TIMESTAMP on last packet		*/
		tstamp_ok : 1,	/* TIMESTAMP seen on SYN packet		*/
		dsack : 1,	/* D-SACK is scheduled			*/
		wscale_ok : 1,	/* Wscale seen on SYN packet		*/
		sack_ok : 3,	/* SACK seen on SYN packet		*/
		smc_ok : 1,	/* SMC seen on SYN packet		*/
		snd_wscale : 4,	/* Window scaling received from sender	*/
		rcv_wscale : 4;	/* Window scaling to send to receiver	*/
	u8	saw_unknown:1,	/* Received unknown option		*/
		unused:7;
	u8	num_sacks;	/* Number of SACK blocks		*/
	u16	user_mss;	/* mss requested by user in ioctl	*/
	u16	mss_clamp;	/* Maximal mss, negotiated at connection setup */
};

static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
{
	rx_opt->tstamp_ok = rx_opt->sack_ok = 0;
	rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
#if IS_ENABLED(CONFIG_SMC)
	rx_opt->smc_ok = 0;
#endif
}
/* This is the max number of SACKS that we'll generate and process. It's safe
 * to increase this, although since:
 * size = TCPOLEN_SACK_BASE_ALIGNED (4) + n * TCPOLEN_SACK_PERBLOCK (8)
 * only four SACK blocks will fit in a standard TCP header anyway. */
#define TCP_NUM_SACKS 4
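
/* Worked example: with n == TCP_NUM_SACKS == 4,
 * size = 4 + 4 * 8 == 36 bytes, which fits in the 40 bytes of option
 * space that a maximal 60-byte TCP header allows; n == 5 would need 44.
 */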

struct tcp_request_sock_ops;

struct tcp_request_sock {
	struct inet_request_sock	req;
	const struct tcp_request_sock_ops *af_specific;
	u64				snt_synack; /* first SYNACK sent time */
	bool				tfo_listener;
	bool				is_mptcp;
#if IS_ENABLED(CONFIG_MPTCP)
	bool				drop_req;
#endif
	u32				txhash;
	u32				rcv_isn;
	u32				snt_isn;
	u32				ts_off;
	u32				last_oow_ack_time; /* last SYNACK */
	u32				rcv_nxt; /* the ack # by SYNACK. For
						  * FastOpen it's the seq#
						  * after data-in-SYN.
						  */
	u8				syn_tos;
};

static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
{
	return (struct tcp_request_sock *)req;
}

struct tcp_sock {
	/* inet_connection_sock has to be the first member of tcp_sock */
	struct inet_connection_sock	inet_conn;
	u16	tcp_header_len;	/* Bytes of tcp header to send		*/
	u16	gso_segs;	/* Max number of segs per GSO packet	*/

/*
 *	Header prediction flags
 *	0x5?10 << 16 + snd_wnd in net byte order
 */
	__be32	pred_flags;
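
/*
 *	Sketch of how the fast path arms pred_flags (cf. __tcp_fast_path_on()
 *	in net/tcp.h; illustrative, not a verbatim copy):
 *
 *		tp->pred_flags = htonl((tp->tcp_header_len << 26) |
 *				       ntohl(TCP_FLAG_ACK) | snd_wnd);
 *
 *	i.e. doff in the top nibble, the ACK flag, and the expected window.
 */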

/*
 *	RFC793 variables by their proper names. This means you can
 *	read the code and the spec side by side (and laugh ...)
 *	See RFC793 and RFC1122. The RFC writes these in capitals.
 */
	u64	bytes_received;	/* RFC4898 tcpEStatsAppHCThruOctetsReceived
				 * sum(delta(rcv_nxt)), or how many bytes
				 * were acked.
				 */
	u32	segs_in;	/* RFC4898 tcpEStatsPerfSegsIn
				 * total number of segments in.
				 */
	u32	data_segs_in;	/* RFC4898 tcpEStatsPerfDataSegsIn
				 * total number of data segments in.
				 */
	u32	rcv_nxt;	/* What we want to receive next		*/
	u32	copied_seq;	/* Head of yet unread data		*/
	u32	rcv_wup;	/* rcv_nxt on last window update sent	*/
	u32	snd_nxt;	/* Next sequence we send		*/
	u32	segs_out;	/* RFC4898 tcpEStatsPerfSegsOut
				 * The total number of segments sent.
				 */
	u32	data_segs_out;	/* RFC4898 tcpEStatsPerfDataSegsOut
				 * total number of data segments sent.
				 */
	u64	bytes_sent;	/* RFC4898 tcpEStatsPerfHCDataOctetsOut
				 * total number of data bytes sent.
				 */
	u64	bytes_acked;	/* RFC4898 tcpEStatsAppHCThruOctetsAcked
				 * sum(delta(snd_una)), or how many bytes
				 * were acked.
				 */
	u32	dsack_dups;	/* RFC4898 tcpEStatsStackDSACKDups
				 * total number of DSACK blocks received
				 */
	u32	snd_una;	/* First byte we want an ack for	*/
	u32	snd_sml;	/* Last byte of the most recently transmitted small packet */
	u32	rcv_tstamp;	/* timestamp of last received ACK (for keepalives) */
	u32	lsndtime;	/* timestamp of last sent data packet (for restart window) */
	u32	last_oow_ack_time;  /* timestamp of last out-of-window ACK */
	u32	compressed_ack_rcv_nxt;

	u32	tsoffset;	/* timestamp offset */

	struct list_head tsq_node; /* anchor in tsq_tasklet.head list */
	struct list_head tsorted_sent_queue; /* time-sorted sent but un-SACKed skbs */

	u32	snd_wl1;	/* Sequence for window update		*/
	u32	snd_wnd;	/* The window we expect to receive	*/
	u32	max_window;	/* Maximal window ever seen from peer	*/
	u32	mss_cache;	/* Cached effective mss, not including SACKS */

	u32	window_clamp;	/* Maximal window to advertise		*/
	u32	rcv_ssthresh;	/* Current window clamp			*/

	/* Information of the most recently (s)acked skb */
	struct tcp_rack {
		u64 mstamp; /* (Re)sent time of the skb */
		u32 rtt_us;  /* Associated RTT */
		u32 end_seq; /* Ending TCP sequence of the skb */
		u32 last_delivered; /* tp->delivered at last reo_wnd adj */
		u8 reo_wnd_steps;   /* Allowed reordering window */
#define TCP_RACK_RECOVERY_THRESH 16
		u8 reo_wnd_persist:5, /* No. of recovery since last adj */
		   dsack_seen:1, /* Whether DSACK seen after last adj */
		   advanced:1;	 /* mstamp advanced since last lost marking */
	} rack;
	u16	advmss;		/* Advertised MSS			*/
	u8	compressed_ack;
	u8	dup_ack_counter:2,
		tlp_retrans:1,	/* TLP is a retransmission */
		unused:5;
	u32	chrono_start;	/* Start time in jiffies of a TCP chrono */
	u32	chrono_stat[3];	/* Time in jiffies for chrono_stat stats */
	u8	chrono_type:2,	/* current chronograph type */
		rate_app_limited:1,  /* rate_{delivered,interval_us} limited? */
		fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
		fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */
		is_sack_reneg:1,    /* in recovery from loss with SACK reneg? */
		fastopen_client_fail:2; /* reason why fastopen failed */
	u8	nonagle     : 4,/* Disable Nagle algorithm?             */
		thin_lto    : 1,/* Use linear timeouts for thin streams */
		recvmsg_inq : 1,/* Indicate # of bytes in queue upon recvmsg */
		repair      : 1,
		frto        : 1;/* F-RTO (RFC5682) activated in CA_Loss */
	u8	repair_queue;
	u8	save_syn:2,	/* Save headers of SYN packet */
		syn_data:1,	/* SYN includes data */
		syn_fastopen:1,	/* SYN includes Fast Open option */
		syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
		syn_fastopen_ch:1, /* Active TFO re-enabling probe */
		syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
		is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
	u32	tlp_high_seq;	/* snd_nxt at the time of TLP */

	u32	tcp_tx_delay;	/* delay (in usec) added to TX packets */
	u64	tcp_wstamp_ns;	/* departure time for next sent data packet */
	u64	tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */

/* RTT measurement */
	u64	tcp_mstamp;	/* most recent packet received/sent */
	u32	srtt_us;	/* smoothed round trip time << 3 in usecs */
	u32	mdev_us;	/* medium deviation			*/
	u32	mdev_max_us;	/* maximal mdev for the last rtt period	*/
	u32	rttvar_us;	/* smoothed mdev_max			*/
	u32	rtt_seq;	/* sequence number to update rttvar	*/
	struct	minmax rtt_min;

	u32	packets_out;	/* Packets which are "in flight"	*/
	u32	retrans_out;	/* Retransmitted packets out		*/
	u32	max_packets_out;  /* max packets_out in last window */
	u32	max_packets_seq;  /* right edge of max_packets_out flight */

	u16	urg_data;	/* Saved octet of OOB data and control flags */
	u8	ecn_flags;	/* ECN status bits.			*/
	u8	keepalive_probes; /* num of allowed keep alive probes	*/
	u32	reordering;	/* Packet reordering metric.		*/
	u32	reord_seen;	/* number of data packet reordering events */
	u32	snd_up;		/* Urgent pointer			*/

/*
 *	Options received (usually on last packet, some only on SYN packets).
 */
	struct tcp_options_received rx_opt;

/*
 *	Slow start and congestion control (see also Nagle, and Karn & Partridge)
 */
	u32	snd_ssthresh;	/* Slow start size threshold		*/
	u32	snd_cwnd;	/* Sending congestion window		*/
	u32	snd_cwnd_cnt;	/* Linear increase counter		*/
	u32	snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */
	u32	snd_cwnd_used;
	u32	snd_cwnd_stamp;
	u32	prior_cwnd;	/* cwnd right before starting loss recovery */
	u32	prr_delivered;	/* Number of newly delivered packets to
				 * receiver in Recovery. */
	u32	prr_out;	/* Total number of pkts sent during Recovery. */
	u32	delivered;	/* Total data packets delivered incl. rexmits */
	u32	delivered_ce;	/* Like the above but only ECE marked packets */
	u32	lost;		/* Total data packets lost incl. rexmits */
	u32	app_limited;	/* limited until "delivered" reaches this val */
	u64	first_tx_mstamp;  /* start of window send phase */
	u64	delivered_mstamp; /* time we reached "delivered" */
	u32	rate_delivered;    /* saved rate sample: packets delivered */
	u32	rate_interval_us;  /* saved rate sample: time elapsed */

	u32	rcv_wnd;	/* Current receiver window		*/
	u32	write_seq;	/* Tail(+1) of data held in tcp send buffer */
	u32	notsent_lowat;	/* TCP_NOTSENT_LOWAT */
	u32	pushed_seq;	/* Last pushed seq, required to talk to windows */
	u32	lost_out;	/* Lost packets				*/
	u32	sacked_out;	/* SACK'd packets			*/

	struct hrtimer	pacing_timer;
	struct hrtimer	compressed_ack_timer;

	/* from STCP, retrans queue hinting */
	struct sk_buff *lost_skb_hint;
	struct sk_buff *retransmit_skb_hint;

	/* OOO segments go in this rbtree. Socket lock must be held. */
	struct rb_root	out_of_order_queue;
	struct sk_buff	*ooo_last_skb; /* cache rb_last(out_of_order_queue) */

	/* SACKs data, these 2 need to be together (see tcp_options_write) */
	struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
	struct tcp_sack_block selective_acks[4]; /* The SACKS themselves */

	struct tcp_sack_block recv_sack_cache[4];

	struct sk_buff *highest_sack;   /* skb just after the highest
					 * skb with SACKed bit set
					 * (validity guaranteed only if
					 * sacked_out > 0)
					 */

	int	lost_cnt_hint;

	u32	prior_ssthresh; /* ssthresh saved at recovery start	*/
	u32	high_seq;	/* snd_nxt at onset of congestion	*/

	u32	retrans_stamp;	/* Timestamp of the last retransmit,
				 * also used in SYN-SENT to remember stamp of
				 * the first SYN. */
	u32	undo_marker;	/* snd_una upon a new recovery episode. */
	int	undo_retrans;	/* number of undoable retransmissions. */
	u64	bytes_retrans;	/* RFC4898 tcpEStatsPerfOctetsRetrans
				 * Total data bytes retransmitted
				 */
	u32	total_retrans;	/* Total retransmits for entire connection */

	u32	urg_seq;	/* Seq of received urgent pointer */
	unsigned int		keepalive_time;	  /* time before keep alive takes place */
	unsigned int		keepalive_intvl;  /* time interval between keep alive probes */

	int			linger2;


/* Sock_ops bpf program related variables */
#ifdef CONFIG_BPF
	u8	bpf_sock_ops_cb_flags;  /* Control calling BPF programs
					 * values defined in uapi/linux/tcp.h
					 */
#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) (TP->bpf_sock_ops_cb_flags & ARG)
#else
#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0
#endif

	u16 timeout_rehash;	/* Timeout-triggered rehash attempts */

	u32 rcv_ooopack; /* Received out-of-order packets, for tcpinfo */

/* Receiver side RTT estimation */
	u32 rcv_rtt_last_tsecr;
	struct {
		u32	rtt_us;
		u32	seq;
		u64	time;
	} rcv_rtt_est;

/* Receiver queue space */
	struct {
		u32	space;
		u32	seq;
		u64	time;
	} rcvq_space;

/* TCP-specific MTU probe information. */
	struct {
		u32		  probe_seq_start;
		u32		  probe_seq_end;
	} mtu_probe;
	u32	mtu_info; /* We received an ICMP_FRAG_NEEDED / ICMPV6_PKT_TOOBIG
			   * while socket was owned by user.
			   */
#if IS_ENABLED(CONFIG_MPTCP)
	bool	is_mptcp;
#endif
#if IS_ENABLED(CONFIG_SMC)
	bool	(*smc_hs_congested)(const struct sock *sk);
	bool	syn_smc;	/* SYN includes SMC */
#endif

#ifdef CONFIG_TCP_MD5SIG
/* TCP AF-Specific parts; only used by MD5 Signature support so far */
	const struct tcp_sock_af_ops	*af_specific;

/* TCP MD5 Signature Option information */
	struct tcp_md5sig_info	__rcu *md5sig_info;
#endif

/* TCP fastopen related information */
	struct tcp_fastopen_request *fastopen_req;
	/* fastopen_rsk points to request_sock that resulted in this big
	 * socket. Used to retransmit SYNACKs etc.
	 */
	struct request_sock __rcu *fastopen_rsk;
	struct saved_syn *saved_syn;
};

enum tsq_enum {
	TSQ_THROTTLED,
	TSQ_QUEUED,
	TCP_TSQ_DEFERRED,	   /* tcp_tasklet_func() found socket was owned */
	TCP_WRITE_TIMER_DEFERRED,  /* tcp_write_timer() found socket was owned */
	TCP_DELACK_TIMER_DEFERRED, /* tcp_delack_timer() found socket was owned */
	TCP_MTU_REDUCED_DEFERRED,  /* tcp_v{4|6}_err() could not call
				    * tcp_v{4|6}_mtu_reduced()
				    */
};

enum tsq_flags {
	TSQF_THROTTLED			= (1UL << TSQ_THROTTLED),
	TSQF_QUEUED			= (1UL << TSQ_QUEUED),
	TCPF_TSQ_DEFERRED		= (1UL << TCP_TSQ_DEFERRED),
	TCPF_WRITE_TIMER_DEFERRED	= (1UL << TCP_WRITE_TIMER_DEFERRED),
	TCPF_DELACK_TIMER_DEFERRED	= (1UL << TCP_DELACK_TIMER_DEFERRED),
	TCPF_MTU_REDUCED_DEFERRED	= (1UL << TCP_MTU_REDUCED_DEFERRED),
};

static inline struct tcp_sock *tcp_sk(const struct sock *sk)
{
	return (struct tcp_sock *)sk;
}
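
/* Example (illustrative): because inet_connection_sock is the first member
 * of tcp_sock, the cast above is the usual containment pattern:
 *
 *	struct tcp_sock *tp = tcp_sk(sk);
 *	u32 cur_mss = tp->mss_cache;
 */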

struct tcp_timewait_sock {
	struct inet_timewait_sock tw_sk;
#define tw_rcv_nxt tw_sk.__tw_common.skc_tw_rcv_nxt
#define tw_snd_nxt tw_sk.__tw_common.skc_tw_snd_nxt
	u32			  tw_rcv_wnd;
	u32			  tw_ts_offset;
	u32			  tw_ts_recent;

	/* The time we sent the last out-of-window ACK: */
	u32			  tw_last_oow_ack_time;

	int			  tw_ts_recent_stamp;
	u32			  tw_tx_delay;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	  *tw_md5_key;
#endif
};

static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
{
	return (struct tcp_timewait_sock *)sk;
}

static inline bool tcp_passive_fastopen(const struct sock *sk)
{
	return sk->sk_state == TCP_SYN_RECV &&
	       rcu_access_pointer(tcp_sk(sk)->fastopen_rsk) != NULL;
}

static inline void fastopen_queue_tune(struct sock *sk, int backlog)
{
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	int somaxconn = READ_ONCE(sock_net(sk)->core.sysctl_somaxconn);

	queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn);
}
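
/* Example (illustrative): with listen(fd, 1024) and net.core.somaxconn set
 * to 4096, max_qlen becomes min(1024, 4096) == 1024.
 */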

static inline void tcp_move_syn(struct tcp_sock *tp,
				struct request_sock *req)
{
	tp->saved_syn = req->saved_syn;
	req->saved_syn = NULL;
}

static inline void tcp_saved_syn_free(struct tcp_sock *tp)
{
	kfree(tp->saved_syn);
	tp->saved_syn = NULL;
}

static inline u32 tcp_saved_syn_len(const struct saved_syn *saved_syn)
{
	return saved_syn->mac_hdrlen + saved_syn->network_hdrlen +
	       saved_syn->tcp_hdrlen;
}
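
/* Example (illustrative): for a saved SYN with a 14-byte Ethernet header,
 * a 20-byte IPv4 header and a 40-byte TCP header (options included),
 * tcp_saved_syn_len() returns 14 + 20 + 40 == 74 bytes.
 */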

struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
					       const struct sk_buff *orig_skb,
					       const struct sk_buff *ack_skb);

static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
{
	/* We use READ_ONCE() here because socket might not be locked.
	 * This happens for listeners.
	 */
	u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);

	return (user_mss && user_mss < mss) ? user_mss : mss;
}
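
/* Example (illustrative): with user_mss == 1400 (e.g. set via TCP_MAXSEG)
 * and mss == 1460, tcp_mss_clamp() returns 1400; with user_mss == 0
 * (unset) it returns mss unchanged.
 */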

int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount,
		  int shiftlen);

void __tcp_sock_set_cork(struct sock *sk, bool on);
void tcp_sock_set_cork(struct sock *sk, bool on);
int tcp_sock_set_keepcnt(struct sock *sk, int val);
int tcp_sock_set_keepidle_locked(struct sock *sk, int val);
int tcp_sock_set_keepidle(struct sock *sk, int val);
int tcp_sock_set_keepintvl(struct sock *sk, int val);
void __tcp_sock_set_nodelay(struct sock *sk, bool on);
void tcp_sock_set_nodelay(struct sock *sk);
void tcp_sock_set_quickack(struct sock *sk, int val);
int tcp_sock_set_syncnt(struct sock *sk, int val);
void tcp_sock_set_user_timeout(struct sock *sk, u32 val);
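
/* Example (illustrative): in-kernel users call these helpers instead of
 * issuing setsockopt() from kernel context, e.g.:
 *
 *	tcp_sock_set_nodelay(sock->sk);
 *	tcp_sock_set_user_timeout(sock->sk, 30 * MSEC_PER_SEC);
 */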

#endif /* _LINUX_TCP_H */