// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

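/* RFC 793-style acceptability test: does any part of [seq, end_seq) fall
 * inside the receive window [s_win, e_win)?  A zero-length segment is
 * accepted when it sits exactly on the left edge (seq == s_win) or, for
 * a zero window, exactly on the right edge (seq == end_seq == e_win).
 *
 * Example: with s_win = 100 and e_win = 200, a segment with seq = 90 and
 * end_seq = 110 overlaps the window and is accepted, while seq = 90 and
 * end_seq = 100 is not (end_seq is not after s_win).
 */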
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note: we do not put the bucket,
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

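/* Advance tw_rcv_nxt.  With TCP-AO, sequence numbers are logically
 * extended to 64 bits (RFC 5925 calls the upper half the Sequence
 * Number Extension, SNE); a new rcv_nxt below the old one indicates
 * a 32-bit wrap, so bump the receive-side SNE accordingly.
 */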
static void twsk_rcv_nxt_update(struct tcp_timewait_sock *tcptw, u32 seq)
{
#ifdef CONFIG_TCP_AO
	struct tcp_ao_info *ao;

	ao = rcu_dereference(tcptw->ao_info);
	if (unlikely(ao && seq < tcptw->tw_rcv_nxt))
		WRITE_ONCE(ao->rcv_sne, ao->rcv_sne + 1);
#endif
	tcptw->tw_rcv_nxt = seq;
}

/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully when one end sits in LAST-ACK or CLOSING, retransmitting its
 *   FIN (and, probably, the tail of the data) while one or more of our ACKs
 *   are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that it
 *   is set to catch "old duplicate segments" wandering off their path.
 *   That is not quite correct. The timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to tolerate the loss of one
 *   (or more) segments sent by the peer and of our ACKs. It may therefore be
 *   derived from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   has finally closed, and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with these semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some cleverer way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. This means that, strictly speaking, we must
 * spinlock it. I do not want to! The probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
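	/* doff > 5 means the header extends past the 20-byte minimum,
	 * i.e. TCP options are present and worth parsing.
	 */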
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent = tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after the half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate = TCP_TIME_WAIT;
		twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
			tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now the real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 * connection to be larger than the largest sequence
	 * number it used on the previous connection incarnation,
	 * and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 * to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* An in-window segment may only be a reset or a bare ACK. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		} else {
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		}

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.
	 *
	 * All such segments are ACKed immediately.
	 *
	 * The only exception is a new SYN. We accept it, if it is
	 * not an old duplicate and we are not in danger of being killed
	 * by delayed old duplicates. The RFC check, that it has a
	 * newer sequence number, works only at rates < 40 Mbit/sec.
	 * However, if PAWS works, it is reliable, and moreover,
	 * we may even relax the silly sequence-space cutoff.
	 *
	 * RED-PEN: we violate the main RFC requirement: if this SYN turns
	 * out to be an old duplicate (i.e. we receive an RST in reply to
	 * our SYN-ACK), we must return the socket to the time-wait state.
	 * That is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
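		/* Per the RFC 1122 rule quoted above, pick an ISN well above
		 * anything the old incarnation could have sent (tw_snd_nxt
		 * plus a maximal unscaled window), avoiding 0, which callers
		 * take to mean that no TIME-WAIT ISN was supplied.
		 */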
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;

		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN, it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
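
/* For reference, a caller such as tcp_v4_rcv() typically dispatches on the
 * return value roughly like this (a simplified sketch, not the verbatim
 * caller; see tcp_ipv4.c for the real thing):
 *
 *	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
 *	case TCP_TW_SYN:	// reopen: hand the SYN to a listener
 *		...look up the listening socket and process the SYN...
 *	case TCP_TW_ACK:	// echo the final ACK
 *		tcp_v4_timewait_ack(sk, skb);
 *		break;
 *	case TCP_TW_RST:	// answer with a reset
 *		tcp_v4_send_reset(sk, skb);
 *		inet_twsk_deschedule_put(inet_twsk(sk));
 *		break;
 *	case TCP_TW_SUCCESS:	// nothing more to do; skb is dropped
 *		break;
 *	}
 */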

static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
{
#ifdef CONFIG_TCP_MD5SIG
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;

	/*
	 * The timewait bucket does not have the key DB from the
	 * sock structure. We just make a quick copy of the
	 * md5 key being used (if indeed we are using one)
	 * so the timewait ack generating code has the key.
	 */
	tcptw->tw_md5_key = NULL;
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return;

	key = tp->af_specific->md5_lookup(sk, sk);
	if (key) {
		tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
		if (!tcptw->tw_md5_key)
			return;
		if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
			goto out_free;
		tcp_md5_add_sigpool();
	}
	return;
out_free:
	WARN_ON_ONCE(1);
	kfree(tcptw->tw_md5_key);
	tcptw->tw_md5_key = NULL;
#endif
}

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_timewait_sock *tw;

	tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
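		/* 3.5 * RTO: long enough to ride out a retransmitted FIN
		 * plus our lost ACKs (see the TIME-WAIT timeout discussion
		 * at the top of this file).
		 */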
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);

		tw->tw_transparent = inet_test_bit(TRANSPARENT, sk);
		tw->tw_mark = sk->sk_mark;
		tw->tw_priority = READ_ONCE(sk->sk_priority);
		tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt = tp->rcv_nxt;
		tcptw->tw_snd_nxt = tp->snd_nxt;
		tcptw->tw_rcv_wnd = tcp_receive_window(tp);
		tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset = tp->tsoffset;
		tw->tw_usec_ts = tp->tcp_usec_ts;
		tcptw->tw_last_oow_ack_time = 0;
		tcptw->tw_tx_delay = tp->tcp_tx_delay;
		tw->tw_txhash = sk->sk_txhash;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

		tcp_time_wait_init(sk, tcptw);
		tcp_ao_time_wait(tcptw, tp);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* tw_timer is pinned, so we need to make sure BHs are
		 * disabled in the following section, otherwise the timer
		 * handler could run before we complete the initialization.
		 */
		local_bh_disable();
		inet_twsk_schedule(tw, timeo);
		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
		local_bh_enable();
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);
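
/* Typical use (a sketch, not an exhaustive list of call sites): tcp_fin()
 * moves a socket from FIN_WAIT2 into real TIME-WAIT with
 *
 *	tcp_time_wait(sk, TCP_TIME_WAIT, 0);
 *
 * while tcp_rcv_state_process() may park a closing socket in the dead
 * FIN_WAIT2 substate via tcp_time_wait(sk, TCP_FIN_WAIT2, tmo).
 */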

#ifdef CONFIG_TCP_MD5SIG
static void tcp_md5_twsk_free_rcu(struct rcu_head *head)
{
	struct tcp_md5sig_key *key;

	key = container_of(head, struct tcp_md5sig_key, rcu);
	kfree(key);
	static_branch_slow_dec_deferred(&tcp_md5_needed);
	tcp_md5_release_sigpool();
}
#endif

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed.key)) {
		struct tcp_timewait_sock *twsk = tcp_twsk(sk);

		if (twsk->tw_md5_key)
			call_rcu(&twsk->tw_md5_key->rcu, tcp_md5_twsk_free_rcu);
	}
#endif
	tcp_ao_destroy_sock(sk, true);
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

void tcp_twsk_purge(struct list_head *net_exit_list, int family)
{
	bool purged_once = false;
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		if (net->ipv4.tcp_death_row.hashinfo->pernet) {
			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family);
		} else if (!purged_once) {
			inet_twsk_purge(&tcp_hashinfo, family);
			purged_once = true;
		}
	}
}
EXPORT_SYMBOL_GPL(tcp_twsk_purge);

/* Warning: this function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(sk_listener, full_space,
				  mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
				  &req->rsk_rcv_wnd,
				  &req->rsk_window_clamp,
				  ireq->wscale_ok,
				  &rcv_wscale,
				  rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

static void smc_check_reset_syn_req(const struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. The tp of the
 * listening socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_request_sock *treq = tcp_rsk(req);
	struct inet_connection_sock *newicsk;
	const struct tcp_sock *oldtp;
	struct tcp_sock *newtp;
	u32 seq;
#ifdef CONFIG_TCP_AO
	struct tcp_ao_key *ao_key;
#endif

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newtp = tcp_sk(newsk);
	oldtp = tcp_sk(sk);

	smc_check_reset_syn_req(oldtp, req, newtp);

	/* Now setup tcp_sock */
	newtp->pred_flags = 0;

	seq = treq->rcv_isn + 1;
	newtp->rcv_wup = seq;
	WRITE_ONCE(newtp->copied_seq, seq);
	WRITE_ONCE(newtp->rcv_nxt, seq);
	newtp->segs_in = 1;

	seq = treq->snt_isn + 1;
	newtp->snd_sml = newtp->snd_una = seq;
	WRITE_ONCE(newtp->snd_nxt, seq);
	newtp->snd_up = seq;

	INIT_LIST_HEAD(&newtp->tsq_node);
	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

	tcp_init_wl(newtp, treq->rcv_isn);

	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

	newtp->lsndtime = tcp_jiffies32;
	newsk->sk_txhash = READ_ONCE(treq->txhash);
	newtp->total_retrans = req->num_retrans;

	tcp_init_xmit_timers(newsk);
	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

	if (sock_flag(newsk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(newsk,
					       keepalive_time_when(newtp));

	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
	newtp->rx_opt.sack_ok = ireq->sack_ok;
	newtp->window_clamp = req->rsk_window_clamp;
	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
	newtp->rcv_wnd = req->rsk_rcv_wnd;
	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
	if (newtp->rx_opt.wscale_ok) {
		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
	} else {
		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
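		/* Without window scaling, the advertised window can never
		 * exceed the 16-bit maximum of 65535 (RFC 7323), so clamp
		 * accordingly.
		 */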
		newtp->window_clamp = min(newtp->window_clamp, 65535U);
	}
	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
	newtp->max_window = newtp->snd_wnd;

	if (newtp->rx_opt.tstamp_ok) {
		newtp->tcp_usec_ts = treq->req_usec_ts;
		newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent);
		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
	} else {
		newtp->tcp_usec_ts = 0;
		newtp->rx_opt.ts_recent_stamp = 0;
		newtp->tcp_header_len = sizeof(struct tcphdr);
	}
	if (req->num_timeout) {
		newtp->total_rto = req->num_timeout;
		newtp->undo_marker = treq->snt_isn;
		if (newtp->tcp_usec_ts) {
			newtp->retrans_stamp = treq->snt_synack;
			newtp->total_rto_time = (u32)(tcp_clock_us() -
						      newtp->retrans_stamp) / USEC_PER_MSEC;
		} else {
			newtp->retrans_stamp = div_u64(treq->snt_synack,
						       USEC_PER_SEC / TCP_TS_HZ);
			newtp->total_rto_time = tcp_clock_ms() -
						newtp->retrans_stamp;
		}
		newtp->total_rto_recoveries = 1;
	}
	newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
	newtp->md5sig_info = NULL;	/*XXX*/
#endif
#ifdef CONFIG_TCP_AO
	newtp->ao_info = NULL;
	ao_key = treq->af_specific->ao_lookup(sk, req,
					      tcp_rsk(req)->ao_keyid, -1);
	if (ao_key)
		newtp->tcp_header_len += tcp_ao_len_aligned(ao_key);
#endif
	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
	newtp->rx_opt.mss_clamp = req->mss;
	tcp_ecn_openreq_child(newtp, req);
	newtp->fastopen_req = NULL;
	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

	newtp->bpf_chg_cc_inprogress = 0;
	tcp_bpf_clone(sk, newsk);

	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 *
 * Note: If @fastopen is true, this can be called from process context.
 *       Otherwise, this is from BH context.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen, bool *req_stolen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
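	/* Keep only the RST/SYN/ACK bits of the flags word; tcp_flag_word()
	 * and the TCP_FLAG_* masks are in network byte order, hence __be32.
	 */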
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = READ_ONCE(req->ts_recent);
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true timestamp, but that is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for a pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe the SYN-RECV state. All the description
		 * is wrong, we cannot trust it and should rely only
		 * on common sense and implementation experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK, similar to
		 * the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&

		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += reqsk_timeout(req, TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* Further on, we reproduce the section "SEGMENT ARRIVES"
	   for the SYN-RECEIVED state of RFC793.
	   It is broken; however, it fails only when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

	   A: gets SYN, seq=7
	   B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

	   A: sends SYN|ACK, seq=7, ack_seq=8
	   B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, ACK test passes. So
	   does sequence test, SYN is truncated, and thus we consider
	   it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK. Otherwise, we create an established connection. Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless, and rare. The possibility is about
	   the same as us discovering intelligent life on another planet
	   tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence we do not either.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 *                  and the incoming segment acknowledges something not yet
	 *                  sent (the segment carries an unacceptable ACK) ...
	 *                  a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by the listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	/* TODO: We probably should defer ts_recent change once
	 * we take ownership of @req.
	 */
	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		 * at tcp_rsk(req)->rcv_isn + 1.
		 */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set. If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, the ACK is valid, create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it is dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	if (own_req && rsk_drop_req(req)) {
		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
		return child;
	}

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	*req_stolen = !own_req;
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (sk != req->rsk_listener)
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary, to
		 * avoid becoming vulnerable to an outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk, skb);
	}
	if (!fastopen) {
		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);

		if (unlinked)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
		*req_stolen = !unlinked;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
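
/* A short summary of the return contract implemented above:
 *   - child socket: the 3WHS completed and a full socket was created;
 *   - sk itself: an unacceptable ACK (the listener will send the reset),
 *     or the Fast Open case, where sk already is the child and processing
 *     simply continues on it;
 *   - NULL: the segment was answered or dropped here; nothing more to do.
 */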

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where, after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
				       struct sk_buff *skb)
	__releases(&((child)->sk_lock.slock))
{
	enum skb_drop_reason reason = SKB_NOT_DROPPED_YET;
	int state = child->sk_state;

	/* record sk_napi_id and sk_rx_queue_mapping of child. */
	sk_mark_napi_id_set(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		reason = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket no longer protects us.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return reason;
}
EXPORT_SYMBOL(tcp_child_process);
939 | |