// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/sha2.h>
#include <crypto/utils.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"

#include <trace/events/mptcp.h>
#include <trace/events/sock.h>

static void mptcp_subflow_ops_undo_override(struct sock *ssk);

static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
				  enum linux_mptcp_mib_field field)
{
	MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
}

static void subflow_req_destructor(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	pr_debug("subflow_req=%p", subflow_req);

	if (subflow_req->msk)
		sock_put((struct sock *)subflow_req->msk);

	mptcp_token_destroy_request(req);
}

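/* Compute the RFC 8684 join HMAC: the 8-byte message is the concatenation
 * of the two 32-bit nonces, keyed with the given key pair. Callers truncate
 * the SHA-256 output as needed: 64 bits for the SYN-ACK thmac, 160 bits
 * (MPTCPOPT_HMAC_LEN) for the third ACK.
 */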
static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
				  void *hmac)
{
	u8 msg[8];

	put_unaligned_be32(nonce1, &msg[0]);
	put_unaligned_be32(nonce2, &msg[4]);

	mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}

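/* A new subflow is acceptable only once the MPC handshake is complete and
 * while the path manager allows it: either an active userspace PM, or the
 * in-kernel PM still below its configured subflow limits.
 */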
static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
{
	return mptcp_is_fully_established((void *)msk) &&
	       ((mptcp_pm_is_userspace(msk) &&
		 mptcp_userspace_pm_active(msk)) ||
		READ_ONCE(msk->pm.accept_subflow));
}

/* validate received token and create truncated hmac and nonce for SYN-ACK */
static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_req)
{
	struct mptcp_sock *msk = subflow_req->msk;
	u8 hmac[SHA256_DIGEST_SIZE];

	get_random_bytes(&subflow_req->local_nonce, sizeof(u32));

	subflow_generate_hmac(msk->local_key, msk->remote_key,
			      subflow_req->local_nonce,
			      subflow_req->remote_nonce, hmac);

	subflow_req->thmac = get_unaligned_be64(hmac);
}

static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_sock *msk;
	int local_id;

	msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
	if (!msk) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
		return NULL;
	}

	local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
	if (local_id < 0) {
		sock_put((struct sock *)msk);
		return NULL;
	}
	subflow_req->local_id = local_id;

	return msk;
}

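/* Reset the MPTCP-specific fields of a freshly allocated request socket,
 * before any option parsing takes place.
 */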
static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	subflow_req->mp_capable = 0;
	subflow_req->mp_join = 0;
	subflow_req->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk_listener));
	subflow_req->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk_listener));
	subflow_req->msk = NULL;
	mptcp_token_init_request(req);
}

static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
{
	return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
}

static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason)
{
	struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP);

	if (mpext) {
		memset(mpext, 0, sizeof(*mpext));
		mpext->reset_reason = reason;
	}
}

/* Init mptcp request socket.
 *
 * Returns an error code if a JOIN has failed and a TCP reset
 * should be sent.
 */
static int subflow_check_req(struct request_sock *req,
			     const struct sock *sk_listener,
			     struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	bool opt_mp_capable, opt_mp_join;

	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
		return -EINVAL;
#endif

	mptcp_get_options(skb, &mp_opt);

	opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
	opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
	if (opt_mp_capable) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

		if (opt_mp_join)
			return 0;
	} else if (opt_mp_join) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
	}

	if (opt_mp_capable && listener->request_mptcp) {
		int err, retries = MPTCP_TOKEN_MAX_RETRIES;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
again:
		do {
			get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key));
		} while (subflow_req->local_key == 0);

		if (unlikely(req->syncookie)) {
			mptcp_crypto_key_sha(subflow_req->local_key,
					     &subflow_req->token,
					     &subflow_req->idsn);
			if (mptcp_token_exists(subflow_req->token)) {
				if (retries-- > 0)
					goto again;
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);
			} else {
				subflow_req->mp_capable = 1;
			}
			return 0;
		}

		err = mptcp_token_new_request(req);
		if (err == 0)
			subflow_req->mp_capable = 1;
		else if (retries-- > 0)
			goto again;
		else
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);

	} else if (opt_mp_join && listener->request_mptcp) {
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
		subflow_req->mp_join = 1;
		subflow_req->backup = mp_opt.backup;
		subflow_req->remote_id = mp_opt.join_id;
		subflow_req->token = mp_opt.token;
		subflow_req->remote_nonce = mp_opt.nonce;
		subflow_req->msk = subflow_token_join_request(req);

		/* Can't fall back to TCP in this case. */
		if (!subflow_req->msk) {
			subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
			return -EPERM;
		}

		if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
			pr_debug("syn inet_sport=%d %d",
				 ntohs(inet_sk(sk_listener)->inet_sport),
				 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
			if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
				return -EPERM;
			}
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX);
		}

		subflow_req_create_thmac(subflow_req);

		if (unlikely(req->syncookie)) {
			if (mptcp_can_accept_new_subflow(subflow_req->msk))
				subflow_init_req_cookie_join_save(subflow_req, skb);
			else
				return -EPERM;
		}

		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
			 subflow_req->remote_nonce, subflow_req->msk);
	}

	return 0;
}

int mptcp_subflow_init_cookie_req(struct request_sock *req,
				  const struct sock *sk_listener,
				  struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	bool opt_mp_capable, opt_mp_join;
	int err;

	subflow_init_req(req, sk_listener);
	mptcp_get_options(skb, &mp_opt);

	opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
	opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
	if (opt_mp_capable && opt_mp_join)
		return -EINVAL;

	if (opt_mp_capable && listener->request_mptcp) {
		if (mp_opt.sndr_key == 0)
			return -EINVAL;

		subflow_req->local_key = mp_opt.rcvr_key;
		err = mptcp_token_new_request(req);
		if (err)
			return err;

		subflow_req->mp_capable = 1;
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	} else if (opt_mp_join && listener->request_mptcp) {
		if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
			return -EINVAL;

		subflow_req->mp_join = 1;
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);

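/* Resolve the route for an incoming MPTCP SYN and validate its options;
 * on failure the dst is released and, outside of syncookie mode, a TCP
 * reset is sent back.
 */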
static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
					      struct sk_buff *skb,
					      struct flowi *fl,
					      struct request_sock *req)
{
	struct dst_entry *dst;
	int err;

	tcp_rsk(req)->is_mptcp = 1;
	subflow_init_req(req, sk);

	dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req);
	if (!dst)
		return NULL;

	err = subflow_check_req(req, sk, skb);
	if (err == 0)
		return dst;

	dst_release(dst);
	if (!req->syncookie)
		tcp_request_sock_ops.send_reset(sk, skb);
	return NULL;
}

static void subflow_prep_synack(const struct sock *sk, struct request_sock *req,
				struct tcp_fastopen_cookie *foc,
				enum tcp_synack_type synack_type)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct inet_request_sock *ireq = inet_rsk(req);

	/* clear tstamp_ok, as needed depending on cookie */
	if (foc && foc->len > -1)
		ireq->tstamp_ok = 0;

	if (synack_type == TCP_SYNACK_FASTOPEN)
		mptcp_fastopen_subflow_synack_set_params(subflow, req);
}

static int subflow_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
				  struct flowi *fl,
				  struct request_sock *req,
				  struct tcp_fastopen_cookie *foc,
				  enum tcp_synack_type synack_type,
				  struct sk_buff *syn_skb)
{
	subflow_prep_synack(sk, req, foc, synack_type);

	return tcp_request_sock_ipv4_ops.send_synack(sk, dst, fl, req, foc,
						     synack_type, syn_skb);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static int subflow_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
				  struct flowi *fl,
				  struct request_sock *req,
				  struct tcp_fastopen_cookie *foc,
				  enum tcp_synack_type synack_type,
				  struct sk_buff *syn_skb)
{
	subflow_prep_synack(sk, req, foc, synack_type);

	return tcp_request_sock_ipv6_ops.send_synack(sk, dst, fl, req, foc,
						     synack_type, syn_skb);
}

static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
					      struct sk_buff *skb,
					      struct flowi *fl,
					      struct request_sock *req)
{
	struct dst_entry *dst;
	int err;

	tcp_rsk(req)->is_mptcp = 1;
	subflow_init_req(req, sk);

	dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req);
	if (!dst)
		return NULL;

	err = subflow_check_req(req, sk, skb);
	if (err == 0)
		return dst;

	dst_release(dst);
	if (!req->syncookie)
		tcp6_request_sock_ops.send_reset(sk, skb);
	return NULL;
}
#endif

/* validate received truncated hmac and create hmac for third ACK */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{
	u8 hmac[SHA256_DIGEST_SIZE];
	u64 thmac;

	subflow_generate_hmac(subflow->remote_key, subflow->local_key,
			      subflow->remote_nonce, subflow->local_nonce,
			      hmac);

	thmac = get_unaligned_be64(hmac);
	pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
		 subflow, subflow->token, thmac, subflow->thmac);

	return thmac == subflow->thmac;
}

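/* Forcibly close the subflow: send an active reset, move it to TCP_CLOSE
 * and let the MPTCP worker dispose of it.
 */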
void mptcp_subflow_reset(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;

	/* mptcp_mp_fail_no_response() can reach here on an already closed
	 * socket
	 */
	if (ssk->sk_state == TCP_CLOSE)
		return;

	/* must hold: tcp_done() could drop last reference on parent */
	sock_hold(sk);

	tcp_send_active_reset(ssk, GFP_ATOMIC);
	tcp_done(ssk);
	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags))
		mptcp_schedule_work(sk);

	sock_put(sk);
}

static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
{
	return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
}

void __mptcp_set_connected(struct sock *sk)
{
	__mptcp_propagate_sndbuf(sk, mptcp_sk(sk)->first);
	if (sk->sk_state == TCP_SYN_SENT) {
		inet_sk_state_store(sk, TCP_ESTABLISHED);
		sk->sk_state_change(sk);
	}
}

static void mptcp_set_connected(struct sock *sk)
{
	mptcp_data_lock(sk);
	if (!sock_owned_by_user(sk))
		__mptcp_set_connected(sk);
	else
		__set_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->cb_flags);
	mptcp_data_unlock(sk);
}

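/* Per RFC 8684, the peer key also conveys the initial data sequence number:
 * the IDSN is the least-significant 64 bits of SHA-256(key), and the first
 * data byte is numbered IDSN + 1, hence the iasn increment below.
 */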
static void subflow_set_remote_key(struct mptcp_sock *msk,
				   struct mptcp_subflow_context *subflow,
				   const struct mptcp_options_received *mp_opt)
{
	/* active MPC subflow will reach here multiple times:
	 * at subflow_finish_connect() time and at 4th ack time
	 */
	if (subflow->remote_key_valid)
		return;

	subflow->remote_key_valid = 1;
	subflow->remote_key = mp_opt->sndr_key;
	mptcp_crypto_key_sha(subflow->remote_key, NULL, &subflow->iasn);
	subflow->iasn++;

	WRITE_ONCE(msk->remote_key, subflow->remote_key);
	WRITE_ONCE(msk->ack_seq, subflow->iasn);
	WRITE_ONCE(msk->can_ack, true);
	atomic64_set(&msk->rcv_wnd_sent, subflow->iasn);
}

static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_options_received mp_opt;
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

	/* be sure no special action on any packet other than syn-ack */
	if (subflow->conn_finished)
		return;

	msk = mptcp_sk(parent);
	subflow->rel_write_seq = 1;
	subflow->conn_finished = 1;
	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
	pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);

	mptcp_get_options(skb, &mp_opt);
	if (subflow->request_mptcp) {
		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) {
			MPTCP_INC_STATS(sock_net(sk),
					MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
			mptcp_do_fallback(sk);
			pr_fallback(msk);
			goto fallback;
		}

		if (mp_opt.suboptions & OPTION_MPTCP_CSUMREQD)
			WRITE_ONCE(msk->csum_enabled, true);
		if (mp_opt.deny_join_id0)
			WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
		subflow->mp_capable = 1;
		subflow_set_remote_key(msk, subflow, &mp_opt);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
		mptcp_finish_connect(sk);
		mptcp_set_connected(parent);
	} else if (subflow->request_join) {
		u8 hmac[SHA256_DIGEST_SIZE];

		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ)) {
			subflow->reset_reason = MPTCP_RST_EMPTCP;
			goto do_reset;
		}

		subflow->backup = mp_opt.backup;
		subflow->thmac = mp_opt.thmac;
		subflow->remote_nonce = mp_opt.nonce;
		subflow->remote_id = mp_opt.join_id;
		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
			 subflow, subflow->thmac, subflow->remote_nonce,
			 subflow->backup);

		if (!subflow_thmac_valid(subflow)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
			subflow->reset_reason = MPTCP_RST_EMPTCP;
			goto do_reset;
		}

		if (!mptcp_finish_join(sk))
			goto do_reset;

		subflow_generate_hmac(subflow->local_key, subflow->remote_key,
				      subflow->local_nonce,
				      subflow->remote_nonce,
				      hmac);
		memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);

		subflow->mp_join = 1;
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);

		if (subflow_use_different_dport(msk, sk)) {
			pr_debug("synack inet_dport=%d %d",
				 ntohs(inet_sk(sk)->inet_dport),
				 ntohs(inet_sk(parent)->inet_dport));
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
		}
	} else if (mptcp_check_fallback(sk)) {
fallback:
		mptcp_rcv_space_init(msk, sk);
		mptcp_set_connected(parent);
	}
	return;

do_reset:
	subflow->reset_transient = 0;
	mptcp_subflow_reset(sk);
}

static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id)
{
	subflow->local_id = local_id;
	subflow->local_id_valid = 1;
}

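/* The local address ID is assigned lazily, the first time the subflow
 * needs to (re)build its headers: query the path manager once and cache
 * the result in the subflow context.
 */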
static int subflow_chk_local_id(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	int err;

	if (likely(subflow->local_id_valid))
		return 0;

	err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
	if (err < 0)
		return err;

	subflow_set_local_id(subflow, err);
	return 0;
}

static int subflow_rebuild_header(struct sock *sk)
{
	int err = subflow_chk_local_id(sk);

	if (unlikely(err < 0))
		return err;

	return inet_sk_rebuild_header(sk);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static int subflow_v6_rebuild_header(struct sock *sk)
{
	int err = subflow_chk_local_id(sk);

	if (unlikely(err < 0))
		return err;

	return inet6_sk_rebuild_header(sk);
}
#endif

static struct request_sock_ops mptcp_subflow_v4_request_sock_ops __ro_after_init;
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&mptcp_subflow_v4_request_sock_ops,
				&subflow_request_sock_ipv4_ops,
				sk, skb);
drop:
	tcp_listendrop(sk);
	return 0;
}

static void subflow_v4_req_destructor(struct request_sock *req)
{
	subflow_req_destructor(req);
	tcp_request_sock_ops.destructor(req);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init;
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
static struct proto tcpv6_prot_override __ro_after_init;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	if (skb->protocol == htons(ETH_P_IP))
		return subflow_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
		return 0;
	}

	return tcp_conn_request(&mptcp_subflow_v6_request_sock_ops,
				&subflow_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}

static void subflow_v6_req_destructor(struct request_sock *req)
{
	subflow_req_destructor(req);
	tcp6_request_sock_ops.destructor(req);
}
#endif

struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
					       struct sock *sk_listener,
					       bool attach_listener)
{
	if (ops->family == AF_INET)
		ops = &mptcp_subflow_v4_request_sock_ops;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (ops->family == AF_INET6)
		ops = &mptcp_subflow_v6_request_sock_ops;
#endif

	return inet_reqsk_alloc(ops, sk_listener, attach_listener);
}
EXPORT_SYMBOL(mptcp_subflow_reqsk_alloc);

/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
			       const struct mptcp_options_received *mp_opt)
{
	const struct mptcp_subflow_request_sock *subflow_req;
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;

	subflow_req = mptcp_subflow_rsk(req);
	msk = subflow_req->msk;
	if (!msk)
		return false;

	subflow_generate_hmac(msk->remote_key, msk->local_key,
			      subflow_req->remote_nonce,
			      subflow_req->local_nonce, hmac);

	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}

static void subflow_ulp_fallback(struct sock *sk,
				 struct mptcp_subflow_context *old_ctx)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	mptcp_subflow_tcp_fallback(sk, old_ctx);
	icsk->icsk_ulp_ops = NULL;
	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	tcp_sk(sk)->is_mptcp = 0;

	mptcp_subflow_ops_undo_override(sk);
}

void mptcp_subflow_drop_ctx(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

	if (!ctx)
		return;

	list_del(&mptcp_subflow_ctx(ssk)->node);
	if (inet_csk(ssk)->icsk_ulp_ops) {
		subflow_ulp_fallback(ssk, ctx);
		if (ctx->conn)
			sock_put(ctx->conn);
	}

	kfree_rcu(ctx, rcu);
}

void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
				     const struct mptcp_options_received *mp_opt)
{
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	subflow_set_remote_key(msk, subflow, mp_opt);
	subflow->fully_established = 1;
	WRITE_ONCE(msk->fully_established, true);

	if (subflow->is_mptfo)
		mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt);
}

static struct sock *subflow_syn_recv_sock(const struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst,
					  struct request_sock *req_unhash,
					  bool *own_req)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
	struct mptcp_subflow_request_sock *subflow_req;
	struct mptcp_options_received mp_opt;
	bool fallback, fallback_is_fatal;
	struct mptcp_sock *owner;
	struct sock *child;

	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

	/* After child creation we must look for MPC even when options
	 * are not parsed
	 */
	mp_opt.suboptions = 0;

	/* hopefully temporary handling for MP_JOIN+syncookie */
	subflow_req = mptcp_subflow_rsk(req);
	fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
	fallback = !tcp_rsk(req)->is_mptcp;
	if (fallback)
		goto create_child;

	/* if the sk is MP_CAPABLE, we try to fetch the client key */
	if (subflow_req->mp_capable) {
		/* we can receive and accept an in-window, out-of-order pkt,
		 * which may not carry the MP_CAPABLE opt even on mptcp enabled
		 * paths: always try to extract the peer key, and fallback
		 * for packets missing it.
		 * Even OoO DSS packets coming legitimately after dropped or
		 * reordered MPC will cause fallback, but we don't have other
		 * options.
		 */
		mptcp_get_options(skb, &mp_opt);
		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC))
			fallback = true;

	} else if (subflow_req->mp_join) {
		mptcp_get_options(skb, &mp_opt);
		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ) ||
		    !subflow_hmac_valid(req, &mp_opt) ||
		    !mptcp_can_accept_new_subflow(subflow_req->msk)) {
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
			fallback = true;
		}
	}

create_child:
	child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
						     req_unhash, own_req);

	if (child && *own_req) {
		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

		tcp_rsk(req)->drop_req = false;

		/* we need to fallback on ctx allocation failure and on pre-reqs
		 * checking above. In the latter scenario we additionally need
		 * to reset the context to non MPTCP status.
		 */
		if (!ctx || fallback) {
			if (fallback_is_fatal) {
				subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
				goto dispose_child;
			}
			goto fallback;
		}

		/* ssk inherits options of listener sk */
		ctx->setsockopt_seq = listener->setsockopt_seq;

		if (ctx->mp_capable) {
			ctx->conn = mptcp_sk_clone_init(listener->conn, &mp_opt, child, req);
			if (!ctx->conn)
				goto fallback;

			ctx->subflow_id = 1;
			owner = mptcp_sk(ctx->conn);
			mptcp_pm_new_connection(owner, child, 1);

			/* with OoO packets we can reach here without ingress
			 * mpc option
			 */
			if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) {
				mptcp_subflow_fully_established(ctx, &mp_opt);
				mptcp_pm_fully_established(owner, child);
				ctx->pm_notified = 1;
			}
		} else if (ctx->mp_join) {
			owner = subflow_req->msk;
			if (!owner) {
				subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
				goto dispose_child;
			}

			/* move the msk reference ownership to the subflow */
			subflow_req->msk = NULL;
			ctx->conn = (struct sock *)owner;

			if (subflow_use_different_sport(owner, sk)) {
				pr_debug("ack inet_sport=%d %d",
					 ntohs(inet_sk(sk)->inet_sport),
					 ntohs(inet_sk((struct sock *)owner)->inet_sport));
				if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
					SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX);
					goto dispose_child;
				}
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX);
			}

			if (!mptcp_finish_join(child))
				goto dispose_child;

			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
			tcp_rsk(req)->drop_req = true;
		}
	}

	/* check for expected invariant - should never trigger, just help
	 * catching subtle bugs earlier
	 */
	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
		     (!mptcp_subflow_ctx(child) ||
		      !mptcp_subflow_ctx(child)->conn));
	return child;

dispose_child:
	mptcp_subflow_drop_ctx(child);
	tcp_rsk(req)->drop_req = true;
	inet_csk_prepare_for_destroy_sock(child);
	tcp_done(child);
	req->rsk_ops->send_reset(sk, skb);

	/* The last child reference will be released by the caller */
	return child;

fallback:
	mptcp_subflow_drop_ctx(child);
	return child;
}

static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;
static struct proto tcp_prot_override __ro_after_init;

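/* Outcome of validating the DSS mapping for the skb at the head of the
 * subflow receive queue:
 * MAPPING_OK       - a valid mapping covers the skb
 * MAPPING_INVALID  - mapping and subflow data disagree, protocol error
 * MAPPING_EMPTY    - no skb, or not enough queued data to validate yet
 * MAPPING_DATA_FIN - the mapping carries only a DATA_FIN
 * MAPPING_DUMMY    - the subflow fell back to plain TCP
 * MAPPING_BAD_CSUM - the DSS checksum does not match the data
 */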
enum mapping_status {
	MAPPING_OK,
	MAPPING_INVALID,
	MAPPING_EMPTY,
	MAPPING_DATA_FIN,
	MAPPING_DUMMY,
	MAPPING_BAD_CSUM
};

static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
	pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
		 ssn, subflow->map_subflow_seq, subflow->map_data_len);
}

static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned int skb_consumed;

	skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
	if (WARN_ON_ONCE(skb_consumed >= skb->len))
		return true;

	return skb->len - skb_consumed <= subflow->map_data_len -
					  mptcp_subflow_get_map_offset(subflow);
}

static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

	if (unlikely(before(ssn, subflow->map_subflow_seq))) {
		/* Mapping covers data later in the subflow stream,
		 * currently unsupported.
		 */
		dbg_bad_map(subflow, ssn);
		return false;
	}
	if (unlikely(!before(ssn, subflow->map_subflow_seq +
				  subflow->map_data_len))) {
		/* Mapping covers only past subflow data, invalid */
		dbg_bad_map(subflow, ssn);
		return false;
	}
	return true;
}

static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
					      bool csum_reqd)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 offset, seq, delta;
	__sum16 csum;
	int len;

	if (!csum_reqd)
		return MAPPING_OK;

	/* mapping already validated on previous traversal */
	if (subflow->map_csum_len == subflow->map_data_len)
		return MAPPING_OK;

	/* traverse the receive queue, ensuring it contains a full
	 * DSS mapping and accumulating the related csum.
	 * Preserve the accumulated csum across multiple calls, to compute
	 * the csum only once
	 */
	delta = subflow->map_data_len - subflow->map_csum_len;
	for (;;) {
		seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len;
		offset = seq - TCP_SKB_CB(skb)->seq;

		/* if the current skb has not been accounted yet, csum its contents
		 * up to the amount covered by the current DSS
		 */
		if (offset < skb->len) {
			__wsum csum;

			len = min(skb->len - offset, delta);
			csum = skb_checksum(skb, offset, len, 0);
			subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum,
								subflow->map_csum_len);

			delta -= len;
			subflow->map_csum_len += len;
		}
		if (delta == 0)
			break;

		if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) {
			/* if this subflow is closed, the partial mapping
			 * will be never completed; flush the pending skbs, so
			 * that subflow_sched_work_if_closed() can kick in
			 */
			if (unlikely(ssk->sk_state == TCP_CLOSE))
				while ((skb = skb_peek(&ssk->sk_receive_queue)))
					sk_eat_skb(ssk, skb);

			/* not enough data to validate the csum */
			return MAPPING_EMPTY;
		}

		/* the DSS mapping for next skbs will be validated later,
		 * when a get_mapping_status call will process such skb
		 */
		skb = skb->next;
	}

	/* note that 'map_data_len' accounts only for the carried data, does
	 * not include the eventual seq increment due to the data fin,
	 * while the pseudo header requires the original DSS data len,
	 * including that
	 */
	csum = __mptcp_make_csum(subflow->map_seq,
				 subflow->map_subflow_seq,
				 subflow->map_data_len + subflow->map_data_fin,
				 subflow->map_data_csum);
	if (unlikely(csum)) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
		return MAPPING_BAD_CSUM;
	}

	subflow->valid_csum_seen = 1;
	return MAPPING_OK;
}

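/* Parse and validate the DSS mapping covering the skb at the head of the
 * receive queue: handle 0-len FIN packets, infinite mappings, DATA_FIN
 * signalling and 32-bit sequence number expansion, then check that the
 * mapping fully covers the TCP data in the skb.
 */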
static enum mapping_status get_mapping_status(struct sock *ssk,
					      struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	bool csum_reqd = READ_ONCE(msk->csum_enabled);
	struct mptcp_ext *mpext;
	struct sk_buff *skb;
	u16 data_len;
	u64 map_seq;

	skb = skb_peek(&ssk->sk_receive_queue);
	if (!skb)
		return MAPPING_EMPTY;

	if (mptcp_check_fallback(ssk))
		return MAPPING_DUMMY;

	mpext = mptcp_get_ext(skb);
	if (!mpext || !mpext->use_map) {
		if (!subflow->map_valid && !skb->len) {
			/* the TCP stack delivers 0-len FIN packets to the
			 * receive queue, those are the only 0-len packets
			 * ever expected here, and we can admit no mapping
			 * only for 0-len packets
			 */
			if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
				WARN_ONCE(1, "0len seq %d:%d flags %x",
					  TCP_SKB_CB(skb)->seq,
					  TCP_SKB_CB(skb)->end_seq,
					  TCP_SKB_CB(skb)->tcp_flags);
			sk_eat_skb(ssk, skb);
			return MAPPING_EMPTY;
		}

		if (!subflow->map_valid)
			return MAPPING_INVALID;

		goto validate_seq;
	}

	trace_get_mapping_status(mpext);

	data_len = mpext->data_len;
	if (data_len == 0) {
		pr_debug("infinite mapping received");
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
		subflow->map_data_len = 0;
		return MAPPING_INVALID;
	}

	if (mpext->data_fin == 1) {
		if (data_len == 1) {
			bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
								 mpext->dsn64);
			pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
			if (subflow->map_valid) {
				/* A DATA_FIN might arrive in a DSS
				 * option before the previous mapping
				 * has been fully consumed. Continue
				 * handling the existing mapping.
				 */
				skb_ext_del(skb, SKB_EXT_MPTCP);
				return MAPPING_OK;
			} else {
				if (updated)
					mptcp_schedule_work((struct sock *)msk);

				return MAPPING_DATA_FIN;
			}
		} else {
			u64 data_fin_seq = mpext->data_seq + data_len - 1;

			/* If mpext->data_seq is a 32-bit value, data_fin_seq
			 * must also be limited to 32 bits.
			 */
			if (!mpext->dsn64)
				data_fin_seq &= GENMASK_ULL(31, 0);

			mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
			pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
				 data_fin_seq, mpext->dsn64);
		}

		/* Adjust for DATA_FIN using 1 byte of sequence space */
		data_len--;
	}

	map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64);
	WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);

	if (subflow->map_valid) {
		/* Allow replacing only with an identical map */
		if (subflow->map_seq == map_seq &&
		    subflow->map_subflow_seq == mpext->subflow_seq &&
		    subflow->map_data_len == data_len &&
		    subflow->map_csum_reqd == mpext->csum_reqd) {
			skb_ext_del(skb, SKB_EXT_MPTCP);
			goto validate_csum;
		}

		/* If this skb data are fully covered by the current mapping,
		 * the new map would need caching, which is not supported
		 */
		if (skb_is_fully_mapped(ssk, skb)) {
			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
			return MAPPING_INVALID;
		}

		/* will validate the next map after consuming the current one */
		goto validate_csum;
	}

	subflow->map_seq = map_seq;
	subflow->map_subflow_seq = mpext->subflow_seq;
	subflow->map_data_len = data_len;
	subflow->map_valid = 1;
	subflow->map_data_fin = mpext->data_fin;
	subflow->mpc_map = mpext->mpc_map;
	subflow->map_csum_reqd = mpext->csum_reqd;
	subflow->map_csum_len = 0;
	subflow->map_data_csum = csum_unfold(mpext->csum);

	/* Cf. RFC 8684 Section 3.3.0 */
	if (unlikely(subflow->map_csum_reqd != csum_reqd))
		return MAPPING_INVALID;

	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
		 subflow->map_seq, subflow->map_subflow_seq,
		 subflow->map_data_len, subflow->map_csum_reqd,
		 subflow->map_data_csum);

validate_seq:
	/* we revalidate valid mapping on new skb, because we must ensure
	 * the current skb is completely covered by the available mapping
	 */
	if (!validate_mapping(ssk, skb)) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH);
		return MAPPING_INVALID;
	}

	skb_ext_del(skb, SKB_EXT_MPTCP);

validate_csum:
	return validate_data_csum(ssk, skb, csum_reqd);
}

static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
				       u64 limit)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
	u32 incr;

	incr = limit >= skb->len ? skb->len + fin : limit;

	pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
		 subflow->map_subflow_seq);
	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
	tcp_sk(ssk)->copied_seq += incr;
	if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
		sk_eat_skb(ssk, skb);
	if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
		subflow->map_valid = 0;
}

/* sched mptcp worker to remove the subflow if no more data is pending */
static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
{
	if (likely(ssk->sk_state != TCP_CLOSE))
		return;

	if (skb_queue_empty(&ssk->sk_receive_queue) &&
	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
		mptcp_schedule_work((struct sock *)msk);
}

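/* Fallback to plain TCP is possible only before the peer has proved MPTCP
 * support: never on a join subflow, never once a valid checksum has been
 * seen, and never after the connection is fully established.
 */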
static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
{
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	if (subflow->mp_join)
		return false;
	else if (READ_ONCE(msk->csum_enabled))
		return !subflow->valid_csum_seen;
	else
		return !subflow->fully_established;
}

static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned long fail_tout;

	/* graceful failure can happen only on the MPC subflow */
	if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
		return;

	/* since the close timeout takes precedence on the fail one,
	 * no need to start the latter when the first is already set
	 */
	if (sock_flag((struct sock *)msk, SOCK_DEAD))
		return;

	/* we don't need extreme accuracy here, use a zero fail_tout as special
	 * value meaning no fail timeout at all;
	 */
	fail_tout = jiffies + TCP_RTO_MAX;
	if (!fail_tout)
		fail_tout = 1;
	WRITE_ONCE(subflow->fail_tout, fail_tout);
	tcp_send_ack(ssk);

	mptcp_reset_tout_timer(msk, subflow->fail_tout);
}

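/* Core of the subflow receive path: validate the mapping covering the skb
 * at the head of the receive queue, discard duplicate data already acked
 * at the MPTCP level, and on mapping errors either fall back to plain TCP
 * (RFC 8684 section 3.7) or reset the subflow.
 */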
static bool subflow_check_data_avail(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	enum mapping_status status;
	struct mptcp_sock *msk;
	struct sk_buff *skb;

	if (!skb_peek(&ssk->sk_receive_queue))
		WRITE_ONCE(subflow->data_avail, false);
	if (subflow->data_avail)
		return true;

	msk = mptcp_sk(subflow->conn);
	for (;;) {
		u64 ack_seq;
		u64 old_ack;

		status = get_mapping_status(ssk, msk);
		trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
		if (unlikely(status == MAPPING_INVALID || status == MAPPING_DUMMY ||
			     status == MAPPING_BAD_CSUM))
			goto fallback;

		if (status != MAPPING_OK)
			goto no_data;

		skb = skb_peek(&ssk->sk_receive_queue);
		if (WARN_ON_ONCE(!skb))
			goto no_data;

		if (unlikely(!READ_ONCE(msk->can_ack)))
			goto fallback;

		old_ack = READ_ONCE(msk->ack_seq);
		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
			 ack_seq);
		if (unlikely(before64(ack_seq, old_ack))) {
			mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
			continue;
		}

		WRITE_ONCE(subflow->data_avail, true);
		break;
	}
	return true;

no_data:
	subflow_sched_work_if_closed(msk, ssk);
	return false;

fallback:
	if (!__mptcp_check_fallback(msk)) {
		/* RFC 8684 section 3.7. */
		if (status == MAPPING_BAD_CSUM &&
		    (subflow->mp_join || subflow->valid_csum_seen)) {
			subflow->send_mp_fail = 1;

			if (!READ_ONCE(msk->allow_infinite_fallback)) {
				subflow->reset_transient = 0;
				subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
				goto reset;
			}
			mptcp_subflow_fail(msk, ssk);
			WRITE_ONCE(subflow->data_avail, true);
			return true;
		}

		if (!subflow_can_fallback(subflow) && subflow->map_data_len) {
			/* fatal protocol error, close the socket.
			 * subflow_error_report() will introduce the appropriate barriers
			 */
			subflow->reset_transient = 0;
			subflow->reset_reason = MPTCP_RST_EMPTCP;

reset:
			WRITE_ONCE(ssk->sk_err, EBADMSG);
			tcp_set_state(ssk, TCP_CLOSE);
			while ((skb = skb_peek(&ssk->sk_receive_queue)))
				sk_eat_skb(ssk, skb);
			tcp_send_active_reset(ssk, GFP_ATOMIC);
			WRITE_ONCE(subflow->data_avail, false);
			return false;
		}

		mptcp_do_fallback(ssk);
	}

	skb = skb_peek(&ssk->sk_receive_queue);
	subflow->map_valid = 1;
	subflow->map_seq = READ_ONCE(msk->ack_seq);
	subflow->map_data_len = skb->len;
	subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
	WRITE_ONCE(subflow->data_avail, true);
	return true;
}

bool mptcp_subflow_data_available(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	/* check if current mapping is still valid */
	if (subflow->map_valid &&
	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
		subflow->map_valid = 0;
		WRITE_ONCE(subflow->data_avail, false);

		pr_debug("Done with mapping: seq=%u data_len=%u",
			 subflow->map_subflow_seq,
			 subflow->map_data_len);
	}

	return subflow_check_data_avail(sk);
}

/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
 * not the ssk one.
 *
 * In mptcp, rwin is about the mptcp-level connection data.
 *
 * Data that is still on the ssk rx queue can thus be ignored,
 * as far as mptcp peer is concerned that data is still inflight.
 * DSS ACK is updated when skb is moved to the mptcp rx queue.
 */
void mptcp_space(const struct sock *ssk, int *space, int *full_space)
{
	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	const struct sock *sk = subflow->conn;

	*space = __mptcp_space(sk);
	*full_space = mptcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}

static void subflow_error_report(struct sock *ssk)
{
	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

	/* bail early if this is a no-op, so that we avoid introducing a
	 * problematic lockdep dependency between TCP accept queue lock
	 * and msk socket spinlock
	 */
	if (!sk->sk_socket)
		return;

	mptcp_data_lock(sk);
	if (!sock_owned_by_user(sk))
		__mptcp_error_report(sk);
	else
		__set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->cb_flags);
	mptcp_data_unlock(sk);
}

static void subflow_data_ready(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	u16 state = 1 << inet_sk_state_load(sk);
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	trace_sk_data_ready(sk);

	msk = mptcp_sk(parent);
	if (state & TCPF_LISTEN) {
		/* MPJ subflow are removed from accept queue before reaching here,
		 * avoid stray wakeups
		 */
		if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
			return;

		parent->sk_data_ready(parent);
		return;
	}

	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
		     !subflow->mp_join && !(state & TCPF_CLOSE));

	if (mptcp_subflow_data_available(sk)) {
		mptcp_data_ready(parent, sk);

		/* subflow-level lowat tests are not relevant.
		 * respect the msk-level threshold eventually mandating an immediate ack
		 */
		if (mptcp_data_avail(msk) < parent->sk_rcvlowat &&
		    (tcp_sk(sk)->rcv_nxt - tcp_sk(sk)->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss)
			inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
	} else if (unlikely(sk->sk_err)) {
		subflow_error_report(sk);
	}
}

static void subflow_write_space(struct sock *ssk)
{
	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

	mptcp_propagate_sndbuf(sk, ssk);
	mptcp_write_space(sk);
}

static const struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (sk->sk_family == AF_INET6)
		return &subflow_v6_specific;
#endif
	return &subflow_specific;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_connection_sock_af_ops *target;

	target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);

	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
		 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);

	if (likely(icsk->icsk_af_ops == target))
		return;

	subflow->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = target;
}
#endif

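/* Translate a path manager address into a sockaddr of the requested
 * family, converting between v4 and v4-mapped-v6 representations as
 * needed.
 */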
void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
			 struct sockaddr_storage *addr,
			 unsigned short family)
{
	memset(addr, 0, sizeof(*addr));
	addr->ss_family = family;
	if (addr->ss_family == AF_INET) {
		struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;

		if (info->family == AF_INET)
			in_addr->sin_addr = info->addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		else if (ipv6_addr_v4mapped(&info->addr6))
			in_addr->sin_addr.s_addr = info->addr6.s6_addr32[3];
#endif
		in_addr->sin_port = info->port;
	}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->ss_family == AF_INET6) {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;

		if (info->family == AF_INET)
			ipv6_addr_set_v4mapped(info->addr.s_addr,
					       &in6_addr->sin6_addr);
		else
			in6_addr->sin6_addr = info->addr6;
		in6_addr->sin6_port = info->port;
	}
#endif
}

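/* Create, bind and connect an additional (MP_JOIN) subflow from the given
 * local address towards the given remote, linking it into the msk
 * conn_list; on failure the subflow socket is disposed of and the PM
 * subflow accounting is rolled back.
 */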
int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
			    const struct mptcp_addr_info *remote)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_subflow_context *subflow;
	struct sockaddr_storage addr;
	int remote_id = remote->id;
	int local_id = loc->id;
	int err = -ENOTCONN;
	struct socket *sf;
	struct sock *ssk;
	u32 remote_token;
	int addrlen;
	int ifindex;
	u8 flags;

	if (!mptcp_is_fully_established(sk))
		goto err_out;

	err = mptcp_subflow_create_socket(sk, loc->family, &sf);
	if (err)
		goto err_out;

	ssk = sf->sk;
	subflow = mptcp_subflow_ctx(ssk);
	do {
		get_random_bytes(&subflow->local_nonce, sizeof(u32));
	} while (!subflow->local_nonce);

	if (local_id)
		subflow_set_local_id(subflow, local_id);

	mptcp_pm_get_flags_and_ifindex_by_id(msk, local_id,
					     &flags, &ifindex);
	subflow->remote_key_valid = 1;
	subflow->remote_key = msk->remote_key;
	subflow->local_key = msk->local_key;
	subflow->token = msk->token;
	mptcp_info2sockaddr(loc, &addr, ssk->sk_family);

	addrlen = sizeof(struct sockaddr_in);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (addr.ss_family == AF_INET6)
		addrlen = sizeof(struct sockaddr_in6);
#endif
	ssk->sk_bound_dev_if = ifindex;
	err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
	if (err)
		goto failed;

	mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
	pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
		 remote_token, local_id, remote_id);
	subflow->remote_token = remote_token;
	subflow->remote_id = remote_id;
	subflow->request_join = 1;
	subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
	subflow->subflow_id = msk->subflow_id++;
	mptcp_info2sockaddr(remote, &addr, ssk->sk_family);

	sock_hold(ssk);
	list_add_tail(&subflow->node, &msk->conn_list);
	err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
	if (err && err != -EINPROGRESS)
		goto failed_unlink;

	/* discard the subflow socket */
	mptcp_sock_graft(ssk, sk->sk_socket);
	iput(SOCK_INODE(sf));
	WRITE_ONCE(msk->allow_infinite_fallback, false);
	mptcp_stop_tout_timer(sk);
	return 0;

failed_unlink:
	list_del(&subflow->node);
	sock_put(mptcp_subflow_tcp_sock(subflow));

failed:
	subflow->disposable = 1;
	sock_release(sf);

err_out:
	/* we account subflows before the creation, and these failures will not
	 * be caught by sk_state_change()
	 */
	mptcp_pm_close_subflow(msk);
	return err;
}

static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
{
#ifdef CONFIG_SOCK_CGROUP_DATA
	struct sock_cgroup_data *parent_skcd = &parent->sk_cgrp_data,
				*child_skcd = &child->sk_cgrp_data;

	/* only the additional subflows created by kworkers have to be modified */
	if (cgroup_id(sock_cgroup_ptr(parent_skcd)) !=
	    cgroup_id(sock_cgroup_ptr(child_skcd))) {
#ifdef CONFIG_MEMCG
		struct mem_cgroup *memcg = parent->sk_memcg;

		mem_cgroup_sk_free(child);
		if (memcg && css_tryget(&memcg->css))
			child->sk_memcg = memcg;
#endif /* CONFIG_MEMCG */

		cgroup_sk_free(child_skcd);
		*child_skcd = *parent_skcd;
		cgroup_sk_clone(child_skcd);
	}
#endif /* CONFIG_SOCK_CGROUP_DATA */
}

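/* Point the subflow at a patched copy of the TCP proto ops, so that MPTCP
 * can intercept proto-level callbacks; undone when the subflow context is
 * dropped or the subflow falls back to plain TCP.
 */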
static void mptcp_subflow_ops_override(struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (ssk->sk_prot == &tcpv6_prot)
		ssk->sk_prot = &tcpv6_prot_override;
	else
#endif
		ssk->sk_prot = &tcp_prot_override;
}

static void mptcp_subflow_ops_undo_override(struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (ssk->sk_prot == &tcpv6_prot_override)
		ssk->sk_prot = &tcpv6_prot;
	else
#endif
		ssk->sk_prot = &tcp_prot;
}

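/* Create and initialize a new kernel TCP socket to be used as a subflow
 * of @sk. On success the caller owns *new_sock, and the subflow context
 * holds a reference on @sk via subflow->conn. A rough usage sketch (see
 * e.g. __mptcp_subflow_connect() above for the real thing):
 *
 *	struct socket *sf;
 *	int err;
 *
 *	err = mptcp_subflow_create_socket(sk, loc->family, &sf);
 *	if (err)
 *		return err;
 *	(then bind and connect sf->sk, and graft it to the msk)
 */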
int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
				struct socket **new_sock)
{
	struct mptcp_subflow_context *subflow;
	struct net *net = sock_net(sk);
	struct socket *sf;
	int err;

	/* un-accepted server sockets can reach here - on bad configuration
	 * bail early to avoid greater trouble later
	 */
	if (unlikely(!sk->sk_socket))
		return -EINVAL;

	err = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, &sf);
	if (err)
		return err;

	lock_sock_nested(sf->sk, SINGLE_DEPTH_NESTING);

	err = security_mptcp_add_subflow(sk, sf->sk);
	if (err)
		goto err_free;

	/* the newly created socket has to be in the same cgroup as its parent */
	mptcp_attach_cgroup(sk, sf->sk);

	/* kernel sockets do not by default acquire net ref, but TCP timer
	 * needs it.
	 * Update ns_tracker to current stack trace and refcounted tracker.
	 */
	__netns_tracker_free(net, &sf->sk->ns_tracker, false);
	sf->sk->sk_net_refcnt = 1;
	get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL);
	sock_inuse_add(net, 1);
	err = tcp_set_ulp(sf->sk, "mptcp");
	if (err)
		goto err_free;

	mptcp_sockopt_sync_locked(mptcp_sk(sk), sf->sk);
	release_sock(sf->sk);

	/* the newly created socket really belongs to the owning MPTCP master
	 * socket, even if for additional subflows the allocation is performed
	 * by a kernel workqueue. Adjust inode references, so that the
	 * procfs/diag interfaces really show this one belonging to the correct
	 * user.
	 */
	SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
	SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
	SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;

	subflow = mptcp_subflow_ctx(sf->sk);
	pr_debug("subflow=%p", subflow);

	*new_sock = sf;
	sock_hold(sk);
	subflow->conn = sk;
	mptcp_subflow_ops_override(sf->sk);

	return 0;

err_free:
	release_sock(sf->sk);
	sock_release(sf);
	return err;
}

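/* Allocate a zeroed subflow context and publish it through the ULP data
 * pointer; kzalloc() matters here, as the context may be reached via RCU
 * as soon as rcu_assign_pointer() completes.
 */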
static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
							gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;

	ctx = kzalloc(sizeof(*ctx), priority);
	if (!ctx)
		return NULL;

	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	INIT_LIST_HEAD(&ctx->node);
	INIT_LIST_HEAD(&ctx->delegated_node);

	pr_debug("subflow=%p", ctx);

	ctx->tcp_sock = sk;

	return ctx;
}

static void __subflow_state_change(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static bool subflow_is_done(const struct sock *sk)
{
	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
}

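/* sk_state_change() callback installed on every subflow by
 * subflow_ulp_init(): wakes up any waiter and propagates the relevant
 * state transitions to the owning msk.
 */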
static void subflow_state_change(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	__subflow_state_change(sk);

	msk = mptcp_sk(parent);
	if (subflow_simultaneous_connect(sk)) {
		mptcp_do_fallback(sk);
		mptcp_rcv_space_init(msk, sk);
		pr_fallback(msk);
		subflow->conn_finished = 1;
		mptcp_set_connected(parent);
	}

	/* as recvmsg() does not acquire the subflow socket for ssk selection
	 * a fin packet carrying a DSS can go unnoticed if we don't trigger
	 * the data available machinery here.
	 */
	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
	else if (unlikely(sk->sk_err))
		subflow_error_report(sk);

	subflow_sched_work_if_closed(mptcp_sk(parent), sk);

	/* when the fallback subflow closes the rx side, trigger a 'dummy'
	 * ingress data fin, so that the msk state will follow along
	 */
	if (__mptcp_check_fallback(msk) && subflow_is_done(sk) && msk->first == sk &&
	    mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true))
		mptcp_schedule_work(parent);
}

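/* Force-close all the MPTCP sockets still pending, un-accepted, in the
 * accept queue of the given listener subflow, working around the lock
 * ordering constraints documented inline.
 */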
void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
{
	struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
	struct request_sock *req, *head, *tail;
	struct mptcp_subflow_context *subflow;
	struct sock *sk, *ssk;

	/* Due to lock dependencies no relevant lock can be acquired under rskq_lock.
	 * Splice the req list, so that accept() cannot reach the pending ssk after
	 * the listener socket is released below.
	 */
	spin_lock_bh(&queue->rskq_lock);
	head = queue->rskq_accept_head;
	tail = queue->rskq_accept_tail;
	queue->rskq_accept_head = NULL;
	queue->rskq_accept_tail = NULL;
	spin_unlock_bh(&queue->rskq_lock);

	if (!head)
		return;

	/* can't acquire the msk socket lock under the subflow one,
	 * or will cause ABBA deadlock
	 */
	release_sock(listener_ssk);

	for (req = head; req; req = req->dl_next) {
		ssk = req->sk;
		if (!sk_is_mptcp(ssk))
			continue;

		subflow = mptcp_subflow_ctx(ssk);
		if (!subflow || !subflow->conn)
			continue;

		sk = subflow->conn;
		sock_hold(sk);

		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
		__mptcp_unaccepted_force_close(sk);
		release_sock(sk);

		/* lockdep will report a false positive ABBA deadlock
		 * between cancel_work_sync and the listener socket.
		 * The involved locks belong to different sockets WRT
		 * the existing AB chain.
		 * Using a per socket key is problematic as key
		 * deregistration requires process context and must be
		 * performed at socket disposal time, in atomic
		 * context.
		 * Just tell lockdep to consider the listener socket
		 * released here.
		 */
		mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
		mptcp_cancel_work(sk);
		mutex_acquire(&listener_sk->sk_lock.dep_map, 0, 0, _RET_IP_);

		sock_put(sk);
	}

	/* we are still under the listener msk socket lock */
	lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);

	/* restore the listener queue, to let the TCP code clean it up */
	spin_lock_bh(&queue->rskq_lock);
	WARN_ON_ONCE(queue->rskq_accept_head);
	queue->rskq_accept_head = head;
	queue->rskq_accept_tail = tail;
	spin_unlock_bh(&queue->rskq_lock);
}

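/* ULP init hook: create the subflow context, save the original icsk and
 * socket callbacks, and redirect them to the MPTCP handlers. Only kernel
 * sockets may attach this ULP, as the msk creates its subflows itself
 * via sock_create_kern().
 */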
static int subflow_ulp_init(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;

	/* disallow attaching ULP to a socket unless it has been
	 * created with sock_create_kern()
	 */
	if (!sk->sk_kern_sock) {
		err = -EOPNOTSUPP;
		goto out;
	}

	ctx = subflow_create_ctx(sk, GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);

	tp->is_mptcp = 1;
	ctx->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = subflow_default_af_ops(sk);
	ctx->tcp_state_change = sk->sk_state_change;
	ctx->tcp_error_report = sk->sk_error_report;

	WARN_ON_ONCE(sk->sk_data_ready != sock_def_readable);
	WARN_ON_ONCE(sk->sk_write_space != sk_stream_write_space);

	sk->sk_data_ready = subflow_data_ready;
	sk->sk_write_space = subflow_write_space;
	sk->sk_state_change = subflow_state_change;
	sk->sk_error_report = subflow_error_report;
out:
	return err;
}

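/* ULP release hook: drop the msk reference and restore the original
 * proto ops; freeing the ctx itself may have to be deferred when the
 * subflow is still linked to an orphaned msk.
 */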
static void subflow_ulp_release(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
	bool release = true;
	struct sock *sk;

	if (!ctx)
		return;

	sk = ctx->conn;
	if (sk) {
		/* if the msk has been orphaned while the subflow is still
		 * unaccepted, keep the ctx alive; it will be freed by
		 * __mptcp_close_ssk()
		 */
		release = ctx->disposable || list_empty(&ctx->node);

		/* inet_child_forget() does not call sk_state_change(),
		 * explicitly trigger the socket close machinery
		 */
		if (!release && !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW,
						  &mptcp_sk(sk)->flags))
			mptcp_schedule_work(sk);
		sock_put(sk);
	}

	mptcp_subflow_ops_undo_override(ssk);
	if (release)
		kfree_rcu(ctx, rcu);
}

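/* ULP clone hook, invoked when a request socket spawns a child: move the
 * MPTCP-relevant state from the request sock into a freshly allocated
 * context for the new, not yet accepted, subflow.
 */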
static void subflow_ulp_clone(const struct request_sock *req,
			      struct sock *newsk,
			      const gfp_t priority)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
	struct mptcp_subflow_context *new_ctx;

	if (!tcp_rsk(req)->is_mptcp ||
	    (!subflow_req->mp_capable && !subflow_req->mp_join)) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx = subflow_create_ctx(newsk, priority);
	if (!new_ctx) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx->conn_finished = 1;
	new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
	new_ctx->tcp_state_change = old_ctx->tcp_state_change;
	new_ctx->tcp_error_report = old_ctx->tcp_error_report;
	new_ctx->rel_write_seq = 1;
	new_ctx->tcp_sock = newsk;

	if (subflow_req->mp_capable) {
		/* see comments in subflow_syn_recv_sock(), MPTCP connection
		 * is fully established only after we receive the remote key
		 */
		new_ctx->mp_capable = 1;
		new_ctx->local_key = subflow_req->local_key;
		new_ctx->token = subflow_req->token;
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->idsn = subflow_req->idsn;

		/* this is the first subflow, id is always 0 */
		new_ctx->local_id_valid = 1;
	} else if (subflow_req->mp_join) {
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->mp_join = 1;
		new_ctx->fully_established = 1;
		new_ctx->remote_key_valid = 1;
		new_ctx->backup = subflow_req->backup;
		new_ctx->remote_id = subflow_req->remote_id;
		new_ctx->token = subflow_req->token;
		new_ctx->thmac = subflow_req->thmac;

		/* the subflow req id is valid, fetched via subflow_check_req()
		 * and subflow_token_join_request()
		 */
		subflow_set_local_id(new_ctx, subflow_req->local_id);
	}
}

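/* release_cb used by the overridden proto ops: flush the delegated
 * actions accumulated by mptcp_subflow_delegate() before handing over to
 * plain tcp_release_cb().
 */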
static void tcp_release_cb_override(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	long status;

	/* process and clear all the pending actions, but leave the subflow in
	 * the napi queue. To respect locking, only the same CPU that originated
	 * the action can touch the list. mptcp_napi_poll will take care of it.
	 */
	status = set_mask_bits(&subflow->delegated_status, MPTCP_DELEGATE_ACTIONS_MASK, 0);
	if (status)
		mptcp_subflow_process_delegated(ssk, status);

	tcp_release_cb(ssk);
}

static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
	.name		= "mptcp",
	.owner		= THIS_MODULE,
	.init		= subflow_ulp_init,
	.release	= subflow_ulp_release,
	.clone		= subflow_ulp_clone,
};

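/* MPTCP request socks are larger than plain TCP ones, hence the dedicated
 * slab cache; SLAB_TYPESAFE_BY_RCU lets lockless lookups touch requests
 * that may be concurrently freed and reused.
 */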
static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{
	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);

	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
					      subflow_ops->obj_size, 0,
					      SLAB_ACCOUNT |
					      SLAB_TYPESAFE_BY_RCU,
					      NULL);
	if (!subflow_ops->slab)
		return -ENOMEM;

	return 0;
}

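/* Boot-time setup: clone the TCP request sock ops, af-specific ops and
 * proto tables, plug in the MPTCP-specific handlers and register the
 * "mptcp" ULP. Failures here are fatal, hence the panic() calls.
 */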
void __init mptcp_subflow_init(void)
{
	mptcp_subflow_v4_request_sock_ops = tcp_request_sock_ops;
	mptcp_subflow_v4_request_sock_ops.slab_name = "request_sock_subflow_v4";
	mptcp_subflow_v4_request_sock_ops.destructor = subflow_v4_req_destructor;

	if (subflow_ops_init(&mptcp_subflow_v4_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow v4 request sock ops\n");

	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
	subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;
	subflow_request_sock_ipv4_ops.send_synack = subflow_v4_send_synack;

	subflow_specific = ipv4_specific;
	subflow_specific.conn_request = subflow_v4_conn_request;
	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_specific.sk_rx_dst_set = subflow_finish_connect;
	subflow_specific.rebuild_header = subflow_rebuild_header;

	tcp_prot_override = tcp_prot;
	tcp_prot_override.release_cb = tcp_release_cb_override;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	/* In struct mptcp_subflow_request_sock, we assume the TCP request sock
	 * structures for v4 and v6 have the same size. It should not change in
	 * the future, but better to make sure we are warned if it is no longer
	 * the case.
	 */
	BUILD_BUG_ON(sizeof(struct tcp_request_sock) != sizeof(struct tcp6_request_sock));

	mptcp_subflow_v6_request_sock_ops = tcp6_request_sock_ops;
	mptcp_subflow_v6_request_sock_ops.slab_name = "request_sock_subflow_v6";
	mptcp_subflow_v6_request_sock_ops.destructor = subflow_v6_req_destructor;

	if (subflow_ops_init(&mptcp_subflow_v6_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow v6 request sock ops\n");

	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
	subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;
	subflow_request_sock_ipv6_ops.send_synack = subflow_v6_send_synack;

	subflow_v6_specific = ipv6_specific;
	subflow_v6_specific.conn_request = subflow_v6_conn_request;
	subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
	subflow_v6_specific.rebuild_header = subflow_v6_rebuild_header;

	subflow_v6m_specific = subflow_v6_specific;
	subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
	subflow_v6m_specific.send_check = ipv4_specific.send_check;
	subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
	subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
	subflow_v6m_specific.rebuild_header = subflow_rebuild_header;

	tcpv6_prot_override = tcpv6_prot;
	tcpv6_prot_override.release_cb = tcp_release_cb_override;
#endif

	mptcp_diag_subflow_init(&subflow_ulp_ops);

	if (tcp_register_ulp(&subflow_ulp_ops) != 0)
		panic("MPTCP: failed to register subflows to ULP\n");
}