// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"

#include "mib.h"

/* path manager command handlers */

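/* Queue an ADD_ADDR (or its echo) for transmission on this connection:
 * stash the address in the PM data and set the matching addr_signal bit
 * for the option writer to consume. Fails if an announce of the same
 * kind is already pending.
 */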
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr,
			   bool echo)
{
	u8 add_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, local_id=%d, echo=%d", msk, addr->id, echo);

	lockdep_assert_held(&msk->pm.lock);

	if (add_addr &
	    (echo ? BIT(MPTCP_ADD_ADDR_ECHO) : BIT(MPTCP_ADD_ADDR_SIGNAL))) {
		MPTCP_INC_STATS(sock_net((struct sock *)msk),
				echo ? MPTCP_MIB_ECHOADDTXDROP : MPTCP_MIB_ADDADDRTXDROP);
		return -EINVAL;
	}

	if (echo) {
		msk->pm.remote = *addr;
		add_addr |= BIT(MPTCP_ADD_ADDR_ECHO);
	} else {
		msk->pm.local = *addr;
		add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL);
	}
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	return 0;
}

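/* Queue an RM_ADDR carrying @rm_list for transmission: fails when any
 * addr_signal event is still pending; otherwise store the list, set the
 * RM_ADDR_SIGNAL bit and trigger an ack to carry the option out.
 */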
int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
	u8 rm_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);

	if (rm_addr) {
		MPTCP_ADD_STATS(sock_net((struct sock *)msk),
				MPTCP_MIB_RMADDRTXDROP, rm_list->nr);
		return -EINVAL;
	}

	msk->pm.rm_list_tx = *rm_list;
	rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL);
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	mptcp_pm_nl_addr_send_ack(msk);
	return 0;
}

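/* Close the subflows matching the ids in @rm_list; the in-kernel path
 * manager performs the actual teardown under the PM lock.
 */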
int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
	pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);

	spin_lock_bh(&msk->pm.lock);
	mptcp_pm_nl_rm_subflow_received(msk, rm_list);
	spin_unlock_bh(&msk->pm.lock);
	return 0;
}

/* path manager event handlers */

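/* Record which end is the server side and notify userspace that a new
 * MPTCP connection has been created.
 */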
void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);

	WRITE_ONCE(pm->server_side, server_side);
	mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
}

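/* Decide whether an incoming MP_JOIN may create a new subflow: the
 * userspace PM only accounts the new subflow, while the in-kernel PM
 * enforces the configured limit and clears accept_subflow once that
 * limit is reached.
 */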
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	unsigned int subflows_max;
	int ret = 0;

	if (mptcp_pm_is_userspace(msk)) {
		if (mptcp_userspace_pm_active(msk)) {
			spin_lock_bh(&pm->lock);
			pm->subflows++;
			spin_unlock_bh(&pm->lock);
			return true;
		}
		return false;
	}

	subflows_max = mptcp_pm_get_subflows_max(msk);

	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
		 subflows_max, READ_ONCE(pm->accept_subflow));

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->accept_subflow))
		return false;

	spin_lock_bh(&pm->lock);
	if (READ_ONCE(pm->accept_subflow)) {
		ret = pm->subflows < subflows_max;
		if (ret && ++pm->subflows == subflows_max)
			WRITE_ONCE(pm->accept_subflow, false);
	}
	spin_unlock_bh(&pm->lock);

	return ret;
}

/* return true if the new status bit is currently cleared, that is, this event
 * can be served, eventually by an already scheduled work
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
		 BIT(new_status));
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
	mptcp_schedule_work((struct sock *)msk);
	return true;
}

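/* Called when the MPTCP connection becomes fully established: schedule
 * the PM worker, if needed, and emit the MPTCP_EVENT_ESTABLISHED event
 * exactly once.
 */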
void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool announce = false;

	pr_debug("msk=%p", msk);

	spin_lock_bh(&pm->lock);

	/* mptcp_pm_fully_established() can be invoked by multiple
	 * racing paths - accept() and check_fully_established():
	 * be sure to serve this event only once.
	 */
	if (READ_ONCE(pm->work_pending) &&
	    !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);

	if ((msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)) == 0)
		announce = true;

	msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);
	spin_unlock_bh(&pm->lock);

	if (announce)
		mptcp_event(MPTCP_EVENT_ESTABLISHED, msk, ssk, GFP_ATOMIC);
}

164 | |
165 | void mptcp_pm_connection_closed(struct mptcp_sock *msk) |
166 | { |
167 | pr_debug("msk=%p" , msk); |
168 | } |
169 | |
170 | void mptcp_pm_subflow_established(struct mptcp_sock *msk) |
171 | { |
172 | struct mptcp_pm_data *pm = &msk->pm; |
173 | |
174 | pr_debug("msk=%p" , msk); |
175 | |
176 | if (!READ_ONCE(pm->work_pending)) |
177 | return; |
178 | |
179 | spin_lock_bh(lock: &pm->lock); |
180 | |
181 | if (READ_ONCE(pm->work_pending)) |
182 | mptcp_pm_schedule_work(msk, new_status: MPTCP_PM_SUBFLOW_ESTABLISHED); |
183 | |
184 | spin_unlock_bh(lock: &pm->lock); |
185 | } |
186 | |
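/* Update the PM accounting after a subflow is closed and, for the
 * in-kernel PM, let the worker try to create the next subflows, if any.
 */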
void mptcp_pm_subflow_check_next(struct mptcp_sock *msk,
				 const struct mptcp_subflow_context *subflow)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool update_subflows;

	update_subflows = subflow->request_join || subflow->mp_join;
	if (mptcp_pm_is_userspace(msk)) {
		if (update_subflows) {
			spin_lock_bh(&pm->lock);
			pm->subflows--;
			spin_unlock_bh(&pm->lock);
		}
		return;
	}

	if (!READ_ONCE(pm->work_pending) && !update_subflows)
		return;

	spin_lock_bh(&pm->lock);
	if (update_subflows)
		__mptcp_pm_close_subflow(msk);

	/* Even if this subflow is not really established, tell the PM to try
	 * to pick the next ones, if possible.
	 */
	if (mptcp_pm_nl_check_work_pending(msk))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

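/* Handle an incoming ADD_ADDR option: always notify userspace, then
 * either echo the announce right away or hand the address over to the
 * PM worker, accounting a drop when neither is possible.
 */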
void mptcp_pm_add_addr_received(const struct sock *ssk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	mptcp_event_addr_announced(ssk, addr);

	spin_lock_bh(&pm->lock);

	if (mptcp_pm_is_userspace(msk)) {
		if (mptcp_userspace_pm_active(msk)) {
			mptcp_pm_announce_addr(msk, addr, true);
			mptcp_pm_add_addr_send_ack(msk);
		} else {
			__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
		}
	} else if (!READ_ONCE(pm->accept_addr)) {
		mptcp_pm_announce_addr(msk, addr, true);
		mptcp_pm_add_addr_send_ack(msk);
	} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->remote = *addr;
	} else {
		__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
	}

	spin_unlock_bh(&pm->lock);
}

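/* An echo for one of our announces has been received: if work is still
 * pending, let the PM worker try to create additional subflows.
 */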
void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
			      const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	spin_lock_bh(&pm->lock);

	if (mptcp_lookup_anno_list_by_saddr(msk, addr) && READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
{
	if (!mptcp_pm_should_add_signal(msk))
		return;

	mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
}

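/* Handle an incoming RM_ADDR option: emit a removal event for each id
 * and hand the list over to the PM worker, accounting a drop when a
 * previously received list is still being processed.
 */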
void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
			       const struct mptcp_rm_list *rm_list)
{
	struct mptcp_pm_data *pm = &msk->pm;
	u8 i;

	pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr);

	for (i = 0; i < rm_list->nr; i++)
		mptcp_event_addr_removed(msk, rm_list->ids[i]);

	spin_lock_bh(&pm->lock);
	if (mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED))
		pm->rm_list_rx = *rm_list;
	else
		__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_RMADDRDROP);
	spin_unlock_bh(&pm->lock);
}

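/* Handle an incoming MP_PRIO option: update the backup flag of the
 * receiving subflow and notify userspace of the priority change.
 */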
void mptcp_pm_mp_prio_received(struct sock *ssk, u8 bkup)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;
	struct mptcp_sock *msk;

	pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup);
	msk = mptcp_sk(sk);
	if (subflow->backup != bkup)
		subflow->backup = bkup;

	mptcp_event(MPTCP_EVENT_SUB_PRIORITY, msk, ssk, GFP_ATOMIC);
}

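/* Handle an incoming MP_FAIL: when infinite fallback is still possible,
 * either reply with MP_FAIL plus an infinite mapping or, if this is the
 * answer to our own MP_FAIL, clear the pending fail timeout.
 */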
void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	pr_debug("fail_seq=%llu", fail_seq);

	if (!READ_ONCE(msk->allow_infinite_fallback))
		return;

	if (!subflow->fail_tout) {
		pr_debug("send MP_FAIL response and infinite map");

		subflow->send_mp_fail = 1;
		subflow->send_infinite_map = 1;
		tcp_send_ack(sk);
	} else {
		pr_debug("MP_FAIL response received");
		WRITE_ONCE(subflow->fail_tout, 0);
	}
}

/* path manager helpers */

bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb,
			      unsigned int opt_size, unsigned int remaining,
			      struct mptcp_addr_info *addr, bool *echo,
			      bool *drop_other_suboptions)
{
	int ret = false;
	u8 add_addr;
	u8 family;
	bool port;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_add_signal(msk))
		goto out_unlock;

	/* always drop all other suboptions for a pure ack ADD_ADDR; this is a
	 * plain dup-ack from the TCP perspective. The other MPTCP-relevant
	 * info, if any, will be carried by the 'original' TCP ack
	 */
	if (skb && skb_is_tcp_pure_ack(skb)) {
		remaining += opt_size;
		*drop_other_suboptions = true;
	}

	*echo = mptcp_pm_should_add_signal_echo(msk);
	port = !!(*echo ? msk->pm.remote.port : msk->pm.local.port);

	family = *echo ? msk->pm.remote.family : msk->pm.local.family;
	if (remaining < mptcp_add_addr_len(family, *echo, port))
		goto out_unlock;

	if (*echo) {
		*addr = msk->pm.remote;
		add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_ECHO);
	} else {
		*addr = msk->pm.local;
		add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_SIGNAL);
	}
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

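/* Fetch the RM_ADDR list to transmit, if it fits in the @remaining
 * option space, and clear the RM_ADDR_SIGNAL bit. Returns true when
 * @rm_list has been filled in.
 */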
bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			     struct mptcp_rm_list *rm_list)
{
	int ret = false, len;
	u8 rm_addr;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_rm_signal(msk))
		goto out_unlock;

	rm_addr = msk->pm.addr_signal & ~BIT(MPTCP_RM_ADDR_SIGNAL);
	len = mptcp_rm_addr_len(&msk->pm.rm_list_tx);
	if (len < 0) {
		WRITE_ONCE(msk->pm.addr_signal, rm_addr);
		goto out_unlock;
	}
	if (remaining < len)
		goto out_unlock;

	*rm_list = msk->pm.rm_list_tx;
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

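/* Map the local address of @skc to an address id: id 0 is reserved for
 * the address of the initial subflow, while any other mapping is
 * delegated to the active path manager.
 */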
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	struct mptcp_addr_info skc_local;
	struct mptcp_addr_info msk_local;

	if (WARN_ON_ONCE(!msk))
		return -1;

	/* The 0 ID mapping is defined by the first subflow, copied into the msk
	 * addr
	 */
	mptcp_local_address((struct sock_common *)msk, &msk_local);
	mptcp_local_address((struct sock_common *)skc, &skc_local);
	if (mptcp_addresses_equal(&msk_local, &skc_local, false))
		return 0;

	if (mptcp_pm_is_userspace(msk))
		return mptcp_userspace_pm_get_local_id(msk, &skc_local);
	return mptcp_pm_nl_get_local_id(msk, &skc_local);
}

int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id,
					 u8 *flags, int *ifindex)
{
	*flags = 0;
	*ifindex = 0;

	if (!id)
		return 0;

	if (mptcp_pm_is_userspace(msk))
		return mptcp_userspace_pm_get_flags_and_ifindex_by_id(msk, id, flags, ifindex);
	return mptcp_pm_nl_get_flags_and_ifindex_by_id(msk, id, flags, ifindex);
}

int mptcp_pm_set_flags(struct net *net, struct nlattr *token,
		       struct mptcp_pm_addr_entry *loc,
		       struct mptcp_pm_addr_entry *rem, u8 bkup)
{
	if (token)
		return mptcp_userspace_pm_set_flags(net, token, loc, rem, bkup);
	return mptcp_pm_nl_set_flags(net, loc, bkup);
}

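/* Track retransmission periods with no forward progress: a subflow whose
 * rcv_tstamp does not move between consecutive rtx events is a candidate
 * for being marked as stale.
 */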
void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 rcv_tstamp = READ_ONCE(tcp_sk(ssk)->rcv_tstamp);

	/* keep track of rtx periods with no progress */
	if (!subflow->stale_count) {
		subflow->stale_rcv_tstamp = rcv_tstamp;
		subflow->stale_count++;
	} else if (subflow->stale_rcv_tstamp == rcv_tstamp) {
		if (subflow->stale_count < U8_MAX)
			subflow->stale_count++;
		mptcp_pm_nl_subflow_chk_stale(msk, ssk);
	} else {
		subflow->stale_count = 0;
		mptcp_subflow_set_active(subflow);
	}
}

/* if sk is ipv4 or ipv6_only, allow only same-family local and remote
 * addresses, otherwise allow any matching local/remote pair
 */
bool mptcp_pm_addr_families_match(const struct sock *sk,
				  const struct mptcp_addr_info *loc,
				  const struct mptcp_addr_info *rem)
{
	bool mptcp_is_v4 = sk->sk_family == AF_INET;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	bool loc_is_v4 = loc->family == AF_INET || ipv6_addr_v4mapped(&loc->addr6);
	bool rem_is_v4 = rem->family == AF_INET || ipv6_addr_v4mapped(&rem->addr6);

	if (mptcp_is_v4)
		return loc_is_v4 && rem_is_v4;

	if (ipv6_only_sock(sk))
		return !loc_is_v4 && !rem_is_v4;

	return loc_is_v4 == rem_is_v4;
#else
	return mptcp_is_v4 && loc->family == AF_INET && rem->family == AF_INET;
#endif
}

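/* Reset the PM state of a (re)starting connection, deriving the
 * work_pending/accept flags from the configured limits when the
 * in-kernel PM is in use.
 */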
void mptcp_pm_data_reset(struct mptcp_sock *msk)
{
	u8 pm_type = mptcp_get_pm_type(sock_net((struct sock *)msk));
	struct mptcp_pm_data *pm = &msk->pm;

	pm->add_addr_signaled = 0;
	pm->add_addr_accepted = 0;
	pm->local_addr_used = 0;
	pm->subflows = 0;
	pm->rm_list_tx.nr = 0;
	pm->rm_list_rx.nr = 0;
	WRITE_ONCE(pm->pm_type, pm_type);

	if (pm_type == MPTCP_PM_TYPE_KERNEL) {
		bool subflows_allowed = !!mptcp_pm_get_subflows_max(msk);

		/* pm->work_pending must only be set to 'true' when
		 * pm->pm_type is set to MPTCP_PM_TYPE_KERNEL
		 */
		WRITE_ONCE(pm->work_pending,
			   (!!mptcp_pm_get_local_addr_max(msk) &&
			    subflows_allowed) ||
			   !!mptcp_pm_get_add_addr_signal_max(msk));
		WRITE_ONCE(pm->accept_addr,
			   !!mptcp_pm_get_add_addr_accept_max(msk) &&
			   subflows_allowed);
		WRITE_ONCE(pm->accept_subflow, subflows_allowed);
	} else {
		WRITE_ONCE(pm->work_pending, 0);
		WRITE_ONCE(pm->accept_addr, 0);
		WRITE_ONCE(pm->accept_subflow, 0);
	}

	WRITE_ONCE(pm->addr_signal, 0);
	WRITE_ONCE(pm->remote_deny_join_id0, false);
	pm->status = 0;
	bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
}

void mptcp_pm_data_init(struct mptcp_sock *msk)
{
	spin_lock_init(&msk->pm.lock);
	INIT_LIST_HEAD(&msk->pm.anno_list);
	INIT_LIST_HEAD(&msk->pm.userspace_pm_local_addr_list);
	mptcp_pm_data_reset(msk);
}

void __init mptcp_pm_init(void)
{
	mptcp_pm_nl_init();
}