// SPDX-License-Identifier: GPL-2.0-only
/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/audit.h>
#include <linux/rhashtable.h>
#include <linux/if_tunnel.h>
#include <linux/icmp.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/gre.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/mip6.h>
#endif
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif
#ifdef CONFIG_XFRM_ESPINTCP
#include <net/espintcp.h>
#endif

#include "xfrm_hash.h"

#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN	100

struct xfrm_flo {
	struct dst_entry *dst_orig;
	u8 flags;
};

/* prefixes smaller than this are stored in lists, not trees. */
#define INEXACT_PREFIXLEN_IPV4	16
#define INEXACT_PREFIXLEN_IPV6	48

struct xfrm_pol_inexact_node {
	struct rb_node node;
	union {
		xfrm_address_t addr;
		struct rcu_head rcu;
	};
	u8 prefixlen;

	struct rb_root root;
	/* the policies matching this node; the list may be empty */
	struct hlist_head hhead;
};

/* xfrm inexact policy search tree:
 * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
 *  |
 * +---- root_d: sorted by daddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 +- root: sorted by saddr/prefix
 * |                 |              |
 * |                 |         xfrm_pol_inexact_node
 * |                 |              |
 * |                 |              + root: unused
 * |                 |              |
 * |                 |              + hhead: saddr:daddr policies
 * |                 |
 * |                 +- coarse policies and all any:daddr policies
 * |
 * +---- root_s: sorted by saddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 + root: unused
 * |                 |
 * |                 + hhead: saddr:any policies
 * |
 * +---- coarse policies and all any:any policies
 *
 * Lookups return four candidate lists:
 * 1. any:any list from top-level xfrm_pol_inexact_bin
 * 2. any:daddr list from daddr tree
 * 3. saddr:daddr list from 2nd level daddr tree
 * 4. saddr:any list from saddr tree
 *
 * This result set then needs to be searched for the policy with
 * the lowest priority. If two candidates have the same priority, the
 * one with the lower ->pos (i.e. the one that was inserted first) wins.
 */
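
/* Illustration (example policies, not from an actual configuration):
 * with an any:any policy A, an any:192.0.2.0/24 policy B and a
 * 198.51.100.0/24:192.0.2.0/24 policy C, a flow from 198.51.100.1 to
 * 192.0.2.1 yields list 1 = {A}, list 2 = {B}, list 3 = {C} and an
 * empty list 4; all three policies are then compared by priority/pos.
 */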

struct xfrm_pol_inexact_key {
	possible_net_t net;
	u32 if_id;
	u16 family;
	u8 dir, type;
};

struct xfrm_pol_inexact_bin {
	struct xfrm_pol_inexact_key k;
	struct rhash_head head;
	/* list containing '*:*' policies */
	struct hlist_head hhead;

	seqcount_spinlock_t count;
	/* tree sorted by daddr/prefix */
	struct rb_root root_d;

	/* tree sorted by saddr/prefix */
	struct rb_root root_s;

	/* slow path below */
	struct list_head inexact_bins;
	struct rcu_head rcu;
};

enum xfrm_pol_inexact_candidate_type {
	XFRM_POL_CAND_BOTH,
	XFRM_POL_CAND_SADDR,
	XFRM_POL_CAND_DADDR,
	XFRM_POL_CAND_ANY,

	XFRM_POL_CAND_MAX,
};

struct xfrm_pol_inexact_candidates {
	struct hlist_head *res[XFRM_POL_CAND_MAX];
};

struct xfrm_flow_keys {
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_control control;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	} addrs;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_ports ports;
	struct flow_dissector_key_keyid gre;
};

static struct flow_dissector xfrm_session_dissector __ro_after_init;

static DEFINE_SPINLOCK(xfrm_if_cb_lock);
static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;

static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __ro_after_init;

static struct rhashtable xfrm_policy_inexact_table;
static const struct rhashtable_params xfrm_pol_inexact_params;

static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(struct timer_list *t);

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
			   u32 if_id);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net,
			       u8 type, u16 family, u8 dir, u32 if_id);
static struct xfrm_policy *
xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
			bool excl);
static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy);

static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
				    struct xfrm_pol_inexact_bin *b,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr);

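/* Grab a reference on a policy found during an RCU lookup; fails (and
 * the lookup must be treated as a miss) if the refcount already hit
 * zero because the policy is being destroyed.
 */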
static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
{
	return refcount_inc_not_zero(&policy->refcnt);
}

static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}

static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}

bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}

static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	const struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

/* Called with rcu_read_lock(). */
static const struct xfrm_if_cb *xfrm_if_get_cb(void)
{
	return rcu_dereference(xfrm_if_cb);
}

struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr,
				    int family, u32 mark)
{
	const struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);

	rcu_read_unlock();

	return dst;
}
EXPORT_SYMBOL(__xfrm_dst_lookup);

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
						int tos, int oif,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family, u32 mark)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
	}

	return dst;
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

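/* Per-policy lifetime timer. Computes the nearest expiry among the four
 * configured lifetimes: a hard expiry deletes the policy, a soft expiry
 * only notifies key managers and re-arms the timer after XFRM_KM_TIMEOUT.
 */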
static void xfrm_policy_timer(struct timer_list *t)
{
	struct xfrm_policy *xp = from_timer(xp, t, timer);
	time64_t now = ktime_get_real_seconds();
	time64_t next = TIME64_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		time64_t tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		time64_t tmo = xp->lft.hard_use_expires_seconds +
			(READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		time64_t tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		time64_t tmo = xp->lft.soft_use_expires_seconds +
			(READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != TIME64_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}

/* Allocate an xfrm_policy. Not used here; it is meant to be used by
 * pfkeyv2 SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst_inexact_list);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		refcount_set(&policy->refcnt, 1);
		skb_queue_head_init(&policy->polq.hold_queue);
		timer_setup(&policy->timer, xfrm_policy_timer, 0);
		timer_setup(&policy->polq.hold_timer,
			    xfrm_policy_queue_process, 0);
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

static void xfrm_policy_destroy_rcu(struct rcu_head *head)
{
	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}

/* Destroy xfrm_policy: descendant resources must have been released by this point. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
		BUG();

	xfrm_dev_policy_free(policy);
	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
}
EXPORT_SYMBOL(xfrm_policy_destroy);

/* Rule must be locked. Release descendant resources, announce the entry
 * dead. The rule must already be unlinked from the lists at this point.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	write_lock_bh(&policy->lock);
	policy->walk.dead = 1;
	write_unlock_bh(&policy->lock);

	atomic_inc(&policy->genid);

	if (del_timer(&policy->polq.hold_timer))
		xfrm_pol_put(policy);
	skb_queue_purge(&policy->polq.hold_queue);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}

static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

/* calculate policy hash thresholds */
static void __get_hash_thresh(struct net *net,
			      unsigned short family, int dir,
			      u8 *dbits, u8 *sbits)
{
	switch (family) {
	case AF_INET:
		*dbits = net->xfrm.policy_bydst[dir].dbits4;
		*sbits = net->xfrm.policy_bydst[dir].sbits4;
		break;

	case AF_INET6:
		*dbits = net->xfrm.policy_bydst[dir].dbits6;
		*sbits = net->xfrm.policy_bydst[dir].sbits6;
		break;

	default:
		*dbits = 0;
		*sbits = 0;
	}
}

static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __sel_hash(sel, family, hmask, dbits, sbits);

	if (hash == hmask + 1)
		return NULL;

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static void xfrm_dst_hash_transfer(struct net *net,
				   struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask,
				   int dir)
{
	struct hlist_node *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;
	u8 dbits;
	u8 sbits;

redo:
	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
		unsigned int h;

		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask, dbits, sbits);
		if (!entry0 || pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
			hlist_del_rcu(&pol->bydst);
			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del_rcu(&pol->bydst);
			hlist_add_behind_rcu(&pol->bydst, entry0);
		}
		entry0 = &pol->bydst;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

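/* Double the hash table size while keeping the power-of-two-minus-one
 * mask form, e.g. 0xf -> 0x1f.
 */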
static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}

static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	struct hlist_head *odst;
	int i;

	if (!ndst)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);

	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
				lockdep_is_held(&net->xfrm.xfrm_policy_lock));

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);

	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	synchronize_rcu();

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net);

	mutex_unlock(&hash_resize_mutex);
}

/* Make sure *pol can be inserted into fastbin.
 * Useful to check that later insert requests will be successful
 * (provided xfrm_policy_lock is held throughout).
 */
static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
{
	struct xfrm_pol_inexact_bin *bin, *prev;
	struct xfrm_pol_inexact_key k = {
		.family = pol->family,
		.type = pol->type,
		.dir = dir,
		.if_id = pol->if_id,
	};
	struct net *net = xp_net(pol);

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	write_pnet(&k.net, net);
	bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
				     xfrm_pol_inexact_params);
	if (bin)
		return bin;

	bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
	if (!bin)
		return NULL;

	bin->k = k;
	INIT_HLIST_HEAD(&bin->hhead);
	bin->root_d = RB_ROOT;
	bin->root_s = RB_ROOT;
	seqcount_spinlock_init(&bin->count, &net->xfrm.xfrm_policy_lock);

	prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
						&bin->k, &bin->head,
						xfrm_pol_inexact_params);
	if (!prev) {
		list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
		return bin;
	}

	kfree(bin);

	return IS_ERR(prev) ? NULL : prev;
}

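/* Decide whether policies for this address go on the bin's list instead
 * of a tree: true for wildcard addresses and for prefixes shorter than
 * the INEXACT_PREFIXLEN_* cut-off (e.g. an IPv4 /12 stays on the list).
 */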
static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
					       int family, u8 prefixlen)
{
	if (xfrm_addr_any(addr, family))
		return true;

	if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
		return true;

	if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
		return true;

	return false;
}

static bool
xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
{
	const xfrm_address_t *addr;
	bool saddr_any, daddr_any;
	u8 prefixlen;

	addr = &policy->selector.saddr;
	prefixlen = policy->selector.prefixlen_s;

	saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	addr = &policy->selector.daddr;
	prefixlen = policy->selector.prefixlen_d;
	daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	return saddr_any && daddr_any;
}

static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
				       const xfrm_address_t *addr, u8 prefixlen)
{
	node->addr = *addr;
	node->prefixlen = prefixlen;
}

static struct xfrm_pol_inexact_node *
xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
{
	struct xfrm_pol_inexact_node *node;

	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (node)
		xfrm_pol_inexact_node_init(node, addr, prefixlen);

	return node;
}

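/* memcmp-like ordering of @a and @b restricted to the first @prefixlen
 * bits; returns <0, 0 or >0. E.g. with AF_INET and prefixlen 24,
 * 10.1.2.3 and 10.1.2.200 compare equal (both mask to 10.1.2.0).
 */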
static int xfrm_policy_addr_delta(const xfrm_address_t *a,
				  const xfrm_address_t *b,
				  u8 prefixlen, u16 family)
{
	u32 ma, mb, mask;
	unsigned int pdw, pbi;
	int delta = 0;

	switch (family) {
	case AF_INET:
		if (prefixlen == 0)
			return 0;
		mask = ~0U << (32 - prefixlen);
		ma = ntohl(a->a4) & mask;
		mb = ntohl(b->a4) & mask;
		if (ma < mb)
			delta = -1;
		else if (ma > mb)
			delta = 1;
		break;
	case AF_INET6:
		pdw = prefixlen >> 5;
		pbi = prefixlen & 0x1f;

		if (pdw) {
			delta = memcmp(a->a6, b->a6, pdw << 2);
			if (delta)
				return delta;
		}
		if (pbi) {
			mask = ~0U << (32 - pbi);
			ma = ntohl(a->a6[pdw]) & mask;
			mb = ntohl(b->a6[pdw]) & mask;
			if (ma < mb)
				delta = -1;
			else if (ma > mb)
				delta = 1;
		}
		break;
	default:
		break;
	}

	return delta;
}

static void xfrm_policy_inexact_list_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      u16 family)
{
	unsigned int matched_s, matched_d;
	struct xfrm_policy *policy, *p;

	matched_s = 0;
	matched_d = 0;

	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		struct hlist_node *newpos = NULL;
		bool matches_s, matches_d;

		if (policy->walk.dead || !policy->bydst_reinsert)
			continue;

		WARN_ON_ONCE(policy->family != family);

		policy->bydst_reinsert = false;
		hlist_for_each_entry(p, &n->hhead, bydst) {
			if (policy->priority > p->priority)
				newpos = &p->bydst;
			else if (policy->priority == p->priority &&
				 policy->pos > p->pos)
				newpos = &p->bydst;
			else
				break;
		}

		if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, &n->hhead);

		/* paranoia checks follow.
		 * Check that the reinserted policy matches at least
		 * saddr or daddr for current node prefix.
		 *
		 * Matching both is fine, matching saddr in one policy
		 * (but not daddr) and then matching only daddr in another
		 * is a bug.
		 */
		matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		if (matches_s && matches_d)
			continue;

		WARN_ON_ONCE(!matches_s && !matches_d);
		if (matches_s)
			matched_s++;
		if (matches_d)
			matched_d++;
		WARN_ON_ONCE(matched_s && matched_d);
	}
}

static void xfrm_policy_inexact_node_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      struct rb_root *new,
					      u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node **p, *parent;

	/* we should not have another subtree here */
	WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
restart:
	parent = NULL;
	p = &new->rb_node;
	while (*p) {
		u8 prefixlen;
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		prefixlen = min(node->prefixlen, n->prefixlen);

		delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
					       prefixlen, family);
		if (delta < 0) {
			p = &parent->rb_left;
		} else if (delta > 0) {
			p = &parent->rb_right;
		} else {
			bool same_prefixlen = node->prefixlen == n->prefixlen;
			struct xfrm_policy *tmp;

			hlist_for_each_entry(tmp, &n->hhead, bydst) {
				tmp->bydst_reinsert = true;
				hlist_del_rcu(&tmp->bydst);
			}

			node->prefixlen = prefixlen;

			xfrm_policy_inexact_list_reinsert(net, node, family);

			if (same_prefixlen) {
				kfree_rcu(n, rcu);
				return;
			}

			rb_erase(*p, new);
			kfree_rcu(n, rcu);
			n = node;
			goto restart;
		}
	}

	rb_link_node_rcu(&n->node, parent, p);
	rb_insert_color(&n->node, new);
}

/* merge nodes v and n */
static void xfrm_policy_inexact_node_merge(struct net *net,
					   struct xfrm_pol_inexact_node *v,
					   struct xfrm_pol_inexact_node *n,
					   u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct xfrm_policy *tmp;
	struct rb_node *rnode;

	/* To-be-merged node v has a subtree.
	 *
	 * Dismantle it and insert its nodes to n->root.
	 */
	while ((rnode = rb_first(&v->root)) != NULL) {
		node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
		rb_erase(&node->node, &v->root);
		xfrm_policy_inexact_node_reinsert(net, node, &n->root,
						  family);
	}

	hlist_for_each_entry(tmp, &v->hhead, bydst) {
		tmp->bydst_reinsert = true;
		hlist_del_rcu(&tmp->bydst);
	}

	xfrm_policy_inexact_list_reinsert(net, n, family);
}

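/* Find or insert the tree node for @addr/@prefixlen in @root. If the
 * new, shorter prefix covers existing more-specific nodes, those nodes
 * are unlinked and their subtrees and policy lists merged into the node
 * that finally gets (re)inserted, so no policies are lost.
 */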
static struct xfrm_pol_inexact_node *
xfrm_policy_inexact_insert_node(struct net *net,
				struct rb_root *root,
				xfrm_address_t *addr,
				u16 family, u8 prefixlen, u8 dir)
{
	struct xfrm_pol_inexact_node *cached = NULL;
	struct rb_node **p, *parent = NULL;
	struct xfrm_pol_inexact_node *node;

	p = &root->rb_node;
	while (*p) {
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		delta = xfrm_policy_addr_delta(addr, &node->addr,
					       node->prefixlen,
					       family);
		if (delta == 0 && prefixlen >= node->prefixlen) {
			WARN_ON_ONCE(cached); /* ipsec policies got lost */
			return node;
		}

		if (delta < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;

		if (prefixlen < node->prefixlen) {
			delta = xfrm_policy_addr_delta(addr, &node->addr,
						       prefixlen,
						       family);
			if (delta)
				continue;

			/* This node is a subnet of the new prefix. It needs
			 * to be removed and re-inserted with the smaller
			 * prefix and all nodes that are now also covered
			 * by the reduced prefixlen.
			 */
			rb_erase(&node->node, root);

			if (!cached) {
				xfrm_pol_inexact_node_init(node, addr,
							   prefixlen);
				cached = node;
			} else {
				/* This node also falls within the new
				 * prefixlen. Merge the to-be-reinserted
				 * node and this one.
				 */
				xfrm_policy_inexact_node_merge(net, node,
							       cached, family);
				kfree_rcu(node, rcu);
			}

			/* restart */
			p = &root->rb_node;
			parent = NULL;
		}
	}

	node = cached;
	if (!node) {
		node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
		if (!node)
			return NULL;
	}

	rb_link_node_rcu(&node->node, parent, p);
	rb_insert_color(&node->node, root);

	return node;
}

static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node *rn = rb_first(r);

	while (rn) {
		node = rb_entry(rn, struct xfrm_pol_inexact_node, node);

		xfrm_policy_inexact_gc_tree(&node->root, rm);
		rn = rb_next(rn);

		if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
			WARN_ON_ONCE(rm);
			continue;
		}

		rb_erase(&node->node, r);
		kfree_rcu(node, rcu);
	}
}

static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
{
	write_seqcount_begin(&b->count);
	xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
	xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
	write_seqcount_end(&b->count);

	if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
	    !hlist_empty(&b->hhead)) {
		WARN_ON_ONCE(net_exit);
		return;
	}

	if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
				   xfrm_pol_inexact_params) == 0) {
		list_del(&b->inexact_bins);
		kfree_rcu(b, rcu);
	}
}

static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
{
	struct net *net = read_pnet(&b->k.net);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	__xfrm_policy_inexact_prune_bin(b, false);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}

static void __xfrm_policy_inexact_flush(struct net *net)
{
	struct xfrm_pol_inexact_bin *bin, *t;

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
		__xfrm_policy_inexact_prune_bin(bin, false);
}

static struct hlist_head *
xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
				struct xfrm_policy *policy, u8 dir)
{
	struct xfrm_pol_inexact_node *n;
	struct net *net;

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	if (xfrm_policy_inexact_insert_use_any_list(policy))
		return &bin->hhead;

	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
					       policy->family,
					       policy->selector.prefixlen_d)) {
		write_seqcount_begin(&bin->count);
		n = xfrm_policy_inexact_insert_node(net,
						    &bin->root_s,
						    &policy->selector.saddr,
						    policy->family,
						    policy->selector.prefixlen_s,
						    dir);
		write_seqcount_end(&bin->count);
		if (!n)
			return NULL;

		return &n->hhead;
	}

	/* daddr is fixed */
	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &bin->root_d,
					    &policy->selector.daddr,
					    policy->family,
					    policy->selector.prefixlen_d, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	/* saddr is wildcard */
	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
					       policy->family,
					       policy->selector.prefixlen_s))
		return &n->hhead;

	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &n->root,
					    &policy->selector.saddr,
					    policy->family,
					    policy->selector.prefixlen_s, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	return &n->hhead;
}

static struct xfrm_policy *
xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
{
	struct xfrm_pol_inexact_bin *bin;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct net *net;

	bin = xfrm_policy_inexact_alloc_bin(policy, dir);
	if (!bin)
		return ERR_PTR(-ENOMEM);

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
	if (!chain) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-ENOMEM);
	}

	delpol = xfrm_policy_insert_list(chain, policy, excl);
	if (delpol && excl) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-EEXIST);
	}

	chain = &net->xfrm.policy_inexact[dir];
	xfrm_policy_insert_inexact_list(chain, policy);

	if (delpol)
		__xfrm_policy_inexact_prune_bin(bin, false);

	return delpol;
}

static void xfrm_hash_rebuild(struct work_struct *work)
{
	struct net *net = container_of(work, struct net,
				       xfrm.policy_hthresh.work);
	unsigned int hmask;
	struct xfrm_policy *pol;
	struct xfrm_policy *policy;
	struct hlist_head *chain;
	struct hlist_head *odst;
	struct hlist_node *newpos;
	int i;
	int dir;
	unsigned seq;
	u8 lbits4, rbits4, lbits6, rbits6;

	mutex_lock(&hash_resize_mutex);

	/* read selector prefixlen thresholds */
	do {
		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);

		lbits4 = net->xfrm.policy_hthresh.lbits4;
		rbits4 = net->xfrm.policy_hthresh.rbits4;
		lbits6 = net->xfrm.policy_hthresh.lbits6;
		rbits6 = net->xfrm.policy_hthresh.rbits6;
	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);

	/* make sure that we can insert the indirect policies again before
	 * we start with destructive action.
	 */
	list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
		struct xfrm_pol_inexact_bin *bin;
		u8 dbits, sbits;

		if (policy->walk.dead)
			continue;

		dir = xfrm_policy_id2dir(policy->index);
		if (dir >= XFRM_POLICY_MAX)
			continue;

		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			if (policy->family == AF_INET) {
				dbits = rbits4;
				sbits = lbits4;
			} else {
				dbits = rbits6;
				sbits = lbits6;
			}
		} else {
			if (policy->family == AF_INET) {
				dbits = lbits4;
				sbits = rbits4;
			} else {
				dbits = lbits6;
				sbits = rbits6;
			}
		}

		if (policy->selector.prefixlen_d < dbits ||
		    policy->selector.prefixlen_s < sbits)
			continue;

		bin = xfrm_policy_inexact_alloc_bin(policy, dir);
		if (!bin)
			goto out_unlock;

		if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
			goto out_unlock;
	}

	/* reset the bydst and inexact table in all directions */
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct hlist_node *n;

		hlist_for_each_entry_safe(policy, n,
					  &net->xfrm.policy_inexact[dir],
					  bydst_inexact_list) {
			hlist_del_rcu(&policy->bydst);
			hlist_del_init(&policy->bydst_inexact_list);
		}

		hmask = net->xfrm.policy_bydst[dir].hmask;
		odst = net->xfrm.policy_bydst[dir].table;
		for (i = hmask; i >= 0; i--) {
			hlist_for_each_entry_safe(policy, n, odst + i, bydst)
				hlist_del_rcu(&policy->bydst);
		}
		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			/* dir out => dst = remote, src = local */
			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
		} else {
			/* dir in/fwd => dst = local, src = remote */
			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
		}
	}

	/* re-insert all policies by order of creation */
	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		if (policy->walk.dead)
			continue;
		dir = xfrm_policy_id2dir(policy->index);
		if (dir >= XFRM_POLICY_MAX) {
			/* skip socket policies */
			continue;
		}
		newpos = NULL;
		chain = policy_hash_bysel(net, &policy->selector,
					  policy->family, dir);

		if (!chain) {
			void *p = xfrm_policy_inexact_insert(policy, dir, 0);

			WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
			continue;
		}

		hlist_for_each_entry(pol, chain, bydst) {
			if (policy->priority >= pol->priority)
				newpos = &pol->bydst;
			else
				break;
		}
		if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, chain);
	}

out_unlock:
	__xfrm_policy_inexact_flush(net);
	write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	mutex_unlock(&hash_resize_mutex);
}

void xfrm_policy_hash_rebuild(struct net *net)
{
	schedule_work(&net->xfrm.policy_hthresh.work);
}
EXPORT_SYMBOL(xfrm_policy_hash_rebuild);

/* Generate a new index. KAME seems to generate indexes ordered by cost,
 * relying on absolute unpredictability of rule ordering; that approach
 * does not work here.
 */
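/* The direction is encoded in the low bits of the index (recovered via
 * xfrm_policy_id2dir()), hence the generator advances in steps of 8;
 * an explicitly requested index is used as-is when still free.
 */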
static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
	for (;;) {
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		if (!index) {
			idx = (net->xfrm.idx_generator | dir);
			net->xfrm.idx_generator += 8;
		} else {
			idx = index;
			index = 0;
		}

		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}

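/* Word-wise selector comparison: returns nonzero as soon as any 32-bit
 * word differs, 0 if the selectors are identical (unused/padding words
 * are compared too).
 */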
static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}

static void xfrm_policy_requeue(struct xfrm_policy *old,
				struct xfrm_policy *new)
{
	struct xfrm_policy_queue *pq = &old->polq;
	struct sk_buff_head list;

	if (skb_queue_empty(&pq->hold_queue))
		return;

	__skb_queue_head_init(&list);

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice_init(&pq->hold_queue, &list);
	if (del_timer(&pq->hold_timer))
		xfrm_pol_put(old);
	spin_unlock_bh(&pq->hold_queue.lock);

	pq = &new->polq;

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice(&list, &pq->hold_queue);
	pq->timeout = XFRM_QUEUE_TMO_MIN;
	if (!mod_timer(&pq->hold_timer, jiffies))
		xfrm_pol_hold(new);
	spin_unlock_bh(&pq->hold_queue.lock);
}

static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
					  struct xfrm_policy *pol)
{
	return mark->v == pol->mark.v && mark->m == pol->mark.m;
}

static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_key *k = data;
	u32 a = k->type << 24 | k->dir << 16 | k->family;

	return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
			    seed);
}

static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_bin *b = data;

	return xfrm_pol_bin_key(&b->k, 0, seed);
}

static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
			    const void *ptr)
{
	const struct xfrm_pol_inexact_key *key = arg->key;
	const struct xfrm_pol_inexact_bin *b = ptr;
	int ret;

	if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
		return -1;

	ret = b->k.dir ^ key->dir;
	if (ret)
		return ret;

	ret = b->k.type ^ key->type;
	if (ret)
		return ret;

	ret = b->k.family ^ key->family;
	if (ret)
		return ret;

	return b->k.if_id ^ key->if_id;
}

static const struct rhashtable_params xfrm_pol_inexact_params = {
	.head_offset		= offsetof(struct xfrm_pol_inexact_bin, head),
	.hashfn			= xfrm_pol_bin_key,
	.obj_hashfn		= xfrm_pol_bin_obj,
	.obj_cmpfn		= xfrm_pol_bin_cmp,
	.automatic_shrinking	= true,
};

static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy)
{
	struct xfrm_policy *pol, *delpol = NULL;
	struct hlist_node *newpos = NULL;
	int i = 0;

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(&policy->mark, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst_inexact_list;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
		hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
	else
		hlist_add_head_rcu(&policy->bydst_inexact_list, chain);

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		pol->pos = i;
		i++;
	}
}

static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
						   struct xfrm_policy *policy,
						   bool excl)
{
	struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(&policy->mark, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl)
				return ERR_PTR(-EEXIST);
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = pol;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
		hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
	else
		/* Packet offload policies are inserted at the head
		 * to speed up lookups.
		 */
1588 hlist_add_head_rcu(n: &policy->bydst, h: chain);
1589
1590 return delpol;
1591}
1592
1593int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
1594{
1595 struct net *net = xp_net(xp: policy);
1596 struct xfrm_policy *delpol;
1597 struct hlist_head *chain;
1598
1599 spin_lock_bh(lock: &net->xfrm.xfrm_policy_lock);
1600 chain = policy_hash_bysel(net, sel: &policy->selector, family: policy->family, dir);
1601 if (chain)
1602 delpol = xfrm_policy_insert_list(chain, policy, excl);
1603 else
1604 delpol = xfrm_policy_inexact_insert(policy, dir, excl);
1605
1606 if (IS_ERR(ptr: delpol)) {
1607 spin_unlock_bh(lock: &net->xfrm.xfrm_policy_lock);
1608 return PTR_ERR(ptr: delpol);
1609 }
1610
1611 __xfrm_policy_link(pol: policy, dir);
1612
1613 /* After previous checking, family can either be AF_INET or AF_INET6 */
1614 if (policy->family == AF_INET)
1615 rt_genid_bump_ipv4(net);
1616 else
1617 rt_genid_bump_ipv6(net);
1618
1619 if (delpol) {
1620 xfrm_policy_requeue(old: delpol, new: policy);
1621 __xfrm_policy_unlink(pol: delpol, dir);
1622 }
1623 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, index: policy->index);
1624 hlist_add_head(n: &policy->byidx, h: net->xfrm.policy_byidx+idx_hash(net, index: policy->index));
1625 policy->curlft.add_time = ktime_get_real_seconds();
1626 policy->curlft.use_time = 0;
1627 if (!mod_timer(timer: &policy->timer, expires: jiffies + HZ))
1628 xfrm_pol_hold(policy);
1629 spin_unlock_bh(lock: &net->xfrm.xfrm_policy_lock);
1630
1631 if (delpol)
1632 xfrm_policy_kill(policy: delpol);
1633 else if (xfrm_bydst_should_resize(net, dir, NULL))
1634 schedule_work(work: &net->xfrm.policy_hash_work);
1635
1636 return 0;
1637}
1638EXPORT_SYMBOL(xfrm_policy_insert);
1639
1640static struct xfrm_policy *
1641__xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
1642 u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
1643 struct xfrm_sec_ctx *ctx)
1644{
1645 struct xfrm_policy *pol;
1646
1647 if (!chain)
1648 return NULL;
1649
1650 hlist_for_each_entry(pol, chain, bydst) {
1651 if (pol->type == type &&
1652 pol->if_id == if_id &&
1653 xfrm_policy_mark_match(mark, pol) &&
1654 !selector_cmp(s1: sel, s2: &pol->selector) &&
1655 xfrm_sec_ctx_match(s1: ctx, s2: pol->security))
1656 return pol;
1657 }
1658
1659 return NULL;
1660}
1661
1662struct xfrm_policy *
1663xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
1664 u8 type, int dir, struct xfrm_selector *sel,
1665 struct xfrm_sec_ctx *ctx, int delete, int *err)
1666{
1667 struct xfrm_pol_inexact_bin *bin = NULL;
1668 struct xfrm_policy *pol, *ret = NULL;
1669 struct hlist_head *chain;
1670
1671 *err = 0;
1672 spin_lock_bh(lock: &net->xfrm.xfrm_policy_lock);
1673 chain = policy_hash_bysel(net, sel, family: sel->family, dir);
1674 if (!chain) {
1675 struct xfrm_pol_inexact_candidates cand;
1676 int i;
1677
1678 bin = xfrm_policy_inexact_lookup(net, type,
1679 family: sel->family, dir, if_id);
1680 if (!bin) {
1681 spin_unlock_bh(lock: &net->xfrm.xfrm_policy_lock);
1682 return NULL;
1683 }
1684
1685 if (!xfrm_policy_find_inexact_candidates(cand: &cand, b: bin,
1686 saddr: &sel->saddr,
1687 daddr: &sel->daddr)) {
1688 spin_unlock_bh(lock: &net->xfrm.xfrm_policy_lock);
1689 return NULL;
1690 }
1691
1692 pol = NULL;
1693 for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
1694 struct xfrm_policy *tmp;
1695
1696 tmp = __xfrm_policy_bysel_ctx(chain: cand.res[i], mark,
1697 if_id, type, dir,
1698 sel, ctx);
1699 if (!tmp)
1700 continue;
1701
1702 if (!pol || tmp->pos < pol->pos)
1703 pol = tmp;
1704 }
1705 } else {
1706 pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
1707 sel, ctx);
1708 }
1709
1710 if (pol) {
1711 xfrm_pol_hold(policy: pol);
1712 if (delete) {
1713 *err = security_xfrm_policy_delete(ctx: pol->security);
1714 if (*err) {
1715 spin_unlock_bh(lock: &net->xfrm.xfrm_policy_lock);
1716 return pol;
1717 }
1718 __xfrm_policy_unlink(pol, dir);
1719 }
1720 ret = pol;
1721 }
1722 spin_unlock_bh(lock: &net->xfrm.xfrm_policy_lock);
1723
1724 if (ret && delete)
1725 xfrm_policy_kill(policy: ret);
1726 if (bin && delete)
1727 xfrm_policy_inexact_prune_bin(b: bin);
1728 return ret;
1729}
1730EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
1731
1732struct xfrm_policy *
1733xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
1734 u8 type, int dir, u32 id, int delete, int *err)
1735{
1736 struct xfrm_policy *pol, *ret;
1737 struct hlist_head *chain;
1738
1739 *err = -ENOENT;
1740 if (xfrm_policy_id2dir(index: id) != dir)
1741 return NULL;
1742
1743 *err = 0;
1744 spin_lock_bh(lock: &net->xfrm.xfrm_policy_lock);
1745 chain = net->xfrm.policy_byidx + idx_hash(net, index: id);
1746 ret = NULL;
1747 hlist_for_each_entry(pol, chain, byidx) {
1748 if (pol->type == type && pol->index == id &&
1749 pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
1750 xfrm_pol_hold(policy: pol);
1751 if (delete) {
1752 *err = security_xfrm_policy_delete(
1753 ctx: pol->security);
1754 if (*err) {
1755 spin_unlock_bh(lock: &net->xfrm.xfrm_policy_lock);
1756 return pol;
1757 }
1758 __xfrm_policy_unlink(pol, dir);
1759 }
1760 ret = pol;
1761 break;
1762 }
1763 }
1764 spin_unlock_bh(lock: &net->xfrm.xfrm_policy_lock);
1765
1766 if (ret && delete)
1767 xfrm_policy_kill(policy: ret);
1768 return ret;
1769}
1770EXPORT_SYMBOL(xfrm_policy_byid);
1771
1772#ifdef CONFIG_SECURITY_NETWORK_XFRM
1773static inline int
1774xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1775{
1776 struct xfrm_policy *pol;
1777 int err = 0;
1778
1779 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1780 if (pol->walk.dead ||
1781 xfrm_policy_id2dir(index: pol->index) >= XFRM_POLICY_MAX ||
1782 pol->type != type)
1783 continue;
1784
1785 err = security_xfrm_policy_delete(ctx: pol->security);
1786 if (err) {
1787 xfrm_audit_policy_delete(xp: pol, result: 0, task_valid);
1788 return err;
1789 }
1790 }
1791 return err;
1792}
1793
1794static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
1795 struct net_device *dev,
1796 bool task_valid)
1797{
1798 struct xfrm_policy *pol;
1799 int err = 0;
1800
1801 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1802 if (pol->walk.dead ||
1803 xfrm_policy_id2dir(index: pol->index) >= XFRM_POLICY_MAX ||
1804 pol->xdo.dev != dev)
1805 continue;
1806
1807 err = security_xfrm_policy_delete(ctx: pol->security);
1808 if (err) {
1809 xfrm_audit_policy_delete(xp: pol, result: 0, task_valid);
1810 return err;
1811 }
1812 }
1813 return err;
1814}
1815#else
1816static inline int
1817xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1818{
1819 return 0;
1820}
1821
1822static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
1823 struct net_device *dev,
1824 bool task_valid)
1825{
1826 return 0;
1827}
1828#endif
1829
1830int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
1831{
1832 int dir, err = 0, cnt = 0;
1833 struct xfrm_policy *pol;
1834
1835 spin_lock_bh(lock: &net->xfrm.xfrm_policy_lock);
1836
1837 err = xfrm_policy_flush_secctx_check(net, type, task_valid);
1838 if (err)
1839 goto out;
1840
1841again:
1842 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1843 if (pol->walk.dead)
1844 continue;
1845
1846 dir = xfrm_policy_id2dir(index: pol->index);
1847 if (dir >= XFRM_POLICY_MAX ||
1848 pol->type != type)
1849 continue;
1850
1851 __xfrm_policy_unlink(pol, dir);
1852 spin_unlock_bh(lock: &net->xfrm.xfrm_policy_lock);
1853 xfrm_dev_policy_delete(x: pol);
1854 cnt++;
1855 xfrm_audit_policy_delete(xp: pol, result: 1, task_valid);
1856 xfrm_policy_kill(policy: pol);
1857 spin_lock_bh(lock: &net->xfrm.xfrm_policy_lock);
1858 goto again;
1859 }
1860 if (cnt)
1861 __xfrm_policy_inexact_flush(net);
1862 else
1863 err = -ESRCH;
1864out:
1865 spin_unlock_bh(lock: &net->xfrm.xfrm_policy_lock);
1866 return err;
1867}
1868EXPORT_SYMBOL(xfrm_policy_flush);
1869
1870int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
1871 bool task_valid)
1872{
1873 int dir, err = 0, cnt = 0;
1874 struct xfrm_policy *pol;
1875
1876 spin_lock_bh(lock: &net->xfrm.xfrm_policy_lock);
1877
1878 err = xfrm_dev_policy_flush_secctx_check(net, dev, task_valid);
1879 if (err)
1880 goto out;
1881
1882again:
1883 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1884 if (pol->walk.dead)
1885 continue;
1886
1887 dir = xfrm_policy_id2dir(index: pol->index);
1888 if (dir >= XFRM_POLICY_MAX ||
1889 pol->xdo.dev != dev)
1890 continue;
1891
1892 __xfrm_policy_unlink(pol, dir);
1893 spin_unlock_bh(lock: &net->xfrm.xfrm_policy_lock);
1894 xfrm_dev_policy_delete(x: pol);
1895 cnt++;
1896 xfrm_audit_policy_delete(xp: pol, result: 1, task_valid);
1897 xfrm_policy_kill(policy: pol);
1898 spin_lock_bh(lock: &net->xfrm.xfrm_policy_lock);
1899 goto again;
1900 }
1901 if (cnt)
1902 __xfrm_policy_inexact_flush(net);
1903 else
1904 err = -ESRCH;
1905out:
1906 spin_unlock_bh(lock: &net->xfrm.xfrm_policy_lock);
1907 return err;
1908}
1909EXPORT_SYMBOL(xfrm_dev_policy_flush);
1910
1911int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1912 int (*func)(struct xfrm_policy *, int, int, void*),
1913 void *data)
1914{
1915 struct xfrm_policy *pol;
1916 struct xfrm_policy_walk_entry *x;
1917 int error = 0;
1918
1919 if (walk->type >= XFRM_POLICY_TYPE_MAX &&
1920 walk->type != XFRM_POLICY_TYPE_ANY)
1921 return -EINVAL;
1922
1923 if (list_empty(head: &walk->walk.all) && walk->seq != 0)
1924 return 0;
1925
1926 spin_lock_bh(lock: &net->xfrm.xfrm_policy_lock);
1927 if (list_empty(head: &walk->walk.all))
1928 x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
1929 else
1930 x = list_first_entry(&walk->walk.all,
1931 struct xfrm_policy_walk_entry, all);
1932
1933 list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1934 if (x->dead)
1935 continue;
1936 pol = container_of(x, struct xfrm_policy, walk);
1937 if (walk->type != XFRM_POLICY_TYPE_ANY &&
1938 walk->type != pol->type)
1939 continue;
1940 error = func(pol, xfrm_policy_id2dir(index: pol->index),
1941 walk->seq, data);
1942 if (error) {
1943 list_move_tail(list: &walk->walk.all, head: &x->all);
1944 goto out;
1945 }
1946 walk->seq++;
1947 }
1948 if (walk->seq == 0) {
1949 error = -ENOENT;
1950 goto out;
1951 }
1952 list_del_init(entry: &walk->walk.all);
1953out:
1954 spin_unlock_bh(lock: &net->xfrm.xfrm_policy_lock);
1955 return error;
1956}
1957EXPORT_SYMBOL(xfrm_policy_walk);
1958
1959void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1960{
1961 INIT_LIST_HEAD(list: &walk->walk.all);
1962 walk->walk.dead = 1;
1963 walk->type = type;
1964 walk->seq = 0;
1965}
1966EXPORT_SYMBOL(xfrm_policy_walk_init);
1967
1968void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1969{
1970 if (list_empty(head: &walk->walk.all))
1971 return;
1972
1973 spin_lock_bh(lock: &net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
1974 list_del(entry: &walk->walk.all);
1975 spin_unlock_bh(lock: &net->xfrm.xfrm_policy_lock);
1976}
1977EXPORT_SYMBOL(xfrm_policy_walk_done);
1978
1979/*
1980 * Find policy to apply to this flow.
1981 *
1982 * Returns 0 if policy found, else an -errno.
1983 */
1984static int xfrm_policy_match(const struct xfrm_policy *pol,
1985 const struct flowi *fl,
1986 u8 type, u16 family, u32 if_id)
1987{
1988 const struct xfrm_selector *sel = &pol->selector;
1989 int ret = -ESRCH;
1990 bool match;
1991
1992 if (pol->family != family ||
1993 pol->if_id != if_id ||
1994 (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1995 pol->type != type)
1996 return ret;
1997
1998 match = xfrm_selector_match(sel, fl, family);
1999 if (match)
2000 ret = security_xfrm_policy_lookup(ctx: pol->security, fl_secid: fl->flowi_secid);
2001 return ret;
2002}
2003
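/* Lookup in one of the inexact-bin rbtrees.  The walk runs under RCU
 * while writers may rebalance the tree under the policy lock, so it is
 * guarded by a seqcount and restarts if the tree changed mid-descent.
 * A miss here is harmless: it only shrinks the candidate set assembled
 * by xfrm_policy_find_inexact_candidates() below.
 */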
2004static struct xfrm_pol_inexact_node *
2005xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
2006 seqcount_spinlock_t *count,
2007 const xfrm_address_t *addr, u16 family)
2008{
2009 const struct rb_node *parent;
2010 int seq;
2011
2012again:
2013 seq = read_seqcount_begin(count);
2014
2015 parent = rcu_dereference_raw(r->rb_node);
2016 while (parent) {
2017 struct xfrm_pol_inexact_node *node;
2018 int delta;
2019
2020 node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
2021
2022		delta = xfrm_policy_addr_delta(addr, &node->addr,
2023					       node->prefixlen, family);
2024 if (delta < 0) {
2025 parent = rcu_dereference_raw(parent->rb_left);
2026 continue;
2027 } else if (delta > 0) {
2028 parent = rcu_dereference_raw(parent->rb_right);
2029 continue;
2030 }
2031
2032 return node;
2033 }
2034
2035 if (read_seqcount_retry(count, seq))
2036 goto again;
2037
2038 return NULL;
2039}
2040
2041static bool
2042xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
2043 struct xfrm_pol_inexact_bin *b,
2044 const xfrm_address_t *saddr,
2045 const xfrm_address_t *daddr)
2046{
2047 struct xfrm_pol_inexact_node *n;
2048 u16 family;
2049
2050 if (!b)
2051 return false;
2052
2053 family = b->k.family;
2054 memset(cand, 0, sizeof(*cand));
2055 cand->res[XFRM_POL_CAND_ANY] = &b->hhead;
2056
2057	n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
2058					    family);
2059	if (n) {
2060		cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
2061		n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
2062						    family);
2063		if (n)
2064			cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
2065	}
2066
2067	n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
2068					    family);
2069 if (n)
2070 cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;
2071
2072 return true;
2073}
2074
2075static struct xfrm_pol_inexact_bin *
2076xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
2077 u8 dir, u32 if_id)
2078{
2079 struct xfrm_pol_inexact_key k = {
2080 .family = family,
2081 .type = type,
2082 .dir = dir,
2083 .if_id = if_id,
2084 };
2085
2086	write_pnet(&k.net, net);
2087
2088	return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
2089				 xfrm_pol_inexact_params);
2090}
2091
2092static struct xfrm_pol_inexact_bin *
2093xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
2094 u8 dir, u32 if_id)
2095{
2096 struct xfrm_pol_inexact_bin *bin;
2097
2098 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
2099
2100 rcu_read_lock();
2101 bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2102 rcu_read_unlock();
2103
2104 return bin;
2105}
2106
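/* Scan one candidate chain, kept sorted by ascending priority.  The
 * scan stops once pol->priority exceeds the best priority seen so far;
 * on a priority tie the entry with the smaller ->pos is kept.  -ESRCH
 * from the security hook means "no match here, keep looking"; any
 * other error aborts the whole lookup.
 */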
2107static struct xfrm_policy *
2108__xfrm_policy_eval_candidates(struct hlist_head *chain,
2109 struct xfrm_policy *prefer,
2110 const struct flowi *fl,
2111 u8 type, u16 family, u32 if_id)
2112{
2113 u32 priority = prefer ? prefer->priority : ~0u;
2114 struct xfrm_policy *pol;
2115
2116 if (!chain)
2117 return NULL;
2118
2119 hlist_for_each_entry_rcu(pol, chain, bydst) {
2120 int err;
2121
2122 if (pol->priority > priority)
2123 break;
2124
2125 err = xfrm_policy_match(pol, fl, type, family, if_id);
2126 if (err) {
2127 if (err != -ESRCH)
2128				return ERR_PTR(err);
2129
2130 continue;
2131 }
2132
2133 if (prefer) {
2134 /* matches. Is it older than *prefer? */
2135 if (pol->priority == priority &&
2136 prefer->pos < pol->pos)
2137 return prefer;
2138 }
2139
2140 return pol;
2141 }
2142
2143 return NULL;
2144}
2145
2146static struct xfrm_policy *
2147xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
2148 struct xfrm_policy *prefer,
2149 const struct flowi *fl,
2150 u8 type, u16 family, u32 if_id)
2151{
2152 struct xfrm_policy *tmp;
2153 int i;
2154
2155 for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
2156		tmp = __xfrm_policy_eval_candidates(cand->res[i],
2157						    prefer,
2158						    fl, type, family, if_id);
2159		if (!tmp)
2160			continue;
2161
2162		if (IS_ERR(tmp))
2163 return tmp;
2164 prefer = tmp;
2165 }
2166
2167 return prefer;
2168}
2169
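/* Full lookup for one policy type: probe the bydst hash chains first,
 * then merge in the four inexact candidate lists, keeping the best
 * match overall.  Everything runs under RCU and is retried if the
 * hash tables were resized meanwhile (xfrm_policy_hash_generation) or
 * the winner's refcount could not be taken.
 */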
2170static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
2171 const struct flowi *fl,
2172 u16 family, u8 dir,
2173 u32 if_id)
2174{
2175 struct xfrm_pol_inexact_candidates cand;
2176 const xfrm_address_t *daddr, *saddr;
2177 struct xfrm_pol_inexact_bin *bin;
2178 struct xfrm_policy *pol, *ret;
2179 struct hlist_head *chain;
2180 unsigned int sequence;
2181 int err;
2182
2183 daddr = xfrm_flowi_daddr(fl, family);
2184 saddr = xfrm_flowi_saddr(fl, family);
2185 if (unlikely(!daddr || !saddr))
2186 return NULL;
2187
2188 rcu_read_lock();
2189 retry:
2190 do {
2191 sequence = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
2192 chain = policy_hash_direct(net, daddr, saddr, family, dir);
2193 } while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence));
2194
2195 ret = NULL;
2196 hlist_for_each_entry_rcu(pol, chain, bydst) {
2197 err = xfrm_policy_match(pol, fl, type, family, if_id);
2198 if (err) {
2199 if (err == -ESRCH)
2200 continue;
2201 else {
2202				ret = ERR_PTR(err);
2203 goto fail;
2204 }
2205 } else {
2206 ret = pol;
2207 break;
2208 }
2209 }
2210 if (ret && ret->xdo.type == XFRM_DEV_OFFLOAD_PACKET)
2211 goto skip_inexact;
2212
2213 bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2214	if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
2215							 daddr))
2216		goto skip_inexact;
2217
2218	pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
2219					  family, if_id);
2220	if (pol) {
2221		ret = pol;
2222		if (IS_ERR(pol))
2223 goto fail;
2224 }
2225
2226skip_inexact:
2227 if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence))
2228 goto retry;
2229
2230	if (ret && !xfrm_pol_hold_rcu(ret))
2231 goto retry;
2232fail:
2233 rcu_read_unlock();
2234
2235 return ret;
2236}
2237
2238static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
2239 const struct flowi *fl,
2240 u16 family, u8 dir, u32 if_id)
2241{
2242#ifdef CONFIG_XFRM_SUB_POLICY
2243 struct xfrm_policy *pol;
2244
2245	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
2246					dir, if_id);
2247	if (pol != NULL)
2248		return pol;
2249#endif
2250	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
2251 dir, if_id);
2252}
2253
2254static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
2255 const struct flowi *fl,
2256 u16 family, u32 if_id)
2257{
2258 struct xfrm_policy *pol;
2259
2260 rcu_read_lock();
2261 again:
2262 pol = rcu_dereference(sk->sk_policy[dir]);
2263 if (pol != NULL) {
2264 bool match;
2265 int err = 0;
2266
2267 if (pol->family != family) {
2268 pol = NULL;
2269 goto out;
2270 }
2271
2272		match = xfrm_selector_match(&pol->selector, fl, family);
2273		if (match) {
2274			if ((READ_ONCE(sk->sk_mark) & pol->mark.m) != pol->mark.v ||
2275			    pol->if_id != if_id) {
2276				pol = NULL;
2277				goto out;
2278			}
2279			err = security_xfrm_policy_lookup(pol->security,
2280							  fl->flowi_secid);
2281			if (!err) {
2282				if (!xfrm_pol_hold_rcu(pol))
2283					goto again;
2284			} else if (err == -ESRCH) {
2285				pol = NULL;
2286			} else {
2287				pol = ERR_PTR(err);
2288 }
2289 } else
2290 pol = NULL;
2291 }
2292out:
2293 rcu_read_unlock();
2294 return pol;
2295}
2296
2297static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
2298{
2299	struct net *net = xp_net(pol);
2300
2301	list_add(&pol->walk.all, &net->xfrm.policy_all);
2302	net->xfrm.policy_count[dir]++;
2303	xfrm_pol_hold(pol);
2304}
2305
2306static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
2307 int dir)
2308{
2309	struct net *net = xp_net(pol);
2310
2311	if (list_empty(&pol->walk.all))
2312		return NULL;
2313
2314	/* Socket policies are not hashed. */
2315	if (!hlist_unhashed(&pol->bydst)) {
2316		hlist_del_rcu(&pol->bydst);
2317		hlist_del_init(&pol->bydst_inexact_list);
2318		hlist_del(&pol->byidx);
2319	}
2320
2321	list_del_init(&pol->walk.all);
2322 net->xfrm.policy_count[dir]--;
2323
2324 return pol;
2325}
2326
2327static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
2328{
2329	__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
2330}
2331
2332static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
2333{
2334	__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
2335}
2336
2337int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
2338{
2339	struct net *net = xp_net(pol);
2340
2341	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2342	pol = __xfrm_policy_unlink(pol, dir);
2343	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2344	if (pol) {
2345		xfrm_dev_policy_delete(pol);
2346		xfrm_policy_kill(pol);
2347 return 0;
2348 }
2349 return -ENOENT;
2350}
2351EXPORT_SYMBOL(xfrm_policy_delete);
2352
2353int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
2354{
2355 struct net *net = sock_net(sk);
2356 struct xfrm_policy *old_pol;
2357
2358#ifdef CONFIG_XFRM_SUB_POLICY
2359 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
2360 return -EINVAL;
2361#endif
2362
2363	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2364	old_pol = rcu_dereference_protected(sk->sk_policy[dir],
2365				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
2366	if (pol) {
2367		pol->curlft.add_time = ktime_get_real_seconds();
2368		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
2369		xfrm_sk_policy_link(pol, dir);
2370	}
2371	rcu_assign_pointer(sk->sk_policy[dir], pol);
2372	if (old_pol) {
2373		if (pol)
2374			xfrm_policy_requeue(old_pol, pol);
2375
2376		/* Unlinking always succeeds. This is the only function
2377		 * allowed to delete or replace a socket policy.
2378		 */
2379		xfrm_sk_policy_unlink(old_pol, dir);
2380	}
2381	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2382
2383	if (old_pol) {
2384		xfrm_policy_kill(old_pol);
2385 }
2386 return 0;
2387}
2388
2389static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
2390{
2391	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
2392	struct net *net = xp_net(old);
2393
2394	if (newp) {
2395		newp->selector = old->selector;
2396		if (security_xfrm_policy_clone(old->security,
2397					       &newp->security)) {
2398			kfree(newp);
2399 return NULL; /* ENOMEM */
2400 }
2401 newp->lft = old->lft;
2402 newp->curlft = old->curlft;
2403 newp->mark = old->mark;
2404 newp->if_id = old->if_id;
2405 newp->action = old->action;
2406 newp->flags = old->flags;
2407 newp->xfrm_nr = old->xfrm_nr;
2408 newp->index = old->index;
2409 newp->type = old->type;
2410 newp->family = old->family;
2411 memcpy(newp->xfrm_vec, old->xfrm_vec,
2412 newp->xfrm_nr*sizeof(struct xfrm_tmpl));
2413		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2414		xfrm_sk_policy_link(newp, dir);
2415		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2416		xfrm_pol_put(newp);
2417 }
2418 return newp;
2419}
2420
2421int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
2422{
2423 const struct xfrm_policy *p;
2424 struct xfrm_policy *np;
2425 int i, ret = 0;
2426
2427 rcu_read_lock();
2428 for (i = 0; i < 2; i++) {
2429 p = rcu_dereference(osk->sk_policy[i]);
2430 if (p) {
2431			np = clone_policy(p, i);
2432 if (unlikely(!np)) {
2433 ret = -ENOMEM;
2434 break;
2435 }
2436 rcu_assign_pointer(sk->sk_policy[i], np);
2437 }
2438 }
2439 rcu_read_unlock();
2440 return ret;
2441}
2442
2443static int
2444xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
2445 xfrm_address_t *remote, unsigned short family, u32 mark)
2446{
2447 int err;
2448 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2449
2450 if (unlikely(afinfo == NULL))
2451 return -EINVAL;
2452 err = afinfo->get_saddr(net, oif, local, remote, mark);
2453 rcu_read_unlock();
2454 return err;
2455}
2456
2457/* Resolve list of templates for the flow, given policy. */
2458
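/* For each template the policy carries, look up a matching state.  For
 * tunnel and BEET mode templates the endpoints come from the template
 * itself, resolving a source address when the template leaves it
 * unspecified.  A missing state is tolerated only for optional
 * templates; otherwise the error unwinds every state acquired so far
 * (-EAGAIN while an SA is still being negotiated).
 */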
2459static int
2460xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
2461 struct xfrm_state **xfrm, unsigned short family)
2462{
2463	struct net *net = xp_net(policy);
2464 int nx;
2465 int i, error;
2466 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
2467 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
2468 xfrm_address_t tmp;
2469
2470 for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
2471 struct xfrm_state *x;
2472 xfrm_address_t *remote = daddr;
2473 xfrm_address_t *local = saddr;
2474 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
2475
2476 if (tmpl->mode == XFRM_MODE_TUNNEL ||
2477 tmpl->mode == XFRM_MODE_BEET) {
2478 remote = &tmpl->id.daddr;
2479 local = &tmpl->saddr;
2480			if (xfrm_addr_any(local, tmpl->encap_family)) {
2481				error = xfrm_get_saddr(net, fl->flowi_oif,
2482						       &tmp, remote,
2483						       tmpl->encap_family, 0);
2484				if (error)
2485					goto fail;
2486				local = &tmp;
2487			}
2488		}
2489
2490		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
2491				    family, policy->if_id);
2492
2493 if (x && x->km.state == XFRM_STATE_VALID) {
2494 xfrm[nx++] = x;
2495 daddr = remote;
2496 saddr = local;
2497 continue;
2498 }
2499 if (x) {
2500 error = (x->km.state == XFRM_STATE_ERROR ?
2501 -EINVAL : -EAGAIN);
2502 xfrm_state_put(x);
2503 } else if (error == -ESRCH) {
2504 error = -EAGAIN;
2505 }
2506
2507 if (!tmpl->optional)
2508 goto fail;
2509 }
2510 return nx;
2511
2512fail:
2513 for (nx--; nx >= 0; nx--)
2514		xfrm_state_put(xfrm[nx]);
2515 return error;
2516}
2517
2518static int
2519xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
2520 struct xfrm_state **xfrm, unsigned short family)
2521{
2522 struct xfrm_state *tp[XFRM_MAX_DEPTH];
2523 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
2524 int cnx = 0;
2525 int error;
2526 int ret;
2527 int i;
2528
2529 for (i = 0; i < npols; i++) {
2530 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
2531 error = -ENOBUFS;
2532 goto fail;
2533 }
2534
2535		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
2536 if (ret < 0) {
2537 error = ret;
2538 goto fail;
2539 } else
2540 cnx += ret;
2541 }
2542
2543 /* found states are sorted for outbound processing */
2544 if (npols > 1)
2545		xfrm_state_sort(xfrm, tpp, cnx, family);
2546
2547 return cnx;
2548
2549 fail:
2550 for (cnx--; cnx >= 0; cnx--)
2551		xfrm_state_put(tpp[cnx]);
2552 return error;
2553
2554}
2555
2556static int xfrm_get_tos(const struct flowi *fl, int family)
2557{
2558 if (family == AF_INET)
2559 return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;
2560
2561 return 0;
2562}
2563
2564static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
2565{
2566 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2567 struct dst_ops *dst_ops;
2568 struct xfrm_dst *xdst;
2569
2570 if (!afinfo)
2571		return ERR_PTR(-EINVAL);
2572
2573 switch (family) {
2574 case AF_INET:
2575 dst_ops = &net->xfrm.xfrm4_dst_ops;
2576 break;
2577#if IS_ENABLED(CONFIG_IPV6)
2578 case AF_INET6:
2579 dst_ops = &net->xfrm.xfrm6_dst_ops;
2580 break;
2581#endif
2582 default:
2583 BUG();
2584 }
2585	xdst = dst_alloc(dst_ops, NULL, DST_OBSOLETE_NONE, 0);
2586
2587	if (likely(xdst)) {
2588		memset_after(xdst, 0, u.dst);
2589	} else
2590		xdst = ERR_PTR(-ENOBUFS);
2591
2592 rcu_read_unlock();
2593
2594 return xdst;
2595}
2596
2597static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
2598 int nfheader_len)
2599{
2600 if (dst->ops->family == AF_INET6) {
2601 struct rt6_info *rt = (struct rt6_info *)dst;
2602 path->path_cookie = rt6_get_cookie(rt);
2603 path->u.rt6.rt6i_nfheader_len = nfheader_len;
2604 }
2605}
2606
2607static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
2608 const struct flowi *fl)
2609{
2610	const struct xfrm_policy_afinfo *afinfo =
2611		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
2612 int err;
2613
2614 if (!afinfo)
2615 return -EINVAL;
2616
2617 err = afinfo->fill_dst(xdst, dev, fl);
2618
2619 rcu_read_unlock();
2620
2621 return err;
2622}
2623
2624
2625/* Allocate a chain of dst_entry's, attach the known xfrm's and
2626 * calculate all the metrics... In short, bundle a bundle.
2627 */
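/* The chain is built outermost-first: each new xfrm_dst becomes the
 * child of the previous one, the original route ends up as the
 * innermost child, and xdst0 is the head handed back to the caller.
 * Tunnel-mode states trigger a fresh route lookup towards the tunnel
 * endpoints, so the outer family may differ from the inner one.
 */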
2628
2629static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
2630 struct xfrm_state **xfrm,
2631 struct xfrm_dst **bundle,
2632 int nx,
2633 const struct flowi *fl,
2634 struct dst_entry *dst)
2635{
2636 const struct xfrm_state_afinfo *afinfo;
2637 const struct xfrm_mode *inner_mode;
2638	struct net *net = xp_net(policy);
2639 unsigned long now = jiffies;
2640 struct net_device *dev;
2641 struct xfrm_dst *xdst_prev = NULL;
2642 struct xfrm_dst *xdst0 = NULL;
2643 int i = 0;
2644 int err;
2645 int header_len = 0;
2646 int nfheader_len = 0;
2647 int trailer_len = 0;
2648 int tos;
2649 int family = policy->selector.family;
2650 xfrm_address_t saddr, daddr;
2651
2652	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
2653
2654 tos = xfrm_get_tos(fl, family);
2655
2656 dst_hold(dst);
2657
2658 for (; i < nx; i++) {
2659 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
2660 struct dst_entry *dst1 = &xdst->u.dst;
2661
2662		err = PTR_ERR(xdst);
2663		if (IS_ERR(xdst)) {
2664 dst_release(dst);
2665 goto put_states;
2666 }
2667
2668 bundle[i] = xdst;
2669 if (!xdst_prev)
2670 xdst0 = xdst;
2671 else
2672			/* Ref count is taken during xfrm_alloc_dst();
2673			 * no need to do dst_clone() on dst1.
2674			 */
2675			xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
2676
2677		if (xfrm[i]->sel.family == AF_UNSPEC) {
2678			inner_mode = xfrm_ip2inner_mode(xfrm[i],
2679							xfrm_af2proto(family));
2680 if (!inner_mode) {
2681 err = -EAFNOSUPPORT;
2682 dst_release(dst);
2683 goto put_states;
2684 }
2685 } else
2686 inner_mode = &xfrm[i]->inner_mode;
2687
2688 xdst->route = dst;
2689		dst_copy_metrics(dst1, dst);
2690
2691 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
2692 __u32 mark = 0;
2693 int oif;
2694
2695 if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
2696				mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
2697
2698 if (xfrm[i]->xso.type != XFRM_DEV_OFFLOAD_PACKET)
2699 family = xfrm[i]->props.family;
2700
2701 oif = fl->flowi_oif ? : fl->flowi_l3mdev;
2702			dst = xfrm_dst_lookup(xfrm[i], tos, oif,
2703					      &saddr, &daddr, family, mark);
2704			err = PTR_ERR(dst);
2705			if (IS_ERR(dst))
2706 goto put_states;
2707 } else
2708 dst_hold(dst);
2709
2710 dst1->xfrm = xfrm[i];
2711 xdst->xfrm_genid = xfrm[i]->genid;
2712
2713 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2714 dst1->lastuse = now;
2715
2716 dst1->input = dst_discard;
2717
2718 rcu_read_lock();
2719		afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
2720 if (likely(afinfo))
2721 dst1->output = afinfo->output;
2722 else
2723 dst1->output = dst_discard_out;
2724 rcu_read_unlock();
2725
2726 xdst_prev = xdst;
2727
2728 header_len += xfrm[i]->props.header_len;
2729 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
2730 nfheader_len += xfrm[i]->props.header_len;
2731 trailer_len += xfrm[i]->props.trailer_len;
2732 }
2733
2734	xfrm_dst_set_child(xdst_prev, dst);
2735 xdst0->path = dst;
2736
2737 err = -ENODEV;
2738 dev = dst->dev;
2739 if (!dev)
2740 goto free_dst;
2741
2742	xfrm_init_path(xdst0, dst, nfheader_len);
2743	xfrm_init_pmtu(bundle, nx);
2744
2745	for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
2746	     xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
2747		err = xfrm_fill_dst(xdst_prev, dev, fl);
2748 if (err)
2749 goto free_dst;
2750
2751 xdst_prev->u.dst.header_len = header_len;
2752 xdst_prev->u.dst.trailer_len = trailer_len;
2753 header_len -= xdst_prev->u.dst.xfrm->props.header_len;
2754 trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
2755 }
2756
2757 return &xdst0->u.dst;
2758
2759put_states:
2760 for (; i < nx; i++)
2761		xfrm_state_put(xfrm[i]);
2762free_dst:
2763	if (xdst0)
2764		dst_release_immediate(&xdst0->u.dst);
2765
2766	return ERR_PTR(err);
2767}
2768
2769static int xfrm_expand_policies(const struct flowi *fl, u16 family,
2770 struct xfrm_policy **pols,
2771 int *num_pols, int *num_xfrms)
2772{
2773 int i;
2774
2775 if (*num_pols == 0 || !pols[0]) {
2776 *num_pols = 0;
2777 *num_xfrms = 0;
2778 return 0;
2779 }
2780	if (IS_ERR(pols[0])) {
2781		*num_pols = 0;
2782		return PTR_ERR(pols[0]);
2783 }
2784
2785 *num_xfrms = pols[0]->xfrm_nr;
2786
2787#ifdef CONFIG_XFRM_SUB_POLICY
2788 if (pols[0]->action == XFRM_POLICY_ALLOW &&
2789 pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2790		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
2791						    XFRM_POLICY_TYPE_MAIN,
2792						    fl, family,
2793						    XFRM_POLICY_OUT,
2794						    pols[0]->if_id);
2795		if (pols[1]) {
2796			if (IS_ERR(pols[1])) {
2797				xfrm_pols_put(pols, *num_pols);
2798				*num_pols = 0;
2799				return PTR_ERR(pols[1]);
2800 }
2801 (*num_pols)++;
2802 (*num_xfrms) += pols[1]->xfrm_nr;
2803 }
2804 }
2805#endif
2806 for (i = 0; i < *num_pols; i++) {
2807 if (pols[i]->action != XFRM_POLICY_ALLOW) {
2808 *num_xfrms = -1;
2809 break;
2810 }
2811 }
2812
2813 return 0;
2814
2815}
2816
2817static struct xfrm_dst *
2818xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
2819 const struct flowi *fl, u16 family,
2820 struct dst_entry *dst_orig)
2821{
2822	struct net *net = xp_net(pols[0]);
2823 struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
2824 struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
2825 struct xfrm_dst *xdst;
2826 struct dst_entry *dst;
2827 int err;
2828
2829 /* Try to instantiate a bundle */
2830	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
2831 if (err <= 0) {
2832 if (err == 0)
2833 return NULL;
2834
2835 if (err != -EAGAIN)
2836 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2837		return ERR_PTR(err);
2838 }
2839
2840	dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
2841	if (IS_ERR(dst)) {
2842		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
2843		return ERR_CAST(dst);
2844 }
2845
2846 xdst = (struct xfrm_dst *)dst;
2847 xdst->num_xfrms = err;
2848 xdst->num_pols = num_pols;
2849 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2850	xdst->policy_genid = atomic_read(&pols[0]->genid);
2851
2852 return xdst;
2853}
2854
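/* Timer callback for a policy's hold queue.  Packets parked on a
 * larval bundle are pushed through xfrm_lookup() again; if the result
 * is still a queueing bundle (DST_XFRM_QUEUE), the timeout doubles up
 * to XFRM_QUEUE_TMO_MAX and the queue is eventually purged.  Once a
 * real bundle exists, the whole queue is drained via dst_output().
 */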
2855static void xfrm_policy_queue_process(struct timer_list *t)
2856{
2857 struct sk_buff *skb;
2858 struct sock *sk;
2859 struct dst_entry *dst;
2860 struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
2861	struct net *net = xp_net(pol);
2862 struct xfrm_policy_queue *pq = &pol->polq;
2863 struct flowi fl;
2864 struct sk_buff_head list;
2865 __u32 skb_mark;
2866
2867	spin_lock(&pq->hold_queue.lock);
2868	skb = skb_peek(&pq->hold_queue);
2869	if (!skb) {
2870		spin_unlock(&pq->hold_queue.lock);
2871 goto out;
2872 }
2873 dst = skb_dst(skb);
2874 sk = skb->sk;
2875
2876 /* Fixup the mark to support VTI. */
2877 skb_mark = skb->mark;
2878 skb->mark = pol->mark.v;
2879	xfrm_decode_session(net, skb, &fl, dst->ops->family);
2880	skb->mark = skb_mark;
2881	spin_unlock(&pq->hold_queue.lock);
2882
2883	dst_hold(xfrm_dst_path(dst));
2884	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
2885	if (IS_ERR(dst))
2886 goto purge_queue;
2887
2888 if (dst->flags & DST_XFRM_QUEUE) {
2889 dst_release(dst);
2890
2891 if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
2892 goto purge_queue;
2893
2894 pq->timeout = pq->timeout << 1;
2895		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
2896			xfrm_pol_hold(pol);
2897 goto out;
2898 }
2899
2900 dst_release(dst);
2901
2902	__skb_queue_head_init(&list);
2903
2904	spin_lock(&pq->hold_queue.lock);
2905	pq->timeout = 0;
2906	skb_queue_splice_init(&pq->hold_queue, &list);
2907	spin_unlock(&pq->hold_queue.lock);
2908
2909	while (!skb_queue_empty(&list)) {
2910		skb = __skb_dequeue(&list);
2911
2912 /* Fixup the mark to support VTI. */
2913 skb_mark = skb->mark;
2914 skb->mark = pol->mark.v;
2915		xfrm_decode_session(net, skb, &fl, skb_dst(skb)->ops->family);
2916 skb->mark = skb_mark;
2917
2918		dst_hold(xfrm_dst_path(skb_dst(skb)));
2919		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
2920		if (IS_ERR(dst)) {
2921			kfree_skb(skb);
2922 continue;
2923 }
2924
2925 nf_reset_ct(skb);
2926 skb_dst_drop(skb);
2927 skb_dst_set(skb, dst);
2928
2929		dst_output(net, skb->sk, skb);
2930 }
2931
2932out:
2933	xfrm_pol_put(pol);
2934 return;
2935
2936purge_queue:
2937 pq->timeout = 0;
2938	skb_queue_purge(&pq->hold_queue);
2939	xfrm_pol_put(pol);
2940}
2941
2942static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2943{
2944 unsigned long sched_next;
2945 struct dst_entry *dst = skb_dst(skb);
2946 struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
2947 struct xfrm_policy *pol = xdst->pols[0];
2948 struct xfrm_policy_queue *pq = &pol->polq;
2949
2950 if (unlikely(skb_fclone_busy(sk, skb))) {
2951 kfree_skb(skb);
2952 return 0;
2953 }
2954
2955 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
2956 kfree_skb(skb);
2957 return -EAGAIN;
2958 }
2959
2960 skb_dst_force(skb);
2961
2962	spin_lock_bh(&pq->hold_queue.lock);
2963
2964	if (!pq->timeout)
2965		pq->timeout = XFRM_QUEUE_TMO_MIN;
2966
2967	sched_next = jiffies + pq->timeout;
2968
2969	if (del_timer(&pq->hold_timer)) {
2970		if (time_before(pq->hold_timer.expires, sched_next))
2971			sched_next = pq->hold_timer.expires;
2972		xfrm_pol_put(pol);
2973	}
2974
2975	__skb_queue_tail(&pq->hold_queue, skb);
2976	if (!mod_timer(&pq->hold_timer, sched_next))
2977		xfrm_pol_hold(pol);
2978
2979	spin_unlock_bh(&pq->hold_queue.lock);
2980
2981 return 0;
2982}
2983
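/* Build a bundle that only queues packets, used while the needed SAs
 * are still being negotiated.  Its output hook is xdst_queue_output()
 * and DST_XFRM_QUEUE marks it as a placeholder.  If queueing was not
 * requested, larval drop is enabled or no transforms are required, a
 * bare xfrm_dst is returned instead.
 */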
2984static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
2985 struct xfrm_flo *xflo,
2986 const struct flowi *fl,
2987 int num_xfrms,
2988 u16 family)
2989{
2990 int err;
2991 struct net_device *dev;
2992 struct dst_entry *dst;
2993 struct dst_entry *dst1;
2994 struct xfrm_dst *xdst;
2995
2996 xdst = xfrm_alloc_dst(net, family);
2997	if (IS_ERR(xdst))
2998 return xdst;
2999
3000 if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
3001 net->xfrm.sysctl_larval_drop ||
3002 num_xfrms <= 0)
3003 return xdst;
3004
3005 dst = xflo->dst_orig;
3006 dst1 = &xdst->u.dst;
3007 dst_hold(dst);
3008 xdst->route = dst;
3009
3010	dst_copy_metrics(dst1, dst);
3011
3012 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
3013 dst1->flags |= DST_XFRM_QUEUE;
3014 dst1->lastuse = jiffies;
3015
3016 dst1->input = dst_discard;
3017 dst1->output = xdst_queue_output;
3018
3019 dst_hold(dst);
3020	xfrm_dst_set_child(xdst, dst);
3021	xdst->path = dst;
3022
3023	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
3024
3025 err = -ENODEV;
3026 dev = dst->dev;
3027 if (!dev)
3028 goto free_dst;
3029
3030 err = xfrm_fill_dst(xdst, dev, fl);
3031 if (err)
3032 goto free_dst;
3033
3034out:
3035 return xdst;
3036
3037free_dst:
3038	dst_release(dst1);
3039	xdst = ERR_PTR(err);
3040 goto out;
3041}
3042
3043static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
3044 const struct flowi *fl,
3045 u16 family, u8 dir,
3046 struct xfrm_flo *xflo, u32 if_id)
3047{
3048 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3049 int num_pols = 0, num_xfrms = 0, err;
3050 struct xfrm_dst *xdst;
3051
3052	/* Resolve the policies to use if we couldn't get them from a
3053	 * previous cache entry. */
3054	num_pols = 1;
3055	pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
3056	err = xfrm_expand_policies(fl, family, pols,
3057				   &num_pols, &num_xfrms);
3058 if (err < 0)
3059 goto inc_error;
3060 if (num_pols == 0)
3061 return NULL;
3062 if (num_xfrms <= 0)
3063 goto make_dummy_bundle;
3064
3065	xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
3066					      xflo->dst_orig);
3067	if (IS_ERR(xdst)) {
3068		err = PTR_ERR(xdst);
3069		if (err == -EREMOTE) {
3070			xfrm_pols_put(pols, num_pols);
3071 return NULL;
3072 }
3073
3074 if (err != -EAGAIN)
3075 goto error;
3076 goto make_dummy_bundle;
3077 } else if (xdst == NULL) {
3078 num_xfrms = 0;
3079 goto make_dummy_bundle;
3080 }
3081
3082 return xdst;
3083
3084make_dummy_bundle:
3085	/* We found policies, but there are no bundles to instantiate:
3086	 * either the policy blocks, has no transformations, or we could
3087	 * not build a template (no xfrm_states). */
3088	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
3089	if (IS_ERR(xdst)) {
3090		xfrm_pols_put(pols, num_pols);
3091		return ERR_CAST(xdst);
3092 }
3093 xdst->num_pols = num_pols;
3094 xdst->num_xfrms = num_xfrms;
3095 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
3096
3097 return xdst;
3098
3099inc_error:
3100 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
3101error:
3102	xfrm_pols_put(pols, num_pols);
3103	return ERR_PTR(err);
3104}
3105
3106static struct dst_entry *make_blackhole(struct net *net, u16 family,
3107 struct dst_entry *dst_orig)
3108{
3109 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
3110 struct dst_entry *ret;
3111
3112 if (!afinfo) {
3113		dst_release(dst_orig);
3114		return ERR_PTR(-EINVAL);
3115 } else {
3116 ret = afinfo->blackhole_route(net, dst_orig);
3117 }
3118 rcu_read_unlock();
3119
3120 return ret;
3121}
3122
3123/* Finds/creates a bundle for the given flow and if_id.
3124 *
3125 * At the moment we eat a raw IP route. Mostly to speed up lookups
3126 * on interfaces with disabled IPsec.
3127 *
3128 * xfrm_lookup uses an if_id of 0 by default, and is provided for
3129 * compatibility.
3130 */
3131struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
3132 struct dst_entry *dst_orig,
3133 const struct flowi *fl,
3134 const struct sock *sk,
3135 int flags, u32 if_id)
3136{
3137 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3138 struct xfrm_dst *xdst;
3139 struct dst_entry *dst, *route;
3140 u16 family = dst_orig->ops->family;
3141 u8 dir = XFRM_POLICY_OUT;
3142 int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
3143
3144 dst = NULL;
3145 xdst = NULL;
3146 route = NULL;
3147
3148 sk = sk_const_to_full_sk(sk);
3149 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
3150 num_pols = 1;
3151		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
3152						if_id);
3153		err = xfrm_expand_policies(fl, family, pols,
3154					   &num_pols, &num_xfrms);
3155 if (err < 0)
3156 goto dropdst;
3157
3158 if (num_pols) {
3159 if (num_xfrms <= 0) {
3160 drop_pols = num_pols;
3161 goto no_transform;
3162 }
3163
3164 xdst = xfrm_resolve_and_create_bundle(
3165 pols, num_pols, fl,
3166 family, dst_orig);
3167
3168			if (IS_ERR(xdst)) {
3169				xfrm_pols_put(pols, num_pols);
3170				err = PTR_ERR(xdst);
3171 if (err == -EREMOTE)
3172 goto nopol;
3173
3174 goto dropdst;
3175 } else if (xdst == NULL) {
3176 num_xfrms = 0;
3177 drop_pols = num_pols;
3178 goto no_transform;
3179 }
3180
3181 route = xdst->route;
3182 }
3183 }
3184
3185 if (xdst == NULL) {
3186 struct xfrm_flo xflo;
3187
3188 xflo.dst_orig = dst_orig;
3189 xflo.flags = flags;
3190
3191 /* To accelerate a bit... */
3192 if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
3193 !net->xfrm.policy_count[XFRM_POLICY_OUT]))
3194 goto nopol;
3195
3196		xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
3197 if (xdst == NULL)
3198 goto nopol;
3199		if (IS_ERR(xdst)) {
3200			err = PTR_ERR(xdst);
3201 goto dropdst;
3202 }
3203
3204 num_pols = xdst->num_pols;
3205 num_xfrms = xdst->num_xfrms;
3206 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
3207 route = xdst->route;
3208 }
3209
3210 dst = &xdst->u.dst;
3211	if (route == NULL && num_xfrms > 0) {
3212		/* The only case when xfrm_bundle_lookup() returns a
3213		 * bundle with a null route is when the template could
3214		 * not be resolved. It means the policies are there, but
3215		 * the bundle could not be created, since we don't yet
3216		 * have the xfrm_states. We need to wait for KM to
3217		 * negotiate new SAs or bail out with an error. */
3218 if (net->xfrm.sysctl_larval_drop) {
3219 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3220 err = -EREMOTE;
3221 goto error;
3222 }
3223
3224 err = -EAGAIN;
3225
3226 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3227 goto error;
3228 }
3229
3230no_transform:
3231 if (num_pols == 0)
3232 goto nopol;
3233
3234 if ((flags & XFRM_LOOKUP_ICMP) &&
3235 !(pols[0]->flags & XFRM_POLICY_ICMP)) {
3236 err = -ENOENT;
3237 goto error;
3238 }
3239
3240 for (i = 0; i < num_pols; i++)
3241 WRITE_ONCE(pols[i]->curlft.use_time, ktime_get_real_seconds());
3242
3243 if (num_xfrms < 0) {
3244 /* Prohibit the flow */
3245 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
3246 err = -EPERM;
3247 goto error;
3248 } else if (num_xfrms > 0) {
3249 /* Flow transformed */
3250		dst_release(dst_orig);
3251 } else {
3252 /* Flow passes untransformed */
3253 dst_release(dst);
3254 dst = dst_orig;
3255 }
3256ok:
3257	xfrm_pols_put(pols, drop_pols);
3258 if (dst && dst->xfrm &&
3259 dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
3260 dst->flags |= DST_XFRM_TUNNEL;
3261 return dst;
3262
3263nopol:
3264 if ((!dst_orig->dev || !(dst_orig->dev->flags & IFF_LOOPBACK)) &&
3265 net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
3266 err = -EPERM;
3267 goto error;
3268 }
3269 if (!(flags & XFRM_LOOKUP_ICMP)) {
3270 dst = dst_orig;
3271 goto ok;
3272 }
3273 err = -ENOENT;
3274error:
3275	dst_release(dst);
3276dropdst:
3277	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
3278		dst_release(dst_orig);
3279	xfrm_pols_put(pols, drop_pols);
3280	return ERR_PTR(err);
3281}
3282EXPORT_SYMBOL(xfrm_lookup_with_ifid);
3283
3284/* Main function: finds/creates a bundle for given flow.
3285 *
3286 * At the moment we eat a raw IP route. Mostly to speed up lookups
3287 * on interfaces with disabled IPsec.
3288 */
3289struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
3290 const struct flowi *fl, const struct sock *sk,
3291 int flags)
3292{
3293 return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
3294}
3295EXPORT_SYMBOL(xfrm_lookup);
3296
3297/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
3298 * Otherwise we may send out blackholed packets.
3299 */
3300struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
3301 const struct flowi *fl,
3302 const struct sock *sk, int flags)
3303{
3304 struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
3305 flags | XFRM_LOOKUP_QUEUE |
3306 XFRM_LOOKUP_KEEP_DST_REF);
3307
3308	if (PTR_ERR(dst) == -EREMOTE)
3309		return make_blackhole(net, dst_orig->ops->family, dst_orig);
3310
3311	if (IS_ERR(dst))
3312		dst_release(dst_orig);
3313
3314 return dst;
3315}
3316EXPORT_SYMBOL(xfrm_lookup_route);
3317
3318static inline int
3319xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
3320{
3321 struct sec_path *sp = skb_sec_path(skb);
3322 struct xfrm_state *x;
3323
3324 if (!sp || idx < 0 || idx >= sp->len)
3325 return 0;
3326 x = sp->xvec[idx];
3327 if (!x->type->reject)
3328 return 0;
3329 return x->type->reject(x, skb, fl);
3330}
3331
3332/* When an skb is transformed back to its "native" form, we have to
3333 * check policy restrictions. At the moment we do this in a maximally
3334 * stupid way. Shame on me. :-) Of course, connected sockets must
3335 * have their policy cached at them.
3336 */
3337
3338static inline int
3339xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
3340 unsigned short family, u32 if_id)
3341{
3342 if (xfrm_state_kern(x))
3343		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
3344 return x->id.proto == tmpl->id.proto &&
3345 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
3346 (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
3347 x->props.mode == tmpl->mode &&
3348 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
3349		  !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
3350 !(x->props.mode != XFRM_MODE_TRANSPORT &&
3351 xfrm_state_addr_cmp(tmpl, x, family)) &&
3352 (if_id == 0 || if_id == x->if_id);
3353}
3354
3355/*
3356 * 0 or more than 0 is returned when validation succeeds (either a bypass
3357 * because of an optional transport mode, or the next index of the secpath
3358 * state matched against the template).
3359 * -1 is returned when no matching template is found.
3360 * Otherwise "-2 - errored_index" is returned.
3361 */
3362static inline int
3363xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
3364 unsigned short family, u32 if_id)
3365{
3366 int idx = start;
3367
3368 if (tmpl->optional) {
3369 if (tmpl->mode == XFRM_MODE_TRANSPORT)
3370 return start;
3371 } else
3372 start = -1;
3373 for (; idx < sp->len; idx++) {
3374		if (xfrm_state_ok(tmpl, sp->xvec[idx], family, if_id))
3375 return ++idx;
3376 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
3377 if (idx < sp->verified_cnt) {
3378 /* Secpath entry previously verified, consider optional and
3379 * continue searching
3380 */
3381 continue;
3382 }
3383
3384 if (start == -1)
3385 start = -2-idx;
3386 break;
3387 }
3388 }
3389 return start;
3390}
3391
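/* Build a flowi from the pre-dissected flow keys.  With 'reverse' set
 * the address and port pairs are swapped; this is how the input path
 * constructs the return flow, e.g. for FWD lookups and ICMP error
 * handling.
 */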
3392static void
3393decode_session4(const struct xfrm_flow_keys *flkeys, struct flowi *fl, bool reverse)
3394{
3395 struct flowi4 *fl4 = &fl->u.ip4;
3396
3397 memset(fl4, 0, sizeof(struct flowi4));
3398
3399 if (reverse) {
3400 fl4->saddr = flkeys->addrs.ipv4.dst;
3401 fl4->daddr = flkeys->addrs.ipv4.src;
3402 fl4->fl4_sport = flkeys->ports.dst;
3403 fl4->fl4_dport = flkeys->ports.src;
3404 } else {
3405 fl4->saddr = flkeys->addrs.ipv4.src;
3406 fl4->daddr = flkeys->addrs.ipv4.dst;
3407 fl4->fl4_sport = flkeys->ports.src;
3408 fl4->fl4_dport = flkeys->ports.dst;
3409 }
3410
3411 switch (flkeys->basic.ip_proto) {
3412 case IPPROTO_GRE:
3413 fl4->fl4_gre_key = flkeys->gre.keyid;
3414 break;
3415 case IPPROTO_ICMP:
3416 fl4->fl4_icmp_type = flkeys->icmp.type;
3417 fl4->fl4_icmp_code = flkeys->icmp.code;
3418 break;
3419 }
3420
3421 fl4->flowi4_proto = flkeys->basic.ip_proto;
3422 fl4->flowi4_tos = flkeys->ip.tos & ~INET_ECN_MASK;
3423}
3424
3425#if IS_ENABLED(CONFIG_IPV6)
3426static void
3427decode_session6(const struct xfrm_flow_keys *flkeys, struct flowi *fl, bool reverse)
3428{
3429 struct flowi6 *fl6 = &fl->u.ip6;
3430
3431 memset(fl6, 0, sizeof(struct flowi6));
3432
3433 if (reverse) {
3434 fl6->saddr = flkeys->addrs.ipv6.dst;
3435 fl6->daddr = flkeys->addrs.ipv6.src;
3436 fl6->fl6_sport = flkeys->ports.dst;
3437 fl6->fl6_dport = flkeys->ports.src;
3438 } else {
3439 fl6->saddr = flkeys->addrs.ipv6.src;
3440 fl6->daddr = flkeys->addrs.ipv6.dst;
3441 fl6->fl6_sport = flkeys->ports.src;
3442 fl6->fl6_dport = flkeys->ports.dst;
3443 }
3444
3445 switch (flkeys->basic.ip_proto) {
3446 case IPPROTO_GRE:
3447 fl6->fl6_gre_key = flkeys->gre.keyid;
3448 break;
3449 case IPPROTO_ICMPV6:
3450 fl6->fl6_icmp_type = flkeys->icmp.type;
3451 fl6->fl6_icmp_code = flkeys->icmp.code;
3452 break;
3453 }
3454
3455 fl6->flowi6_proto = flkeys->basic.ip_proto;
3456}
3457#endif
3458
3459int __xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl,
3460 unsigned int family, int reverse)
3461{
3462 struct xfrm_flow_keys flkeys;
3463
3464 memset(&flkeys, 0, sizeof(flkeys));
3465	__skb_flow_dissect(net, skb, &xfrm_session_dissector, &flkeys,
3466			   NULL, 0, 0, 0, FLOW_DISSECTOR_F_STOP_AT_ENCAP);
3467
3468 switch (family) {
3469 case AF_INET:
3470		decode_session4(&flkeys, fl, reverse);
3471 break;
3472#if IS_ENABLED(CONFIG_IPV6)
3473 case AF_INET6:
3474		decode_session6(&flkeys, fl, reverse);
3475 break;
3476#endif
3477 default:
3478 return -EAFNOSUPPORT;
3479 }
3480
3481 fl->flowi_mark = skb->mark;
3482 if (reverse) {
3483 fl->flowi_oif = skb->skb_iif;
3484 } else {
3485 int oif = 0;
3486
3487 if (skb_dst(skb) && skb_dst(skb)->dev)
3488 oif = skb_dst(skb)->dev->ifindex;
3489
3490 fl->flowi_oif = oif;
3491 }
3492
3493	return security_xfrm_decode_session(skb, &fl->flowi_secid);
3494}
3495EXPORT_SYMBOL(__xfrm_decode_session);
3496
3497static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
3498{
3499 for (; k < sp->len; k++) {
3500 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
3501 *idxp = k;
3502 return 1;
3503 }
3504 }
3505
3506 return 0;
3507}
3508
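/* ICMP errors quote the packet that triggered them.  The helpers below
 * recognize such errors and re-decode the quoted inner packet (with
 * reversed addresses) so that selectors can be matched against the
 * offending flow rather than against the ICMP wrapper itself.
 */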
3509static bool icmp_err_packet(const struct flowi *fl, unsigned short family)
3510{
3511 const struct flowi4 *fl4 = &fl->u.ip4;
3512
3513 if (family == AF_INET &&
3514 fl4->flowi4_proto == IPPROTO_ICMP &&
3515 (fl4->fl4_icmp_type == ICMP_DEST_UNREACH ||
3516 fl4->fl4_icmp_type == ICMP_TIME_EXCEEDED))
3517 return true;
3518
3519#if IS_ENABLED(CONFIG_IPV6)
3520 if (family == AF_INET6) {
3521 const struct flowi6 *fl6 = &fl->u.ip6;
3522
3523 if (fl6->flowi6_proto == IPPROTO_ICMPV6 &&
3524 (fl6->fl6_icmp_type == ICMPV6_DEST_UNREACH ||
3525 fl6->fl6_icmp_type == ICMPV6_PKT_TOOBIG ||
3526 fl6->fl6_icmp_type == ICMPV6_TIME_EXCEED))
3527 return true;
3528 }
3529#endif
3530 return false;
3531}
3532
3533static bool xfrm_icmp_flow_decode(struct sk_buff *skb, unsigned short family,
3534 const struct flowi *fl, struct flowi *fl1)
3535{
3536 bool ret = true;
3537 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
3538 int hl = family == AF_INET ? (sizeof(struct iphdr) + sizeof(struct icmphdr)) :
3539 (sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr));
3540
3541 if (!newskb)
3542 return true;
3543
3544	if (!pskb_pull(newskb, hl))
3545		goto out;
3546
3547	skb_reset_network_header(newskb);
3548
3549	if (xfrm_decode_session_reverse(dev_net(skb->dev), newskb, fl1, family) < 0)
3550 goto out;
3551
3552 fl1->flowi_oif = fl->flowi_oif;
3553 fl1->flowi_mark = fl->flowi_mark;
3554 fl1->flowi_tos = fl->flowi_tos;
3555	nf_nat_decode_session(newskb, fl1, family);
3556 ret = false;
3557
3558out:
3559	consume_skb(newskb);
3560 return ret;
3561}
3562
3563static bool xfrm_selector_inner_icmp_match(struct sk_buff *skb, unsigned short family,
3564 const struct xfrm_selector *sel,
3565 const struct flowi *fl)
3566{
3567 bool ret = false;
3568
3569 if (icmp_err_packet(fl, family)) {
3570 struct flowi fl1;
3571
3572		if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
3573			return ret;
3574
3575		ret = xfrm_selector_match(sel, &fl1, family);
3576 }
3577
3578 return ret;
3579}
3580
3581static inline struct
3582xfrm_policy *xfrm_in_fwd_icmp(struct sk_buff *skb,
3583 const struct flowi *fl, unsigned short family,
3584 u32 if_id)
3585{
3586 struct xfrm_policy *pol = NULL;
3587
3588 if (icmp_err_packet(fl, family)) {
3589 struct flowi fl1;
3590		struct net *net = dev_net(skb->dev);
3591
3592		if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
3593			return pol;
3594
3595		pol = xfrm_policy_lookup(net, &fl1, family, XFRM_POLICY_FWD, if_id);
3596 }
3597
3598 return pol;
3599}
3600
3601static inline struct
3602dst_entry *xfrm_out_fwd_icmp(struct sk_buff *skb, struct flowi *fl,
3603 unsigned short family, struct dst_entry *dst)
3604{
3605 if (icmp_err_packet(fl, family)) {
3606		struct net *net = dev_net(skb->dev);
3607		struct dst_entry *dst2;
3608		struct flowi fl1;
3609
3610		if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
3611			return dst;
3612
3613		dst_hold(dst);
3614
3615		dst2 = xfrm_lookup(net, dst, &fl1, NULL, (XFRM_LOOKUP_QUEUE | XFRM_LOOKUP_ICMP));
3616
3617		if (IS_ERR(dst2))
3618			return dst;
3619
3620		if (dst2->xfrm) {
3621			dst_release(dst);
3622			dst = dst2;
3623		} else {
3624			dst_release(dst2);
3625 }
3626 }
3627
3628 return dst;
3629}
3630
3631int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3632 unsigned short family)
3633{
3634	struct net *net = dev_net(skb->dev);
3635 struct xfrm_policy *pol;
3636 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3637 int npols = 0;
3638 int xfrm_nr;
3639 int pi;
3640 int reverse;
3641 struct flowi fl;
3642 int xerr_idx = -1;
3643 const struct xfrm_if_cb *ifcb;
3644 struct sec_path *sp;
3645 u32 if_id = 0;
3646
3647 rcu_read_lock();
3648 ifcb = xfrm_if_get_cb();
3649
3650 if (ifcb) {
3651 struct xfrm_if_decode_session_result r;
3652
3653 if (ifcb->decode_session(skb, family, &r)) {
3654 if_id = r.if_id;
3655 net = r.net;
3656 }
3657 }
3658 rcu_read_unlock();
3659
3660 reverse = dir & ~XFRM_POLICY_MASK;
3661 dir &= XFRM_POLICY_MASK;
3662
3663 if (__xfrm_decode_session(net, skb, &fl, family, reverse) < 0) {
3664 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
3665 return 0;
3666 }
3667
3668	nf_nat_decode_session(skb, &fl, family);
3669
3670 /* First, check used SA against their selectors. */
3671 sp = skb_sec_path(skb);
3672 if (sp) {
3673 int i;
3674
3675 for (i = sp->len - 1; i >= 0; i--) {
3676 struct xfrm_state *x = sp->xvec[i];
3677 int ret = 0;
3678
3679			if (!xfrm_selector_match(&x->sel, &fl, family)) {
3680				ret = 1;
3681				if (x->props.flags & XFRM_STATE_ICMP &&
3682				    xfrm_selector_inner_icmp_match(skb, family, &x->sel, &fl))
3683 ret = 0;
3684 if (ret) {
3685 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
3686 return 0;
3687 }
3688 }
3689 }
3690 }
3691
3692 pol = NULL;
3693 sk = sk_to_full_sk(sk);
3694 if (sk && sk->sk_policy[dir]) {
3695		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
3696		if (IS_ERR(pol)) {
3697 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3698 return 0;
3699 }
3700 }
3701
3702 if (!pol)
3703		pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
3704
3705	if (IS_ERR(pol)) {
3706 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3707 return 0;
3708 }
3709
3710 if (!pol && dir == XFRM_POLICY_FWD)
3711		pol = xfrm_in_fwd_icmp(skb, &fl, family, if_id);
3712
3713 if (!pol) {
3714 if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
3715 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3716 return 0;
3717 }
3718
3719		if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
3720			xfrm_secpath_reject(xerr_idx, skb, &fl);
3721 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3722 return 0;
3723 }
3724 return 1;
3725 }
3726
3727 /* This lockless write can happen from different cpus. */
3728 WRITE_ONCE(pol->curlft.use_time, ktime_get_real_seconds());
3729
3730 pols[0] = pol;
3731 npols++;
3732#ifdef CONFIG_XFRM_SUB_POLICY
3733 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
3734		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
3735						    &fl, family,
3736						    XFRM_POLICY_IN, if_id);
3737		if (pols[1]) {
3738			if (IS_ERR(pols[1])) {
3739				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3740				xfrm_pol_put(pols[0]);
3741 return 0;
3742 }
3743 /* This write can happen from different cpus. */
3744 WRITE_ONCE(pols[1]->curlft.use_time,
3745 ktime_get_real_seconds());
3746 npols++;
3747 }
3748 }
3749#endif
3750
3751 if (pol->action == XFRM_POLICY_ALLOW) {
3752 static struct sec_path dummy;
3753 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
3754 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
3755 struct xfrm_tmpl **tpp = tp;
3756 int ti = 0;
3757 int i, k;
3758
3759 sp = skb_sec_path(skb);
3760 if (!sp)
3761 sp = &dummy;
3762
3763 for (pi = 0; pi < npols; pi++) {
3764 if (pols[pi] != pol &&
3765 pols[pi]->action != XFRM_POLICY_ALLOW) {
3766 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3767 goto reject;
3768 }
3769 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
3770 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
3771 goto reject_error;
3772 }
3773 for (i = 0; i < pols[pi]->xfrm_nr; i++)
3774 tpp[ti++] = &pols[pi]->xfrm_vec[i];
3775 }
3776 xfrm_nr = ti;
3777
3778 if (npols > 1) {
3779			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
3780 tpp = stp;
3781 }
3782
3783 /* For each tunnel xfrm, find the first matching tmpl.
3784 * For each tmpl before that, find corresponding xfrm.
3785 * Order is _important_. Later we will implement
3786 * some barriers, but at the moment barriers
3787 * are implied between each two transformations.
3788 * Upon success, marks secpath entries as having been
3789 * verified to allow them to be skipped in future policy
3790 * checks (e.g. nested tunnels).
3791 */
3792 for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
3793			k = xfrm_policy_ok(tpp[i], sp, k, family, if_id);
3794 if (k < 0) {
3795 if (k < -1)
3796 /* "-2 - errored_index" returned */
3797 xerr_idx = -(2+k);
3798 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3799 goto reject;
3800 }
3801 }
3802
3803		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
3804 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3805 goto reject;
3806 }
3807
3808 xfrm_pols_put(pols, npols);
3809 sp->verified_cnt = k;
3810
3811 return 1;
3812 }
3813 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3814
3815reject:
3816	xfrm_secpath_reject(xerr_idx, skb, &fl);
3817reject_error:
3818 xfrm_pols_put(pols, npols);
3819 return 0;
3820}
3821EXPORT_SYMBOL(__xfrm_policy_check);
3822
3823int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
3824{
3825	struct net *net = dev_net(skb->dev);
3826 struct flowi fl;
3827 struct dst_entry *dst;
3828 int res = 1;
3829
3830	if (xfrm_decode_session(net, skb, &fl, family) < 0) {
3831 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3832 return 0;
3833 }
3834
3835 skb_dst_force(skb);
3836 if (!skb_dst(skb)) {
3837 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3838 return 0;
3839 }
3840
3841 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
3842	if (IS_ERR(dst)) {
3843 res = 0;
3844 dst = NULL;
3845 }
3846
3847 if (dst && !dst->xfrm)
3848		dst = xfrm_out_fwd_icmp(skb, &fl, family, dst);
3849
3850 skb_dst_set(skb, dst);
3851 return res;
3852}
3853EXPORT_SYMBOL(__xfrm_route_forward);
3854
3855/* Optimize later using cookies and generation ids. */
3856
3857static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
3858{
3859 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
3860 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
3861 * get validated by dst_ops->check on every use. We do this
3862 * because when a normal route referenced by an XFRM dst is
3863 * obsoleted we do not go looking around for all parent
3864 * referencing XFRM dsts so that we can invalidate them. It
3865 * is just too much work. Instead we make the checks here on
3866 * every use. For example:
3867 *
3868 * XFRM dst A --> IPv4 dst X
3869 *
3870 * X is the "xdst->route" of A (X is also the "dst->path" of A
3871 * in this example). If X is marked obsolete, "A" will not
3872 * notice. That's what we are validating here via the
3873 * stale_bundle() check.
3874 *
3875 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
3876 * be marked on it.
3877 * This will force stale_bundle() to fail on any xdst bundle with
3878 * this dst linked in it.
3879 */
3880 if (dst->obsolete < 0 && !stale_bundle(dst))
3881 return dst;
3882
3883 return NULL;
3884}
3885
3886static int stale_bundle(struct dst_entry *dst)
3887{
3888	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
3889}
3890
3891void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
3892{
3893 while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
3894 dst->dev = blackhole_netdev;
3895		dev_hold(dst->dev);
3896 dev_put(dev);
3897 }
3898}
3899EXPORT_SYMBOL(xfrm_dst_ifdown);
3900
3901static void xfrm_link_failure(struct sk_buff *skb)
3902{
3903	/* Impossible. Such a dst must be popped before it reaches the point of failure. */
3904}
3905
3906static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
3907{
3908 if (dst) {
3909 if (dst->obsolete) {
3910 dst_release(dst);
3911 dst = NULL;
3912 }
3913 }
3914 return dst;
3915}
3916
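/* Seed the per-layer MTU caches of a freshly built bundle: each
 * layer's MTU is the child MTU reduced by the state's overhead via
 * xfrm_state_mtu(), clamped to the MTU of the route carrying the
 * encapsulated packets.
 */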
3917static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
3918{
3919 while (nr--) {
3920 struct xfrm_dst *xdst = bundle[nr];
3921 u32 pmtu, route_mtu_cached;
3922 struct dst_entry *dst;
3923
3924 dst = &xdst->u.dst;
3925		pmtu = dst_mtu(xfrm_dst_child(dst));
3926		xdst->child_mtu_cached = pmtu;
3927
3928		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
3929
3930		route_mtu_cached = dst_mtu(xdst->route);
3931 xdst->route_mtu_cached = route_mtu_cached;
3932
3933 if (pmtu > route_mtu_cached)
3934 pmtu = route_mtu_cached;
3935
3936		dst_metric_set(dst, RTAX_MTU, pmtu);
3937 }
3938}
3939
3940/* Check that the bundle accepts the flow and its components are
3941 * still valid.
3942 */
3943
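/* The walk starts at the outermost xfrm_dst and revalidates every
 * route and state on the way in.  If a cached child or route MTU
 * changed, the MTUs are recomputed bottom-up from the deepest changed
 * layer (start_from), mirroring xfrm_init_pmtu().
 */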
3944static int xfrm_bundle_ok(struct xfrm_dst *first)
3945{
3946 struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
3947 struct dst_entry *dst = &first->u.dst;
3948 struct xfrm_dst *xdst;
3949 int start_from, nr;
3950 u32 mtu;
3951
3952	if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
3953	    (dst->dev && !netif_running(dst->dev)))
3954 return 0;
3955
3956 if (dst->flags & DST_XFRM_QUEUE)
3957 return 1;
3958
3959 start_from = nr = 0;
3960 do {
3961 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
3962
3963 if (dst->xfrm->km.state != XFRM_STATE_VALID)
3964 return 0;
3965 if (xdst->xfrm_genid != dst->xfrm->genid)
3966 return 0;
3967 if (xdst->num_pols > 0 &&
3968		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
3969 return 0;
3970
3971 bundle[nr++] = xdst;
3972
3973		mtu = dst_mtu(xfrm_dst_child(dst));
3974 if (xdst->child_mtu_cached != mtu) {
3975 start_from = nr;
3976 xdst->child_mtu_cached = mtu;
3977 }
3978
3979		if (!dst_check(xdst->route, xdst->route_cookie))
3980			return 0;
3981		mtu = dst_mtu(xdst->route);
3982 if (xdst->route_mtu_cached != mtu) {
3983 start_from = nr;
3984 xdst->route_mtu_cached = mtu;
3985 }
3986
3987 dst = xfrm_dst_child(dst);
3988 } while (dst->xfrm);
3989
3990 if (likely(!start_from))
3991 return 1;
3992
3993 xdst = bundle[start_from - 1];
3994 mtu = xdst->child_mtu_cached;
3995 while (start_from--) {
3996 dst = &xdst->u.dst;
3997
3998		mtu = xfrm_state_mtu(dst->xfrm, mtu);
3999		if (mtu > xdst->route_mtu_cached)
4000			mtu = xdst->route_mtu_cached;
4001		dst_metric_set(dst, RTAX_MTU, mtu);
4002 if (!start_from)
4003 break;
4004
4005 xdst = bundle[start_from - 1];
4006 xdst->child_mtu_cached = mtu;
4007 }
4008
4009 return 1;
4010}
4011
4012static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
4013{
4014	return dst_metric_advmss(xfrm_dst_path(dst));
4015}
4016
4017static unsigned int xfrm_mtu(const struct dst_entry *dst)
4018{
4019 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
4020
4021	return mtu ? : dst_mtu(xfrm_dst_path(dst));
4022}
4023
4024static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
4025 const void *daddr)
4026{
4027 while (dst->xfrm) {
4028 const struct xfrm_state *xfrm = dst->xfrm;
4029
4030 dst = xfrm_dst_child(dst);
4031
4032 if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
4033 continue;
4034 if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
4035 daddr = xfrm->coaddr;
4036 else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
4037 daddr = &xfrm->id.daddr;
4038 }
4039 return daddr;
4040}
4041
4042static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
4043 struct sk_buff *skb,
4044 const void *daddr)
4045{
4046 const struct dst_entry *path = xfrm_dst_path(dst);
4047
4048 if (!skb)
4049 daddr = xfrm_get_dst_nexthop(dst, daddr);
4050 return path->ops->neigh_lookup(path, skb, daddr);
4051}
4052
4053static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
4054{
4055 const struct dst_entry *path = xfrm_dst_path(dst);
4056
4057 daddr = xfrm_get_dst_nexthop(dst, daddr);
4058 path->ops->confirm_neigh(path, daddr);
4059}
4060
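/* Hook the generic xfrm implementations into the family's dst_ops.
 * Only operations the AF module left NULL are filled in, so a family
 * can keep its own overrides.
 */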
4061int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
4062{
4063 int err = 0;
4064
4065 if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
4066 return -EAFNOSUPPORT;
4067
4068	spin_lock(&xfrm_policy_afinfo_lock);
4069 if (unlikely(xfrm_policy_afinfo[family] != NULL))
4070 err = -EEXIST;
4071 else {
4072 struct dst_ops *dst_ops = afinfo->dst_ops;
4073 if (likely(dst_ops->kmem_cachep == NULL))
4074 dst_ops->kmem_cachep = xfrm_dst_cache;
4075 if (likely(dst_ops->check == NULL))
4076 dst_ops->check = xfrm_dst_check;
4077 if (likely(dst_ops->default_advmss == NULL))
4078 dst_ops->default_advmss = xfrm_default_advmss;
4079 if (likely(dst_ops->mtu == NULL))
4080 dst_ops->mtu = xfrm_mtu;
4081 if (likely(dst_ops->negative_advice == NULL))
4082 dst_ops->negative_advice = xfrm_negative_advice;
4083 if (likely(dst_ops->link_failure == NULL))
4084 dst_ops->link_failure = xfrm_link_failure;
4085 if (likely(dst_ops->neigh_lookup == NULL))
4086 dst_ops->neigh_lookup = xfrm_neigh_lookup;
4087 if (likely(!dst_ops->confirm_neigh))
4088 dst_ops->confirm_neigh = xfrm_confirm_neigh;
4089 rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
4090 }
4091 spin_unlock(lock: &xfrm_policy_afinfo_lock);
4092
4093 return err;
4094}
4095EXPORT_SYMBOL(xfrm_policy_register_afinfo);
4096
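/* Remove an address family's backend and, once all RCU readers are
 * done, clear the dst_ops hooks that were pointed at xfrm helpers.
 */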
void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
{
	struct dst_ops *dst_ops = afinfo->dst_ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
		if (xfrm_policy_afinfo[i] != afinfo)
			continue;
		RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
		break;
	}

	synchronize_rcu();

	dst_ops->kmem_cachep = NULL;
	dst_ops->check = NULL;
	dst_ops->negative_advice = NULL;
	dst_ops->link_failure = NULL;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
{
	spin_lock(&xfrm_if_cb_lock);
	rcu_assign_pointer(xfrm_if_cb, ifcb);
	spin_unlock(&xfrm_if_cb_lock);
}
EXPORT_SYMBOL(xfrm_if_register_cb);

void xfrm_if_unregister_cb(void)
{
	RCU_INIT_POINTER(xfrm_if_cb, NULL);
	synchronize_rcu();
}
EXPORT_SYMBOL(xfrm_if_unregister_cb);

#ifdef CONFIG_XFRM_STATISTICS
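/* Allocate the per-cpu xfrm MIB counters for this netns and register
 * the corresponding /proc interface; unwind the allocation on failure.
 */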
static int __net_init xfrm_statistics_init(struct net *net)
{
	int rv;
	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
	if (!net->mib.xfrm_statistics)
		return -ENOMEM;
	rv = xfrm_proc_init(net);
	if (rv < 0)
		free_percpu(net->mib.xfrm_statistics);
	return rv;
}

static void xfrm_statistics_fini(struct net *net)
{
	xfrm_proc_fini(net);
	free_percpu(net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
{
	return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
}
#endif

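/* Set up the per-netns policy state: the by-index and by-destination
 * hash tables start with eight buckets and are grown later by the
 * xfrm_hash_resize worker; the hash prefix thresholds default to full
 * address lengths, so initially only fully-specified selectors land
 * in the exact hash.
 */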
static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir, err;

	if (net_eq(net, &init_net)) {
		xfrm_dst_cache = KMEM_CACHE(xfrm_dst, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
		err = rhashtable_init(&xfrm_policy_inexact_table,
				      &xfrm_pol_inexact_params);
		BUG_ON(err);
	}

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
		htab->dbits4 = 32;
		htab->sbits4 = 32;
		htab->dbits6 = 128;
		htab->sbits6 = 128;
	}
	net->xfrm.policy_hthresh.lbits4 = 32;
	net->xfrm.policy_hthresh.rbits4 = 32;
	net->xfrm.policy_hthresh.lbits6 = 128;
	net->xfrm.policy_hthresh.rbits6 = 128;

	seqlock_init(&net->xfrm.policy_hthresh.lock);

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_LIST_HEAD(&net->xfrm.inexact_bins);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}

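/* Tear down the per-netns policy state: flush all remaining policies,
 * free the hash tables (warning if anything is still queued) and prune
 * the leftover inexact bins.
 */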
static void xfrm_policy_fini(struct net *net)
{
	struct xfrm_pol_inexact_bin *b, *t;
	unsigned int sz;
	int dir;

	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
#endif
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
		__xfrm_policy_inexact_prune_bin(b, true);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}

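/* Per-netns init: all three default policies (in/fwd/out) start out
 * as ACCEPT, then the statistics, state, policy and sysctl subsystems
 * are brought up in order, unwinding on the first failure.
 */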
static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	/* Initialize the per-net locks here */
	spin_lock_init(&net->xfrm.xfrm_state_lock);
	spin_lock_init(&net->xfrm.xfrm_policy_lock);
	seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
	mutex_init(&net->xfrm.xfrm_cfg_mutex);
	net->xfrm.policy_default[XFRM_POLICY_IN] = XFRM_USERPOLICY_ACCEPT;
	net->xfrm.policy_default[XFRM_POLICY_FWD] = XFRM_USERPOLICY_ACCEPT;
	net->xfrm.policy_default[XFRM_POLICY_OUT] = XFRM_USERPOLICY_ACCEPT;

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;

	return 0;

out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

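/* Keys used by the flow dissector to extract the fields that form an
 * xfrm session selector: addresses, ports, protocol, GRE key, IP
 * header bits and ICMP type/code.
 */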
static const struct flow_dissector_key xfrm_flow_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct xfrm_flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct xfrm_flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct xfrm_flow_keys, addrs.ipv4),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct xfrm_flow_keys, addrs.ipv6),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct xfrm_flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct xfrm_flow_keys, gre),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IP,
		.offset = offsetof(struct xfrm_flow_keys, ip),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_ICMP,
		.offset = offsetof(struct xfrm_flow_keys, icmp),
	},
};

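/* Boot-time entry point: set up the session flow dissector, register
 * the per-netns operations and initialize the device offload and
 * input layers.
 */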
void __init xfrm_init(void)
{
	skb_flow_dissector_init(&xfrm_session_dissector,
				xfrm_flow_dissector_keys,
				ARRAY_SIZE(xfrm_flow_dissector_keys));

	register_pernet_subsys(&xfrm_net_ops);
	xfrm_dev_init();
	xfrm_input_init();

#ifdef CONFIG_XFRM_ESPINTCP
	espintcp_init();
#endif

	register_xfrm_state_bpf();
}

#ifdef CONFIG_AUDITSYSCALL
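/* Append the policy's security context and selector to an audit
 * record; prefix lengths are logged only when narrower than a full
 * host mask.
 */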
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}

void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif

#ifdef CONFIG_XFRM_MIGRATE
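/* A selector with wildcard protocol matches on family, addresses and
 * prefix lengths only; otherwise the two selectors must be identical.
 */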
static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
					const struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
				    sel_cmp->family) &&
		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
				    sel_cmp->family) &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return true;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return true;
		}
	}
	return false;
}

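/* Find the policy a MIGRATE request refers to: try the exact
 * by-destination hash chain first, then scan the inexact list, which
 * is kept sorted by priority, stopping once no inexact entry can beat
 * an exact match already found.
 */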
static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
						    u8 dir, u8 type, struct net *net, u32 if_id)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;
	u32 priority = ~0U;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, chain, bydst) {
		if ((if_id == 0 || pol->if_id == if_id) &&
		    xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		if ((pol->priority >= priority) && ret)
			break;

		if ((if_id == 0 || pol->if_id == if_id) &&
		    xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			break;
		}
	}

	xfrm_pol_hold(ret);

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	return ret;
}

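/* Does template @t correspond to migration entry @m?  Mode, protocol
 * and (if given) reqid must agree; tunnel and BEET modes additionally
 * compare the old endpoint addresses.
 */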
static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
					    m->old_family) &&
			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
					    m->old_family)) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in transport mode the template stores no IP
			 * addresses, so matching mode and protocol is
			 * sufficient */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate,
			       struct netlink_ext_ack *extack)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		NL_SET_ERR_MSG(extack, "Target policy not found");
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}

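/* Validate a MIGRATE request: the entry count must be within
 * XFRM_MAX_DEPTH, no new address may be unspecified, and no two
 * entries may describe the same old flow.
 */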
static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate,
			      struct netlink_ext_ack *extack)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH) {
		NL_SET_ERR_MSG(extack, "Invalid number of SAs to migrate, must be 0 < num <= XFRM_MAX_DEPTH (6)");
		return -EINVAL;
	}

	for (i = 0; i < num_migrate; i++) {
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family)) {
			NL_SET_ERR_MSG(extack, "Addresses in the MIGRATE attribute's list cannot be null");
			return -EINVAL;
		}

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family) {
				NL_SET_ERR_MSG(extack, "Entries in the MIGRATE attribute's list must be unique");
				return -EINVAL;
			}
		}
	}

	return 0;
}

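/* Migrate the states and policy templates matched by @sel/@dir/@type
 * to the new endpoints in @m: sanity-check the request, look up the
 * policy, clone each matching state onto the new addresses, rewrite
 * the policy templates, delete the old states and finally notify the
 * key managers.  On failure the cloned states are deleted again.
 */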
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k, struct net *net,
		 struct xfrm_encap_tmpl *encap, u32 if_id,
		 struct netlink_ext_ack *extack)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	/* Stage 0 - sanity checks */
	err = xfrm_migrate_check(m, num_migrate, extack);
	if (err < 0)
		goto out;

	if (dir >= XFRM_POLICY_MAX) {
		NL_SET_ERR_MSG(extack, "Invalid policy direction");
		err = -EINVAL;
		goto out;
	}

	/* Stage 1 - find policy */
	pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id);
	if (!pol) {
		NL_SET_ERR_MSG(extack, "Target policy not found");
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp, net, if_id))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			xc = xfrm_state_migrate(x, mp, encap);
			if (xc) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	err = xfrm_policy_migrate(pol, m, num_migrate, extack);
	if (err < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k, encap);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif
