// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
 * & Swedish University of Agricultural Sciences.
 *
 * Jens Laas <jens.laas@data.slu.se> Swedish University of
 * Agricultural Sciences.
 *
 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
 *
 * This work is based on the LPC-trie which is originally described in:
 *
 * An experimental study of compression methods for dynamic tries
 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
 * https://www.csc.kth.se/~snilsson/software/dyntrie2/
 *
 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
 *
 * Code from fib_hash has been reused which includes the following header:
 *
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 FIB: lookup engine and maintenance routines.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Substantial contributions to this work come from:
 *
 *		David S. Miller, <davem@davemloft.net>
 *		Stephen Hemminger <shemminger@osdl.org>
 *		Paul E. McKenney <paulmck@us.ibm.com>
 *		Patrick McHardy <kaber@trash.net>
 */
#include <linux/cache.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/notifier.h>
#include <net/net_namespace.h>
#include <net/inet_dscp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/fib_notifier.h>
#include <trace/events/fib.h>
#include "fib_lookup.h"

static int call_fib_entry_notifier(struct notifier_block *nb,
				   enum fib_event_type event_type, u32 dst,
				   int dst_len, struct fib_alias *fa,
				   struct netlink_ext_ack *extack)
{
	struct fib_entry_notifier_info info = {
		.info.extack = extack,
		.dst = dst,
		.dst_len = dst_len,
		.fi = fa->fa_info,
		.dscp = fa->fa_dscp,
		.type = fa->fa_type,
		.tb_id = fa->tb_id,
	};
	return call_fib4_notifier(nb, event_type, &info.info);
}

static int call_fib_entry_notifiers(struct net *net,
				    enum fib_event_type event_type, u32 dst,
				    int dst_len, struct fib_alias *fa,
				    struct netlink_ext_ack *extack)
{
	struct fib_entry_notifier_info info = {
		.info.extack = extack,
		.dst = dst,
		.dst_len = dst_len,
		.fi = fa->fa_info,
		.dscp = fa->fa_dscp,
		.type = fa->fa_type,
		.tb_id = fa->tb_id,
	};
	return call_fib4_notifiers(net, event_type, &info.info);
}

#define MAX_STAT_DEPTH 32

#define KEYLENGTH	(8*sizeof(t_key))
#define KEY_MAX		((t_key)~0)

typedef unsigned int t_key;

#define IS_TRIE(n)	((n)->pos >= KEYLENGTH)
#define IS_TNODE(n)	((n)->bits)
#define IS_LEAF(n)	(!(n)->bits)

struct key_vector {
	t_key key;
	unsigned char pos;		/* 2log(KEYLENGTH) bits needed */
	unsigned char bits;		/* 2log(KEYLENGTH) bits needed */
	unsigned char slen;
	union {
		/* This list pointer is valid if (pos | bits) == 0 (LEAF) */
		struct hlist_head leaf;
		/* This array is valid if (pos | bits) > 0 (TNODE) */
		DECLARE_FLEX_ARRAY(struct key_vector __rcu *, tnode);
	};
};

struct tnode {
	struct rcu_head rcu;
	t_key empty_children;		/* KEYLENGTH bits needed */
	t_key full_children;		/* KEYLENGTH bits needed */
	struct key_vector __rcu *parent;
	struct key_vector kv[1];
#define tn_bits kv[0].bits
};

#define TNODE_SIZE(n)	offsetof(struct tnode, kv[0].tnode[n])
#define LEAF_SIZE	TNODE_SIZE(1)
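/* TNODE_SIZE(n) is the tnode header plus room for n child pointers; a
 * leaf only needs the single pointer-sized slot that backs its
 * hlist_head, hence LEAF_SIZE == TNODE_SIZE(1).
 */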

#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats {
	unsigned int gets;
	unsigned int backtrack;
	unsigned int semantic_match_passed;
	unsigned int semantic_match_miss;
	unsigned int null_node_hit;
	unsigned int resize_node_skipped;
};
#endif

struct trie_stat {
	unsigned int totdepth;
	unsigned int maxdepth;
	unsigned int tnodes;
	unsigned int leaves;
	unsigned int nullpointers;
	unsigned int prefixes;
	unsigned int nodesizes[MAX_STAT_DEPTH];
};

struct trie {
	struct key_vector kv[1];
#ifdef CONFIG_IP_FIB_TRIE_STATS
	struct trie_use_stats __percpu *stats;
#endif
};

static struct key_vector *resize(struct trie *t, struct key_vector *tn);
static unsigned int tnode_free_size;

/*
 * synchronize_rcu after call_rcu for outstanding dirty memory; it should be
 * especially useful before resizing the root node with PREEMPT_NONE configs;
 * the value was obtained experimentally, aiming to avoid visible slowdown.
 */
unsigned int sysctl_fib_sync_mem = 512 * 1024;
unsigned int sysctl_fib_sync_mem_min = 64 * 1024;
unsigned int sysctl_fib_sync_mem_max = 64 * 1024 * 1024;

static struct kmem_cache *fn_alias_kmem __ro_after_init;
static struct kmem_cache *trie_leaf_kmem __ro_after_init;

static inline struct tnode *tn_info(struct key_vector *kv)
{
	return container_of(kv, struct tnode, kv[0]);
}

/* caller must hold RTNL */
#define node_parent(tn) rtnl_dereference(tn_info(tn)->parent)
#define get_child(tn, i) rtnl_dereference((tn)->tnode[i])

/* caller must hold RCU read lock or RTNL */
#define node_parent_rcu(tn) rcu_dereference_rtnl(tn_info(tn)->parent)
#define get_child_rcu(tn, i) rcu_dereference_rtnl((tn)->tnode[i])

/* wrapper for rcu_assign_pointer */
static inline void node_set_parent(struct key_vector *n, struct key_vector *tp)
{
	if (n)
		rcu_assign_pointer(tn_info(n)->parent, tp);
}

#define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER(tn_info(n)->parent, p)

/* This provides us with the number of children in this node; for a
 * leaf it returns 0, meaning none of the children are accessible.
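 *
 * For example, a tnode with bits == 2 reports (1ul << 2) & ~1ul == 4
 * accessible children, while a leaf (bits == 0) yields
 * (1ul << 0) & ~1ul == 0.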
 */
static inline unsigned long child_length(const struct key_vector *tn)
{
	return (1ul << tn->bits) & ~(1ul);
}

#define get_cindex(key, kv) (((key) ^ (kv)->key) >> (kv)->pos)

static inline unsigned long get_index(t_key key, struct key_vector *kv)
{
	unsigned long index = key ^ kv->key;

	if ((BITS_PER_LONG <= KEYLENGTH) && (KEYLENGTH == kv->pos))
		return 0;

	return index >> kv->pos;
}

/* To understand this stuff, an understanding of keys and all their bits is
 * necessary. Every node in the trie has a key associated with it, but not
 * all of the bits in that key are significant.
 *
 * Consider a node 'n' and its parent 'tp'.
 *
 * If n is a leaf, every bit in its key is significant. Its presence is
 * necessitated by path compression, since during a tree traversal (when
 * searching for a leaf - unless we are doing an insertion) we will completely
 * ignore all skipped bits we encounter. Thus we need to verify, at the end of
 * a potentially successful search, that we have indeed been walking the
 * correct key path.
 *
 * Note that we can never "miss" the correct key in the tree if present by
 * following the wrong path. Path compression ensures that segments of the key
 * that are the same for all keys with a given prefix are skipped, but the
 * skipped part *is* identical for each node in the subtrie below the skipped
 * bit! trie_insert() in this implementation takes care of that.
 *
 * If n is an internal node - a 'tnode' here - the various parts of its key
 * have many different meanings.
 *
 * Example:
 * _________________________________________________________________
 * | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
 * -----------------------------------------------------------------
 *  31  30  29  28  27  26  25  24  23  22  21  20  19  18  17  16
 *
 * _________________________________________________________________
 * | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
 * -----------------------------------------------------------------
 *  15  14  13  12  11  10   9   8   7   6   5   4   3   2   1   0
 *
 * tp->pos = 22
 * tp->bits = 3
 * n->pos = 13
 * n->bits = 4
 *
 * First, let's just ignore the bits that come before the parent tp, that is
 * the bits from (tp->pos + tp->bits) to 31. They are *known* but at this
 * point we do not use them for anything.
 *
 * The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
 * index into the parent's child array. That is, they will be used to find
 * 'n' among tp's children.
 *
 * The bits from (n->pos + n->bits) to (tp->pos - 1) - "S" - are skipped bits
 * for the node n.
 *
 * All the bits we have seen so far are significant to the node n. The rest
 * of the bits are really not needed or indeed known in n->key.
 *
 * The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
 * n's child array, and will of course be different for each child.
 *
 * The rest of the bits, from 0 to (n->pos - 1) - "u" - are completely unknown
 * at this point.
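 *
 * Tying this to the helpers above: with tp->pos == 22 and tp->bits == 3,
 * get_index(key, tp) == (key ^ tp->key) >> 22 leaves exactly the three
 * "N" bits as the index of n in tp's child array, provided the "i" bits
 * of key and tp->key agree.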
 */

static const int halve_threshold = 25;
static const int inflate_threshold = 50;
static const int halve_threshold_root = 15;
static const int inflate_threshold_root = 30;

static void __alias_free_mem(struct rcu_head *head)
{
	struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
	kmem_cache_free(fn_alias_kmem, fa);
}

static inline void alias_free_mem_rcu(struct fib_alias *fa)
{
	call_rcu(&fa->rcu, __alias_free_mem);
}

#define TNODE_VMALLOC_MAX \
	ilog2((SIZE_MAX - TNODE_SIZE(0)) / sizeof(struct key_vector *))

static void __node_free_rcu(struct rcu_head *head)
{
	struct tnode *n = container_of(head, struct tnode, rcu);

	if (!n->tn_bits)
		kmem_cache_free(trie_leaf_kmem, n);
	else
		kvfree(n);
}

#define node_free(n) call_rcu(&tn_info(n)->rcu, __node_free_rcu)

static struct tnode *tnode_alloc(int bits)
{
	size_t size;

	/* verify bits is within bounds */
	if (bits > TNODE_VMALLOC_MAX)
		return NULL;

	/* determine size and verify it is non-zero and didn't overflow */
	size = TNODE_SIZE(1ul << bits);

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

static inline void empty_child_inc(struct key_vector *n)
{
	tn_info(n)->empty_children++;

	if (!tn_info(n)->empty_children)
		tn_info(n)->full_children++;
}

static inline void empty_child_dec(struct key_vector *n)
{
	if (!tn_info(n)->empty_children)
		tn_info(n)->full_children--;

	tn_info(n)->empty_children--;
}

static struct key_vector *leaf_new(t_key key, struct fib_alias *fa)
{
	struct key_vector *l;
	struct tnode *kv;

	kv = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
	if (!kv)
		return NULL;

	/* initialize key vector */
	l = kv->kv;
	l->key = key;
	l->pos = 0;
	l->bits = 0;
	l->slen = fa->fa_slen;

	/* link leaf to fib alias */
	INIT_HLIST_HEAD(&l->leaf);
	hlist_add_head(&fa->fa_list, &l->leaf);

	return l;
}

static struct key_vector *tnode_new(t_key key, int pos, int bits)
{
	unsigned int shift = pos + bits;
	struct key_vector *tn;
	struct tnode *tnode;

	/* verify that bits and pos have their msb bits clear and are valid */
	BUG_ON(!bits || (shift > KEYLENGTH));

	tnode = tnode_alloc(bits);
	if (!tnode)
		return NULL;

	pr_debug("AT %p s=%zu %zu\n", tnode, TNODE_SIZE(0),
		 sizeof(struct key_vector *) << bits);

	if (bits == KEYLENGTH)
		tnode->full_children = 1;
	else
		tnode->empty_children = 1ul << bits;

	tn = tnode->kv;
	tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
	tn->pos = pos;
	tn->bits = bits;
	tn->slen = pos;

	return tn;
}

/* Check whether a tnode 'n' is "full", i.e. it is an internal node
 * and no bits are skipped. See discussion in dyntree paper p. 6
 */
static inline int tnode_full(struct key_vector *tn, struct key_vector *n)
{
	return n && ((n->pos + n->bits) == tn->pos) && IS_TNODE(n);
}
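
/* E.g. a child whose (pos + bits) lands exactly on tn->pos covers the
 * key bits immediately below tn's index bits with no skipped gap; a
 * leaf is never "full" since IS_TNODE() fails for it.
 */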

/* Add a child at position i overwriting the old value.
 * Update the value of full_children and empty_children.
 */
static void put_child(struct key_vector *tn, unsigned long i,
		      struct key_vector *n)
{
	struct key_vector *chi = get_child(tn, i);
	int isfull, wasfull;

	BUG_ON(i >= child_length(tn));

	/* update emptyChildren, overflow into fullChildren */
	if (!n && chi)
		empty_child_inc(tn);
	if (n && !chi)
		empty_child_dec(tn);

	/* update fullChildren */
	wasfull = tnode_full(tn, chi);
	isfull = tnode_full(tn, n);

	if (wasfull && !isfull)
		tn_info(tn)->full_children--;
	else if (!wasfull && isfull)
		tn_info(tn)->full_children++;

	if (n && (tn->slen < n->slen))
		tn->slen = n->slen;

	rcu_assign_pointer(tn->tnode[i], n);
}

static void update_children(struct key_vector *tn)
{
	unsigned long i;

	/* update all of the child parent pointers */
	for (i = child_length(tn); i;) {
		struct key_vector *inode = get_child(tn, --i);

		if (!inode)
			continue;

		/* Either update the children of a tnode that
		 * already belongs to us or update the child
		 * to point to ourselves.
		 */
		if (node_parent(inode) == tn)
			update_children(inode);
		else
			node_set_parent(inode, tn);
	}
}

static inline void put_child_root(struct key_vector *tp, t_key key,
				  struct key_vector *n)
{
	if (IS_TRIE(tp))
		rcu_assign_pointer(tp->tnode[0], n);
	else
		put_child(tp, get_index(key, tp), n);
}

static inline void tnode_free_init(struct key_vector *tn)
{
	tn_info(tn)->rcu.next = NULL;
}

static inline void tnode_free_append(struct key_vector *tn,
				     struct key_vector *n)
{
	tn_info(n)->rcu.next = tn_info(tn)->rcu.next;
	tn_info(tn)->rcu.next = &tn_info(n)->rcu;
}

static void tnode_free(struct key_vector *tn)
{
	struct callback_head *head = &tn_info(tn)->rcu;

	while (head) {
		head = head->next;
		tnode_free_size += TNODE_SIZE(1ul << tn->bits);
		node_free(tn);

		tn = container_of(head, struct tnode, rcu)->kv;
	}

	if (tnode_free_size >= READ_ONCE(sysctl_fib_sync_mem)) {
		tnode_free_size = 0;
		synchronize_rcu();
	}
}

static struct key_vector *replace(struct trie *t,
				  struct key_vector *oldtnode,
				  struct key_vector *tn)
{
	struct key_vector *tp = node_parent(oldtnode);
	unsigned long i;

	/* setup the parent pointer out of and back into this node */
	NODE_INIT_PARENT(tn, tp);
	put_child_root(tp, tn->key, tn);

	/* update all of the child parent pointers */
	update_children(tn);

	/* all pointers should be clean so we are done */
	tnode_free(oldtnode);

	/* resize children now that oldtnode is freed */
	for (i = child_length(tn); i;) {
		struct key_vector *inode = get_child(tn, --i);

		/* resize child node */
		if (tnode_full(tn, inode))
			tn = resize(t, inode);
	}

	return tp;
}

static struct key_vector *inflate(struct trie *t,
				  struct key_vector *oldtnode)
{
	struct key_vector *tn;
	unsigned long i;
	t_key m;

	pr_debug("In inflate\n");

	tn = tnode_new(oldtnode->key, oldtnode->pos - 1, oldtnode->bits + 1);
	if (!tn)
		goto notnode;

	/* prepare oldtnode to be freed */
	tnode_free_init(oldtnode);

	/* Assemble all of the pointers in our cluster, in this case that
	 * represents all of the pointers out of our allocated nodes that
	 * point to existing tnodes and the links between our allocated
	 * nodes.
	 */
	for (i = child_length(oldtnode), m = 1u << tn->pos; i;) {
		struct key_vector *inode = get_child(oldtnode, --i);
		struct key_vector *node0, *node1;
		unsigned long j, k;

		/* An empty child */
		if (!inode)
			continue;

		/* A leaf or an internal node with skipped bits */
		if (!tnode_full(oldtnode, inode)) {
			put_child(tn, get_index(inode->key, tn), inode);
			continue;
		}

		/* drop the node in the old tnode free list */
		tnode_free_append(oldtnode, inode);

		/* An internal node with two children */
		if (inode->bits == 1) {
			put_child(tn, 2 * i + 1, get_child(inode, 1));
			put_child(tn, 2 * i, get_child(inode, 0));
			continue;
		}

		/* We will replace this node 'inode' with two new
		 * ones, 'node0' and 'node1', each with half of the
		 * original children. The two new nodes will have
		 * a position one bit further down the key and this
		 * means that the "significant" part of their keys
		 * (see the discussion near the top of this file)
		 * will differ by one bit, which will be "0" in
		 * node0's key and "1" in node1's key. Since we are
		 * moving the key position by one step, the bit that
		 * we are moving away from - the bit at position
		 * (tn->pos) - is the one that will differ between
		 * node0 and node1. So... we synthesize that bit in the
		 * two new keys.
		 */
		node1 = tnode_new(inode->key | m, inode->pos, inode->bits - 1);
		if (!node1)
			goto nomem;
		node0 = tnode_new(inode->key, inode->pos, inode->bits - 1);

		tnode_free_append(tn, node1);
		if (!node0)
			goto nomem;
		tnode_free_append(tn, node0);

		/* populate child pointers in new nodes */
		for (k = child_length(inode), j = k / 2; j;) {
			put_child(node1, --j, get_child(inode, --k));
			put_child(node0, j, get_child(inode, j));
			put_child(node1, --j, get_child(inode, --k));
			put_child(node0, j, get_child(inode, j));
		}

		/* link new nodes to parent */
		NODE_INIT_PARENT(node1, tn);
		NODE_INIT_PARENT(node0, tn);

		/* link parent to nodes */
		put_child(tn, 2 * i + 1, node1);
		put_child(tn, 2 * i, node0);
	}

	/* setup the parent pointers into and out of this node */
	return replace(t, oldtnode, tn);
nomem:
	/* all pointers should be clean so we are done */
	tnode_free(tn);
notnode:
	return NULL;
}

static struct key_vector *halve(struct trie *t,
				struct key_vector *oldtnode)
{
	struct key_vector *tn;
	unsigned long i;

	pr_debug("In halve\n");

	tn = tnode_new(oldtnode->key, oldtnode->pos + 1, oldtnode->bits - 1);
	if (!tn)
		goto notnode;

	/* prepare oldtnode to be freed */
	tnode_free_init(oldtnode);

	/* Assemble all of the pointers in our cluster, in this case that
	 * represents all of the pointers out of our allocated nodes that
	 * point to existing tnodes and the links between our allocated
	 * nodes.
	 */
	for (i = child_length(oldtnode); i;) {
		struct key_vector *node1 = get_child(oldtnode, --i);
		struct key_vector *node0 = get_child(oldtnode, --i);
		struct key_vector *inode;

		/* At least one of the children is empty */
		if (!node1 || !node0) {
			put_child(tn, i / 2, node1 ? : node0);
			continue;
		}

		/* Two nonempty children */
		inode = tnode_new(node0->key, oldtnode->pos, 1);
		if (!inode)
			goto nomem;
		tnode_free_append(tn, inode);

		/* initialize pointers out of node */
		put_child(inode, 1, node1);
		put_child(inode, 0, node0);
		NODE_INIT_PARENT(inode, tn);

		/* link parent to node */
		put_child(tn, i / 2, inode);
	}

	/* setup the parent pointers into and out of this node */
	return replace(t, oldtnode, tn);
nomem:
	/* all pointers should be clean so we are done */
	tnode_free(tn);
notnode:
	return NULL;
}

static struct key_vector *collapse(struct trie *t,
				   struct key_vector *oldtnode)
{
	struct key_vector *n, *tp;
	unsigned long i;

	/* scan the tnode looking for that one child that might still exist */
	for (n = NULL, i = child_length(oldtnode); !n && i;)
		n = get_child(oldtnode, --i);

	/* compress one level */
	tp = node_parent(oldtnode);
	put_child_root(tp, oldtnode->key, n);
	node_set_parent(n, tp);

	/* drop dead node */
	node_free(oldtnode);

	return tp;
}

static unsigned char update_suffix(struct key_vector *tn)
{
	unsigned char slen = tn->pos;
	unsigned long stride, i;
	unsigned char slen_max;

	/* only vector 0 can have a suffix length greater than or equal to
	 * tn->pos + tn->bits, the second highest node will have a suffix
	 * length at most of tn->pos + tn->bits - 1
	 */
	slen_max = min_t(unsigned char, tn->pos + tn->bits - 1, tn->slen);

	/* search through the list of children looking for nodes that might
	 * have a suffix greater than the one we currently have. This is
	 * why we start with a stride of 2, since a stride of 1 would
	 * represent the nodes with suffix length equal to tn->pos
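	 *
	 * E.g. once a child with slen == tn->pos + 3 has been found, any
	 * larger suffix can only live at an index that is a multiple of
	 * 16, so the stride below jumps to 16 and the other slots are
	 * skipped.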
	 */
	for (i = 0, stride = 0x2ul; i < child_length(tn); i += stride) {
		struct key_vector *n = get_child(tn, i);

		if (!n || (n->slen <= slen))
			continue;

		/* update stride and slen based on new value */
		stride <<= (n->slen - slen);
		slen = n->slen;
		i &= ~(stride - 1);

		/* stop searching if we have hit the maximum possible value */
		if (slen >= slen_max)
			break;
	}

	tn->slen = slen;

	return slen;
}

/* From "Implementing a dynamic compressed trie" by Stefan Nilsson of
 * the Helsinki University of Technology and Matti Tikkanen of Nokia
 * Telecommunications, page 6:
 * "A node is doubled if the ratio of non-empty children to all
 * children in the *doubled* node is at least 'high'."
 *
 * 'high' in this instance is the variable 'inflate_threshold'. It
 * is expressed as a percentage, so we multiply it with
 * child_length() and instead of multiplying by 2 (since the
 * child array will be doubled by inflate()) and multiplying
 * the left-hand side by 100 (to handle the percentage thing) we
 * multiply the left-hand side by 50.
 *
 * The left-hand side may look a bit weird: child_length(tn)
 * - tn->empty_children is of course the number of non-null children
 * in the current node. tn->full_children is the number of "full"
 * children, that is non-null tnodes with a skip value of 0.
 * All of those will be doubled in the resulting inflated tnode, so
 * we just count them one extra time here.
 *
 * A clearer way to write this would be:
 *
 * to_be_doubled = tn->full_children;
 * not_to_be_doubled = child_length(tn) - tn->empty_children -
 *     tn->full_children;
 *
 * new_child_length = child_length(tn) * 2;
 *
 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
 *      new_child_length;
 * if (new_fill_factor >= inflate_threshold)
 *
 * ...and so on, though it would mess up the while () loop.
 *
 * anyway,
 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
 *      inflate_threshold
 *
 * avoid a division:
 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
 *      inflate_threshold * new_child_length
 *
 * expand not_to_be_doubled and to_be_doubled, and shorten:
 * 100 * (child_length(tn) - tn->empty_children +
 *    tn->full_children) >= inflate_threshold * new_child_length
 *
 * expand new_child_length:
 * 100 * (child_length(tn) - tn->empty_children +
 *    tn->full_children) >=
 *      inflate_threshold * child_length(tn) * 2
 *
 * shorten again:
 * 50 * (tn->full_children + child_length(tn) -
 *    tn->empty_children) >= inflate_threshold *
 *    child_length(tn)
 *
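 * As a quick worked check with the actual thresholds: a tnode with
 * bits == 4 has child_length() == 16, so the right-hand side is
 * 50 * 16 = 800. With empty_children == 4 and full_children == 4, the
 * left-hand side is 50 * (4 + 16 - 4) = 800 >= 800 and the node is
 * inflated; with empty_children == 6 it is 50 * 14 = 700 < 800 and
 * the node is left alone.
 *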
 */
static inline bool should_inflate(struct key_vector *tp, struct key_vector *tn)
{
	unsigned long used = child_length(tn);
	unsigned long threshold = used;

	/* Keep root node larger */
	threshold *= IS_TRIE(tp) ? inflate_threshold_root : inflate_threshold;
	used -= tn_info(tn)->empty_children;
	used += tn_info(tn)->full_children;

	/* if bits == KEYLENGTH then pos = 0, and will fail below */

	return (used > 1) && tn->pos && ((50 * used) >= threshold);
}

static inline bool should_halve(struct key_vector *tp, struct key_vector *tn)
{
	unsigned long used = child_length(tn);
	unsigned long threshold = used;

	/* Keep root node larger */
	threshold *= IS_TRIE(tp) ? halve_threshold_root : halve_threshold;
	used -= tn_info(tn)->empty_children;

	/* if bits == KEYLENGTH then used = 100% on wrap, and will fail below */

	return (used > 1) && (tn->bits > 1) && ((100 * used) < threshold);
}

static inline bool should_collapse(struct key_vector *tn)
{
	unsigned long used = child_length(tn);

	used -= tn_info(tn)->empty_children;

	/* account for bits == KEYLENGTH case */
	if ((tn->bits == KEYLENGTH) && tn_info(tn)->full_children)
		used -= KEY_MAX;
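	/* e.g. a completely empty KEYLENGTH tnode is stored as
	 * empty_children == 0 with full_children acting as the 2^32
	 * carry, so used works out to (1ul << KEYLENGTH) - KEY_MAX == 1
	 * at this point
	 */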

	/* One child or none, time to drop us from the trie */
	return used < 2;
}

#define MAX_WORK 10
static struct key_vector *resize(struct trie *t, struct key_vector *tn)
{
#ifdef CONFIG_IP_FIB_TRIE_STATS
	struct trie_use_stats __percpu *stats = t->stats;
#endif
	struct key_vector *tp = node_parent(tn);
	unsigned long cindex = get_index(tn->key, tp);
	int max_work = MAX_WORK;

	pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
		 tn, inflate_threshold, halve_threshold);

	/* track the tnode via the pointer from the parent instead of
	 * doing it ourselves. This way we can let RCU fully do its
	 * thing without us interfering
	 */
	BUG_ON(tn != get_child(tp, cindex));

	/* Double as long as the resulting node has a number of
	 * nonempty nodes that are above the threshold.
	 */
	while (should_inflate(tp, tn) && max_work) {
		tp = inflate(t, tn);
		if (!tp) {
#ifdef CONFIG_IP_FIB_TRIE_STATS
			this_cpu_inc(stats->resize_node_skipped);
#endif
			break;
		}

		max_work--;
		tn = get_child(tp, cindex);
	}

	/* update parent in case inflate failed */
	tp = node_parent(tn);

	/* Return if at least one inflate has run */
	if (max_work != MAX_WORK)
		return tp;

	/* Halve as long as the number of empty children in this
	 * node is above threshold.
	 */
	while (should_halve(tp, tn) && max_work) {
		tp = halve(t, tn);
		if (!tp) {
#ifdef CONFIG_IP_FIB_TRIE_STATS
			this_cpu_inc(stats->resize_node_skipped);
#endif
			break;
		}

		max_work--;
		tn = get_child(tp, cindex);
	}

	/* Only one child remains */
	if (should_collapse(tn))
		return collapse(t, tn);

	/* update parent in case halve failed */
	return node_parent(tn);
}

static void node_pull_suffix(struct key_vector *tn, unsigned char slen)
{
	unsigned char node_slen = tn->slen;

	while ((node_slen > tn->pos) && (node_slen > slen)) {
		slen = update_suffix(tn);
		if (node_slen == slen)
			break;

		tn = node_parent(tn);
		node_slen = tn->slen;
	}
}

static void node_push_suffix(struct key_vector *tn, unsigned char slen)
{
	while (tn->slen < slen) {
		tn->slen = slen;
		tn = node_parent(tn);
	}
}

/* rcu_read_lock needs to be held by the caller on the read side */
static struct key_vector *fib_find_node(struct trie *t,
					struct key_vector **tp, u32 key)
{
	struct key_vector *pn, *n = t->kv;
	unsigned long index = 0;

	do {
		pn = n;
		n = get_child_rcu(n, index);

		if (!n)
			break;

		index = get_cindex(key, n);

		/* This bit of code is a bit tricky but it combines multiple
		 * checks into a single check. The prefix consists of the
		 * prefix plus zeros for the bits in the cindex. The index
		 * is the difference between the key and this value. From
		 * this we can actually derive several pieces of data.
		 * if (index >= (1ul << bits))
		 *     we have a mismatch in skip bits and failed
		 * else
		 *     we know the value is cindex
		 *
		 * This check is safe even if bits == KEYLENGTH due to the
		 * fact that we can only allocate a node with 32 bits if a
		 * long is greater than 32 bits.
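		 *
		 * Concretely, with n->pos == 13 and n->bits == 4, any
		 * disagreement between key and n->key in bits 31..17
		 * survives the shift as a set bit at position 4 or above,
		 * so index >= (1ul << 4) and the search fails as it should.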
		 */
		if (index >= (1ul << n->bits)) {
			n = NULL;
			break;
		}

		/* keep searching until we find a perfect match leaf or NULL */
	} while (IS_TNODE(n));

	*tp = pn;

	return n;
}

/* Return the first fib alias matching DSCP with
 * priority less than or equal to PRIO.
 * If 'find_first' is set, return the first matching
 * fib alias, regardless of DSCP and priority.
 */
static struct fib_alias *fib_find_alias(struct hlist_head *fah, u8 slen,
					dscp_t dscp, u32 prio, u32 tb_id,
					bool find_first)
{
	struct fib_alias *fa;

	if (!fah)
		return NULL;

	hlist_for_each_entry(fa, fah, fa_list) {
		/* Avoid Sparse warning when using dscp_t in inequalities */
		u8 __fa_dscp = inet_dscp_to_dsfield(fa->fa_dscp);
		u8 __dscp = inet_dscp_to_dsfield(dscp);

		if (fa->fa_slen < slen)
			continue;
		if (fa->fa_slen != slen)
			break;
		if (fa->tb_id > tb_id)
			continue;
		if (fa->tb_id != tb_id)
			break;
		if (find_first)
			return fa;
		if (__fa_dscp > __dscp)
			continue;
		if (fa->fa_info->fib_priority >= prio || __fa_dscp < __dscp)
			return fa;
	}

	return NULL;
}

static struct fib_alias *
fib_find_matching_alias(struct net *net, const struct fib_rt_info *fri)
{
	u8 slen = KEYLENGTH - fri->dst_len;
	struct key_vector *l, *tp;
	struct fib_table *tb;
	struct fib_alias *fa;
	struct trie *t;

	tb = fib_get_table(net, fri->tb_id);
	if (!tb)
		return NULL;

	t = (struct trie *)tb->tb_data;
	l = fib_find_node(t, &tp, be32_to_cpu(fri->dst));
	if (!l)
		return NULL;

	hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
		if (fa->fa_slen == slen && fa->tb_id == fri->tb_id &&
		    fa->fa_dscp == fri->dscp && fa->fa_info == fri->fi &&
		    fa->fa_type == fri->type)
			return fa;
	}

	return NULL;
}

void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri)
{
	u8 fib_notify_on_flag_change;
	struct fib_alias *fa_match;
	struct sk_buff *skb;
	int err;

	rcu_read_lock();

	fa_match = fib_find_matching_alias(net, fri);
	if (!fa_match)
		goto out;

	/* These are paired with the WRITE_ONCE() happening in this function.
	 * The reason is that we are only protected by RCU at this point.
	 */
	if (READ_ONCE(fa_match->offload) == fri->offload &&
	    READ_ONCE(fa_match->trap) == fri->trap &&
	    READ_ONCE(fa_match->offload_failed) == fri->offload_failed)
		goto out;

	WRITE_ONCE(fa_match->offload, fri->offload);
	WRITE_ONCE(fa_match->trap, fri->trap);

	fib_notify_on_flag_change = READ_ONCE(net->ipv4.sysctl_fib_notify_on_flag_change);

	/* 2 means send notifications only if offload_failed was changed. */
	if (fib_notify_on_flag_change == 2 &&
	    READ_ONCE(fa_match->offload_failed) == fri->offload_failed)
		goto out;

	WRITE_ONCE(fa_match->offload_failed, fri->offload_failed);

	if (!fib_notify_on_flag_change)
		goto out;

	skb = nlmsg_new(fib_nlmsg_size(fa_match->fa_info), GFP_ATOMIC);
	if (!skb) {
		err = -ENOBUFS;
		goto errout;
	}

	err = fib_dump_info(skb, 0, 0, RTM_NEWROUTE, fri, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_ROUTE, NULL, GFP_ATOMIC);
	goto out;

errout:
	rtnl_set_sk_err(net, RTNLGRP_IPV4_ROUTE, err);
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(fib_alias_hw_flags_set);

static void trie_rebalance(struct trie *t, struct key_vector *tn)
{
	while (!IS_TRIE(tn))
		tn = resize(t, tn);
}

static int fib_insert_node(struct trie *t, struct key_vector *tp,
			   struct fib_alias *new, t_key key)
{
	struct key_vector *n, *l;

	l = leaf_new(key, new);
	if (!l)
		goto noleaf;

	/* retrieve child from parent node */
	n = get_child(tp, get_index(key, tp));

	/* Case 2: n is a LEAF or a TNODE and the key doesn't match.
	 *
	 * Add a new tnode here
	 * first tnode needs some special handling
	 * leaves us in position for handling as case 3
	 */
	if (n) {
		struct key_vector *tn;

		tn = tnode_new(key, __fls(key ^ n->key), 1);
		if (!tn)
			goto notnode;

		/* initialize routes out of node */
		NODE_INIT_PARENT(tn, tp);
		put_child(tn, get_index(key, tn) ^ 1, n);

		/* start adding routes into the node */
		put_child_root(tp, key, tn);
		node_set_parent(n, tn);

		/* parent now has a NULL spot where the leaf can go */
		tp = tn;
	}

	/* Case 3: n is NULL, and will just insert a new leaf */
	node_push_suffix(tp, new->fa_slen);
	NODE_INIT_PARENT(l, tp);
	put_child_root(tp, key, l);
	trie_rebalance(t, tp);

	return 0;
notnode:
	node_free(l);
noleaf:
	return -ENOMEM;
}

static int fib_insert_alias(struct trie *t, struct key_vector *tp,
			    struct key_vector *l, struct fib_alias *new,
			    struct fib_alias *fa, t_key key)
{
	if (!l)
		return fib_insert_node(t, tp, new, key);

	if (fa) {
		hlist_add_before_rcu(&new->fa_list, &fa->fa_list);
	} else {
		struct fib_alias *last;

		hlist_for_each_entry(last, &l->leaf, fa_list) {
			if (new->fa_slen < last->fa_slen)
				break;
			if ((new->fa_slen == last->fa_slen) &&
			    (new->tb_id > last->tb_id))
				break;
			fa = last;
		}

		if (fa)
			hlist_add_behind_rcu(&new->fa_list, &fa->fa_list);
		else
			hlist_add_head_rcu(&new->fa_list, &l->leaf);
	}

	/* if we added to the tail node then we need to update slen */
	if (l->slen < new->fa_slen) {
		l->slen = new->fa_slen;
		node_push_suffix(tp, new->fa_slen);
	}

	return 0;
}

static bool fib_valid_key_len(u32 key, u8 plen, struct netlink_ext_ack *extack)
{
	if (plen > KEYLENGTH) {
		NL_SET_ERR_MSG(extack, "Invalid prefix length");
		return false;
	}

	if ((plen < KEYLENGTH) && (key << plen)) {
		NL_SET_ERR_MSG(extack,
			       "Invalid prefix for given prefix length");
		return false;
	}

	return true;
}
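
/* For example, key 0xc0a80100 (192.168.1.0) passes with plen == 24
 * since key << 24 == 0, but fails with plen == 16 because the host
 * bits 0x0100 are still set (key << 16 != 0).
 */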

static void fib_remove_alias(struct trie *t, struct key_vector *tp,
			     struct key_vector *l, struct fib_alias *old);

/* Caller must hold RTNL. */
int fib_table_insert(struct net *net, struct fib_table *tb,
		     struct fib_config *cfg, struct netlink_ext_ack *extack)
{
	struct trie *t = (struct trie *)tb->tb_data;
	struct fib_alias *fa, *new_fa;
	struct key_vector *l, *tp;
	u16 nlflags = NLM_F_EXCL;
	struct fib_info *fi;
	u8 plen = cfg->fc_dst_len;
	u8 slen = KEYLENGTH - plen;
	dscp_t dscp;
	u32 key;
	int err;

	key = ntohl(cfg->fc_dst);

	if (!fib_valid_key_len(key, plen, extack))
		return -EINVAL;

	pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);

	fi = fib_create_info(cfg, extack);
	if (IS_ERR(fi)) {
		err = PTR_ERR(fi);
		goto err;
	}

	dscp = cfg->fc_dscp;
	l = fib_find_node(t, &tp, key);
	fa = l ? fib_find_alias(&l->leaf, slen, dscp, fi->fib_priority,
				tb->tb_id, false) : NULL;

	/* Now fa, if non-NULL, points to the first fib alias
	 * with the same keys [prefix,dscp,priority], if such key already
	 * exists or to the node before which we will insert new one.
	 *
	 * If fa is NULL, we will need to allocate a new one and
	 * insert to the tail of the section matching the suffix length
	 * of the new alias.
	 */

	if (fa && fa->fa_dscp == dscp &&
	    fa->fa_info->fib_priority == fi->fib_priority) {
		struct fib_alias *fa_first, *fa_match;

		err = -EEXIST;
		if (cfg->fc_nlflags & NLM_F_EXCL)
			goto out;

		nlflags &= ~NLM_F_EXCL;

		/* We have 2 goals:
		 * 1. Find exact match for type, scope, fib_info to avoid
		 * duplicate routes
		 * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
		 */
		fa_match = NULL;
		fa_first = fa;
		hlist_for_each_entry_from(fa, fa_list) {
			if ((fa->fa_slen != slen) ||
			    (fa->tb_id != tb->tb_id) ||
			    (fa->fa_dscp != dscp))
				break;
			if (fa->fa_info->fib_priority != fi->fib_priority)
				break;
			if (fa->fa_type == cfg->fc_type &&
			    fa->fa_info == fi) {
				fa_match = fa;
				break;
			}
		}

		if (cfg->fc_nlflags & NLM_F_REPLACE) {
			struct fib_info *fi_drop;
			u8 state;

			nlflags |= NLM_F_REPLACE;
			fa = fa_first;
			if (fa_match) {
				if (fa == fa_match)
					err = 0;
				goto out;
			}
			err = -ENOBUFS;
			new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
			if (!new_fa)
				goto out;

			fi_drop = fa->fa_info;
			new_fa->fa_dscp = fa->fa_dscp;
			new_fa->fa_info = fi;
			new_fa->fa_type = cfg->fc_type;
			state = fa->fa_state;
			new_fa->fa_state = state & ~FA_S_ACCESSED;
			new_fa->fa_slen = fa->fa_slen;
			new_fa->tb_id = tb->tb_id;
			new_fa->fa_default = -1;
			new_fa->offload = 0;
			new_fa->trap = 0;
			new_fa->offload_failed = 0;

			hlist_replace_rcu(&fa->fa_list, &new_fa->fa_list);

			if (fib_find_alias(&l->leaf, fa->fa_slen, 0, 0,
					   tb->tb_id, true) == new_fa) {
				enum fib_event_type fib_event;

				fib_event = FIB_EVENT_ENTRY_REPLACE;
				err = call_fib_entry_notifiers(net, fib_event,
							       key, plen,
							       new_fa, extack);
				if (err) {
					hlist_replace_rcu(&new_fa->fa_list,
							  &fa->fa_list);
					goto out_free_new_fa;
				}
			}

			rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
				  tb->tb_id, &cfg->fc_nlinfo, nlflags);

			alias_free_mem_rcu(fa);

			fib_release_info(fi_drop);
			if (state & FA_S_ACCESSED)
				rt_cache_flush(cfg->fc_nlinfo.nl_net);

			goto succeeded;
		}
		/* Error if we find a perfect match which
		 * uses the same scope, type, and nexthop
		 * information.
		 */
		if (fa_match)
			goto out;

		if (cfg->fc_nlflags & NLM_F_APPEND)
			nlflags |= NLM_F_APPEND;
		else
			fa = fa_first;
	}
	err = -ENOENT;
	if (!(cfg->fc_nlflags & NLM_F_CREATE))
		goto out;

	nlflags |= NLM_F_CREATE;
	err = -ENOBUFS;
	new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
	if (!new_fa)
		goto out;

	new_fa->fa_info = fi;
	new_fa->fa_dscp = dscp;
	new_fa->fa_type = cfg->fc_type;
	new_fa->fa_state = 0;
	new_fa->fa_slen = slen;
	new_fa->tb_id = tb->tb_id;
	new_fa->fa_default = -1;
	new_fa->offload = 0;
	new_fa->trap = 0;
	new_fa->offload_failed = 0;

	/* Insert new entry to the list. */
	err = fib_insert_alias(t, tp, l, new_fa, fa, key);
	if (err)
		goto out_free_new_fa;

	/* The alias was already inserted, so the node must exist. */
	l = l ? l : fib_find_node(t, &tp, key);
	if (WARN_ON_ONCE(!l)) {
		err = -ENOENT;
		goto out_free_new_fa;
	}

	if (fib_find_alias(&l->leaf, new_fa->fa_slen, 0, 0, tb->tb_id, true) ==
	    new_fa) {
		enum fib_event_type fib_event;

		fib_event = FIB_EVENT_ENTRY_REPLACE;
		err = call_fib_entry_notifiers(net, fib_event, key, plen,
					       new_fa, extack);
		if (err)
			goto out_remove_new_fa;
	}

	if (!plen)
		tb->tb_num_default++;

	rt_cache_flush(cfg->fc_nlinfo.nl_net);
	rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, new_fa->tb_id,
		  &cfg->fc_nlinfo, nlflags);
succeeded:
	return 0;

out_remove_new_fa:
	fib_remove_alias(t, tp, l, new_fa);
out_free_new_fa:
	kmem_cache_free(fn_alias_kmem, new_fa);
out:
	fib_release_info(fi);
err:
	return err;
}

static inline t_key prefix_mismatch(t_key key, struct key_vector *n)
{
	t_key prefix = n->key;

	return (key ^ prefix) & (prefix | -prefix);
}
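
/* For instance, prefix 0xc0a80000 has its lowest set bit at position 19,
 * so (prefix | -prefix) == 0xfff80000 and only the top 13 bits of key
 * are compared; differences in the don't-care suffix are masked off.
 */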

bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags,
			 const struct flowi4 *flp)
{
	if (nhc->nhc_flags & RTNH_F_DEAD)
		return false;

	if (ip_ignore_linkdown(nhc->nhc_dev) &&
	    nhc->nhc_flags & RTNH_F_LINKDOWN &&
	    !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
		return false;

	if (flp->flowi4_oif && flp->flowi4_oif != nhc->nhc_oif)
		return false;

	return true;
}

/* should be called with rcu_read_lock */
int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
		     struct fib_result *res, int fib_flags)
{
	struct trie *t = (struct trie *) tb->tb_data;
#ifdef CONFIG_IP_FIB_TRIE_STATS
	struct trie_use_stats __percpu *stats = t->stats;
#endif
	const t_key key = ntohl(flp->daddr);
	struct key_vector *n, *pn;
	struct fib_alias *fa;
	unsigned long index;
	t_key cindex;

	pn = t->kv;
	cindex = 0;

	n = get_child_rcu(pn, cindex);
	if (!n) {
		trace_fib_table_lookup(tb->tb_id, flp, NULL, -EAGAIN);
		return -EAGAIN;
	}

#ifdef CONFIG_IP_FIB_TRIE_STATS
	this_cpu_inc(stats->gets);
#endif

	/* Step 1: Travel to the longest prefix match in the trie */
	for (;;) {
		index = get_cindex(key, n);

		/* This bit of code is a bit tricky but it combines multiple
		 * checks into a single check. The prefix consists of the
		 * prefix plus zeros for the "bits" in the prefix. The index
		 * is the difference between the key and this value. From
		 * this we can actually derive several pieces of data.
		 * if (index >= (1ul << bits))
		 *     we have a mismatch in skip bits and failed
		 * else
		 *     we know the value is cindex
		 *
		 * This check is safe even if bits == KEYLENGTH due to the
		 * fact that we can only allocate a node with 32 bits if a
		 * long is greater than 32 bits.
		 */
		if (index >= (1ul << n->bits))
			break;

		/* we have found a leaf. Prefixes have already been compared */
		if (IS_LEAF(n))
			goto found;

		/* only record pn and cindex if we are going to be chopping
		 * bits later. Otherwise we are just wasting cycles.
		 */
		if (n->slen > n->pos) {
			pn = n;
			cindex = index;
		}

		n = get_child_rcu(n, index);
		if (unlikely(!n))
			goto backtrace;
	}

	/* Step 2: Sort out leaves and begin backtracing for longest prefix */
	for (;;) {
		/* record the pointer where our next node pointer is stored */
		struct key_vector __rcu **cptr = n->tnode;

		/* This test verifies that none of the bits that differ
		 * between the key and the prefix exist in the region of
		 * the lsb and higher in the prefix.
		 */
		if (unlikely(prefix_mismatch(key, n)) || (n->slen == n->pos))
			goto backtrace;

		/* exit out and process leaf */
		if (unlikely(IS_LEAF(n)))
			break;

		/* Don't bother recording parent info. Since we are in
		 * prefix match mode we will have to come back to wherever
		 * we started this traversal anyway
		 */

		while ((n = rcu_dereference(*cptr)) == NULL) {
backtrace:
#ifdef CONFIG_IP_FIB_TRIE_STATS
			if (!n)
				this_cpu_inc(stats->null_node_hit);
#endif
			/* If we are at cindex 0 there are no more bits for
			 * us to strip at this level so we must ascend back
			 * up one level to see if there are any more bits to
			 * be stripped there.
			 */
			while (!cindex) {
				t_key pkey = pn->key;

				/* If we don't have a parent then there is
				 * nothing for us to do as we do not have any
				 * further nodes to parse.
				 */
				if (IS_TRIE(pn)) {
					trace_fib_table_lookup(tb->tb_id, flp,
							       NULL, -EAGAIN);
					return -EAGAIN;
				}
#ifdef CONFIG_IP_FIB_TRIE_STATS
				this_cpu_inc(stats->backtrack);
#endif
				/* Get Child's index */
				pn = node_parent_rcu(pn);
				cindex = get_index(pkey, pn);
			}

			/* strip the least significant bit from the cindex */
			cindex &= cindex - 1;

			/* grab pointer for next child node */
			cptr = &pn->tnode[cindex];
		}
	}

found:
	/* this line carries forward the xor from earlier in the function */
	index = key ^ n->key;

	/* Step 3: Process the leaf, if that fails fall back to backtracing */
	hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) {
		struct fib_info *fi = fa->fa_info;
		struct fib_nh_common *nhc;
		int nhsel, err;

		if ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen < KEYLENGTH)) {
			if (index >= (1ul << fa->fa_slen))
				continue;
		}
		if (fa->fa_dscp &&
		    inet_dscp_to_dsfield(fa->fa_dscp) != flp->flowi4_tos)
			continue;
		if (fi->fib_dead)
			continue;
		if (fa->fa_info->fib_scope < flp->flowi4_scope)
			continue;
		fib_alias_accessed(fa);
		err = fib_props[fa->fa_type].error;
		if (unlikely(err < 0)) {
out_reject:
#ifdef CONFIG_IP_FIB_TRIE_STATS
			this_cpu_inc(stats->semantic_match_passed);
#endif
			trace_fib_table_lookup(tb->tb_id, flp, NULL, err);
			return err;
		}
		if (fi->fib_flags & RTNH_F_DEAD)
			continue;

		if (unlikely(fi->nh)) {
			if (nexthop_is_blackhole(fi->nh)) {
				err = fib_props[RTN_BLACKHOLE].error;
				goto out_reject;
			}

			nhc = nexthop_get_nhc_lookup(fi->nh, fib_flags, flp,
						     &nhsel);
			if (nhc)
				goto set_result;
			goto miss;
		}

		for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
			nhc = fib_info_nhc(fi, nhsel);

			if (!fib_lookup_good_nhc(nhc, fib_flags, flp))
				continue;
set_result:
			if (!(fib_flags & FIB_LOOKUP_NOREF))
				refcount_inc(&fi->fib_clntref);

			res->prefix = htonl(n->key);
			res->prefixlen = KEYLENGTH - fa->fa_slen;
			res->nh_sel = nhsel;
			res->nhc = nhc;
			res->type = fa->fa_type;
			res->scope = fi->fib_scope;
			res->fi = fi;
			res->table = tb;
			res->fa_head = &n->leaf;
#ifdef CONFIG_IP_FIB_TRIE_STATS
			this_cpu_inc(stats->semantic_match_passed);
#endif
			trace_fib_table_lookup(tb->tb_id, flp, nhc, err);

			return err;
		}
	}
miss:
#ifdef CONFIG_IP_FIB_TRIE_STATS
	this_cpu_inc(stats->semantic_match_miss);
#endif
	goto backtrace;
}
EXPORT_SYMBOL_GPL(fib_table_lookup);

static void fib_remove_alias(struct trie *t, struct key_vector *tp,
			     struct key_vector *l, struct fib_alias *old)
{
	/* record the location of the previous list_info entry */
	struct hlist_node **pprev = old->fa_list.pprev;
	struct fib_alias *fa = hlist_entry(pprev, typeof(*fa), fa_list.next);

	/* remove the fib_alias from the list */
	hlist_del_rcu(&old->fa_list);

	/* if we emptied the list this leaf will be freed and we can sort
	 * out parent suffix lengths as a part of trie_rebalance
	 */
	if (hlist_empty(&l->leaf)) {
		if (tp->slen == l->slen)
			node_pull_suffix(tp, tp->pos);
		put_child_root(tp, l->key, NULL);
		node_free(l);
		trie_rebalance(t, tp);
		return;
	}

	/* only access fa if it is pointing at the last valid hlist_node */
	if (*pprev)
		return;

	/* update the trie with the latest suffix length */
	l->slen = fa->fa_slen;
	node_pull_suffix(tp, fa->fa_slen);
}

static void fib_notify_alias_delete(struct net *net, u32 key,
				    struct hlist_head *fah,
				    struct fib_alias *fa_to_delete,
				    struct netlink_ext_ack *extack)
{
	struct fib_alias *fa_next, *fa_to_notify;
	u32 tb_id = fa_to_delete->tb_id;
	u8 slen = fa_to_delete->fa_slen;
	enum fib_event_type fib_event;

	/* Do not notify if we do not care about the route. */
	if (fib_find_alias(fah, slen, 0, 0, tb_id, true) != fa_to_delete)
		return;

	/* Determine if the route should be replaced by the next route in the
	 * list.
	 */
	fa_next = hlist_entry_safe(fa_to_delete->fa_list.next,
				   struct fib_alias, fa_list);
	if (fa_next && fa_next->fa_slen == slen && fa_next->tb_id == tb_id) {
		fib_event = FIB_EVENT_ENTRY_REPLACE;
		fa_to_notify = fa_next;
	} else {
		fib_event = FIB_EVENT_ENTRY_DEL;
		fa_to_notify = fa_to_delete;
	}
	call_fib_entry_notifiers(net, fib_event, key, KEYLENGTH - slen,
				 fa_to_notify, extack);
}
1709 | |
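/* Delete one route, e.g. in response to an RTM_DELROUTE request such
 * as "ip route del 10.0.0.0/8". The alias is located by prefix, prefix
 * length, dscp and table id, then matched against the optional
 * type/scope/prefsrc/protocol/nexthop/metric filters in @cfg before
 * anything is unlinked.
 */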
1710 | /* Caller must hold RTNL. */ |
1711 | int fib_table_delete(struct net *net, struct fib_table *tb, |
1712 | struct fib_config *cfg, struct netlink_ext_ack *extack) |
1713 | { |
1714 | struct trie *t = (struct trie *) tb->tb_data; |
1715 | struct fib_alias *fa, *fa_to_delete; |
1716 | struct key_vector *l, *tp; |
1717 | u8 plen = cfg->fc_dst_len; |
1718 | u8 slen = KEYLENGTH - plen; |
1719 | dscp_t dscp; |
1720 | u32 key; |
1721 | |
1722 | key = ntohl(cfg->fc_dst); |
1723 | |
1724 | if (!fib_valid_key_len(key, plen, extack)) |
1725 | return -EINVAL; |
1726 | |
	l = fib_find_node(t, &tp, key);
1728 | if (!l) |
1729 | return -ESRCH; |
1730 | |
1731 | dscp = cfg->fc_dscp; |
1732 | fa = fib_find_alias(&l->leaf, slen, dscp, 0, tb->tb_id, false); |
1733 | if (!fa) |
1734 | return -ESRCH; |
1735 | |
1736 | pr_debug("Deleting %08x/%d dsfield=0x%02x t=%p\n" , key, plen, |
1737 | inet_dscp_to_dsfield(dscp), t); |
1738 | |
1739 | fa_to_delete = NULL; |
1740 | hlist_for_each_entry_from(fa, fa_list) { |
1741 | struct fib_info *fi = fa->fa_info; |
1742 | |
1743 | if ((fa->fa_slen != slen) || |
1744 | (fa->tb_id != tb->tb_id) || |
1745 | (fa->fa_dscp != dscp)) |
1746 | break; |
1747 | |
1748 | if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) && |
1749 | (cfg->fc_scope == RT_SCOPE_NOWHERE || |
1750 | fa->fa_info->fib_scope == cfg->fc_scope) && |
1751 | (!cfg->fc_prefsrc || |
1752 | fi->fib_prefsrc == cfg->fc_prefsrc) && |
1753 | (!cfg->fc_protocol || |
1754 | fi->fib_protocol == cfg->fc_protocol) && |
1755 | fib_nh_match(net, cfg, fi, extack) == 0 && |
1756 | fib_metrics_match(cfg, fi)) { |
1757 | fa_to_delete = fa; |
1758 | break; |
1759 | } |
1760 | } |
1761 | |
1762 | if (!fa_to_delete) |
1763 | return -ESRCH; |
1764 | |
	fib_notify_alias_delete(net, key, &l->leaf, fa_to_delete, extack);
1766 | rtmsg_fib(RTM_DELROUTE, htonl(key), fa_to_delete, plen, tb->tb_id, |
1767 | &cfg->fc_nlinfo, 0); |
1768 | |
1769 | if (!plen) |
1770 | tb->tb_num_default--; |
1771 | |
	fib_remove_alias(t, tp, l, fa_to_delete);
1773 | |
1774 | if (fa_to_delete->fa_state & FA_S_ACCESSED) |
		rt_cache_flush(cfg->fc_nlinfo.nl_net);
1776 | |
1777 | fib_release_info(fa_to_delete->fa_info); |
	alias_free_mem_rcu(fa_to_delete);
1779 | return 0; |
1780 | } |
1781 | |
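/* The walk runs in two phases: first descend toward the slot that
 * would hold @key, then climb and scan siblings until a leaf with a
 * key greater than or equal to @key is found. A NULL return with *tn
 * pointing at the trie root means the key space was exhausted.
 */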
1782 | /* Scan for the next leaf starting at the provided key value */ |
1783 | static struct key_vector *leaf_walk_rcu(struct key_vector **tn, t_key key) |
1784 | { |
1785 | struct key_vector *pn, *n = *tn; |
1786 | unsigned long cindex; |
1787 | |
	/* this loop is meant to try to find the key in the trie */
1789 | do { |
1790 | /* record parent and next child index */ |
1791 | pn = n; |
1792 | cindex = (key > pn->key) ? get_index(key, kv: pn) : 0; |
1793 | |
1794 | if (cindex >> pn->bits) |
1795 | break; |
1796 | |
1797 | /* descend into the next child */ |
1798 | n = get_child_rcu(pn, cindex++); |
1799 | if (!n) |
1800 | break; |
1801 | |
1802 | /* guarantee forward progress on the keys */ |
1803 | if (IS_LEAF(n) && (n->key >= key)) |
1804 | goto found; |
1805 | } while (IS_TNODE(n)); |
1806 | |
1807 | /* this loop will search for the next leaf with a greater key */ |
1808 | while (!IS_TRIE(pn)) { |
1809 | /* if we exhausted the parent node we will need to climb */ |
1810 | if (cindex >= (1ul << pn->bits)) { |
1811 | t_key pkey = pn->key; |
1812 | |
1813 | pn = node_parent_rcu(pn); |
			cindex = get_index(pkey, pn) + 1;
1815 | continue; |
1816 | } |
1817 | |
1818 | /* grab the next available node */ |
1819 | n = get_child_rcu(pn, cindex++); |
1820 | if (!n) |
1821 | continue; |
1822 | |
1823 | /* no need to compare keys since we bumped the index */ |
1824 | if (IS_LEAF(n)) |
1825 | goto found; |
1826 | |
		/* restart scanning in the new node */
1828 | pn = n; |
1829 | cindex = 0; |
1830 | } |
1831 | |
1832 | *tn = pn; |
1833 | return NULL; /* Root of trie */ |
1834 | found: |
	/* record the tnode the walk stopped in so the caller can resume */
1836 | *tn = pn; |
1837 | return n; |
1838 | } |
1839 | |
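/* Free every alias, leaf and tnode in the table. Children are visited
 * in reverse index order so each tnode can be dropped as soon as all
 * of its children have been freed.
 */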
1840 | static void fib_trie_free(struct fib_table *tb) |
1841 | { |
1842 | struct trie *t = (struct trie *)tb->tb_data; |
1843 | struct key_vector *pn = t->kv; |
1844 | unsigned long cindex = 1; |
1845 | struct hlist_node *tmp; |
1846 | struct fib_alias *fa; |
1847 | |
1848 | /* walk trie in reverse order and free everything */ |
1849 | for (;;) { |
1850 | struct key_vector *n; |
1851 | |
1852 | if (!(cindex--)) { |
1853 | t_key pkey = pn->key; |
1854 | |
1855 | if (IS_TRIE(pn)) |
1856 | break; |
1857 | |
1858 | n = pn; |
1859 | pn = node_parent(pn); |
1860 | |
1861 | /* drop emptied tnode */ |
			put_child_root(pn, n->key, NULL);
			node_free(n);

			cindex = get_index(pkey, pn);
1866 | |
1867 | continue; |
1868 | } |
1869 | |
1870 | /* grab the next available node */ |
1871 | n = get_child(pn, cindex); |
1872 | if (!n) |
1873 | continue; |
1874 | |
1875 | if (IS_TNODE(n)) { |
1876 | /* record pn and cindex for leaf walking */ |
1877 | pn = n; |
1878 | cindex = 1ul << n->bits; |
1879 | |
1880 | continue; |
1881 | } |
1882 | |
1883 | hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { |
			hlist_del_rcu(&fa->fa_list);
1885 | alias_free_mem_rcu(fa); |
1886 | } |
1887 | |
		put_child_root(pn, n->key, NULL);
1889 | node_free(n); |
1890 | } |
1891 | |
1892 | #ifdef CONFIG_IP_FIB_TRIE_STATS |
1893 | free_percpu(t->stats); |
1894 | #endif |
	kfree(tb);
1896 | } |
1897 | |
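/* Split the local-table routes out of a merged main/local trie into a
 * freshly allocated RT_TABLE_LOCAL table. Returns the old table
 * untouched if it was never merged, the new table on success, or NULL
 * if an allocation failed part way through.
 */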
1898 | struct fib_table *fib_trie_unmerge(struct fib_table *oldtb) |
1899 | { |
1900 | struct trie *ot = (struct trie *)oldtb->tb_data; |
1901 | struct key_vector *l, *tp = ot->kv; |
1902 | struct fib_table *local_tb; |
1903 | struct fib_alias *fa; |
1904 | struct trie *lt; |
1905 | t_key key = 0; |
1906 | |
1907 | if (oldtb->tb_data == oldtb->__data) |
1908 | return oldtb; |
1909 | |
	local_tb = fib_trie_table(RT_TABLE_LOCAL, NULL);
1911 | if (!local_tb) |
1912 | return NULL; |
1913 | |
1914 | lt = (struct trie *)local_tb->tb_data; |
1915 | |
	while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
1917 | struct key_vector *local_l = NULL, *local_tp; |
1918 | |
1919 | hlist_for_each_entry(fa, &l->leaf, fa_list) { |
1920 | struct fib_alias *new_fa; |
1921 | |
1922 | if (local_tb->tb_id != fa->tb_id) |
1923 | continue; |
1924 | |
1925 | /* clone fa for new local table */ |
			new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1927 | if (!new_fa) |
1928 | goto out; |
1929 | |
			memcpy(new_fa, fa, sizeof(*fa));
1931 | |
1932 | /* insert clone into table */ |
			if (!local_l)
				local_l = fib_find_node(lt, &local_tp, l->key);

			if (fib_insert_alias(lt, local_tp, local_l, new_fa,
					     NULL, l->key)) {
				kmem_cache_free(fn_alias_kmem, new_fa);
1939 | goto out; |
1940 | } |
1941 | } |
1942 | |
1943 | /* stop loop if key wrapped back to 0 */ |
1944 | key = l->key + 1; |
1945 | if (key < l->key) |
1946 | break; |
1947 | } |
1948 | |
1949 | return local_tb; |
1950 | out: |
	fib_trie_free(local_tb);
1952 | |
1953 | return NULL; |
1954 | } |
1955 | |
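/* Remove from the main table any alias that was cloned into the local
 * table by fib_trie_unmerge(), resizing nodes and pruning leaves that
 * become empty along the way.
 */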
1956 | /* Caller must hold RTNL */ |
1957 | void fib_table_flush_external(struct fib_table *tb) |
1958 | { |
1959 | struct trie *t = (struct trie *)tb->tb_data; |
1960 | struct key_vector *pn = t->kv; |
1961 | unsigned long cindex = 1; |
1962 | struct hlist_node *tmp; |
1963 | struct fib_alias *fa; |
1964 | |
1965 | /* walk trie in reverse order */ |
1966 | for (;;) { |
1967 | unsigned char slen = 0; |
1968 | struct key_vector *n; |
1969 | |
1970 | if (!(cindex--)) { |
1971 | t_key pkey = pn->key; |
1972 | |
1973 | /* cannot resize the trie vector */ |
1974 | if (IS_TRIE(pn)) |
1975 | break; |
1976 | |
			/* update the suffix to address pulled leaves */
			if (pn->slen > pn->pos)
				update_suffix(pn);

			/* resize completed node */
			pn = resize(t, pn);
			cindex = get_index(pkey, pn);
1984 | |
1985 | continue; |
1986 | } |
1987 | |
1988 | /* grab the next available node */ |
1989 | n = get_child(pn, cindex); |
1990 | if (!n) |
1991 | continue; |
1992 | |
1993 | if (IS_TNODE(n)) { |
1994 | /* record pn and cindex for leaf walking */ |
1995 | pn = n; |
1996 | cindex = 1ul << n->bits; |
1997 | |
1998 | continue; |
1999 | } |
2000 | |
2001 | hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { |
2002 | /* if alias was cloned to local then we just |
2003 | * need to remove the local copy from main |
2004 | */ |
2005 | if (tb->tb_id != fa->tb_id) { |
				hlist_del_rcu(&fa->fa_list);
2007 | alias_free_mem_rcu(fa); |
2008 | continue; |
2009 | } |
2010 | |
2011 | /* record local slen */ |
2012 | slen = fa->fa_slen; |
2013 | } |
2014 | |
2015 | /* update leaf slen */ |
2016 | n->slen = slen; |
2017 | |
		if (hlist_empty(&n->leaf)) {
			put_child_root(pn, n->key, NULL);
2020 | node_free(n); |
2021 | } |
2022 | } |
2023 | } |
2024 | |
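/* Flush aliases whose fib_info is marked RTNH_F_DEAD; error
 * (reject-type) routes are only flushed when @flush_all is set, i.e.
 * when the network namespace is being dismantled. Returns the number
 * of aliases removed.
 */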
2025 | /* Caller must hold RTNL. */ |
2026 | int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all) |
2027 | { |
2028 | struct trie *t = (struct trie *)tb->tb_data; |
2029 | struct key_vector *pn = t->kv; |
2030 | unsigned long cindex = 1; |
2031 | struct hlist_node *tmp; |
2032 | struct fib_alias *fa; |
2033 | int found = 0; |
2034 | |
2035 | /* walk trie in reverse order */ |
2036 | for (;;) { |
2037 | unsigned char slen = 0; |
2038 | struct key_vector *n; |
2039 | |
2040 | if (!(cindex--)) { |
2041 | t_key pkey = pn->key; |
2042 | |
2043 | /* cannot resize the trie vector */ |
2044 | if (IS_TRIE(pn)) |
2045 | break; |
2046 | |
			/* update the suffix to address pulled leaves */
			if (pn->slen > pn->pos)
				update_suffix(pn);

			/* resize completed node */
			pn = resize(t, pn);
			cindex = get_index(pkey, pn);
2054 | |
2055 | continue; |
2056 | } |
2057 | |
2058 | /* grab the next available node */ |
2059 | n = get_child(pn, cindex); |
2060 | if (!n) |
2061 | continue; |
2062 | |
2063 | if (IS_TNODE(n)) { |
2064 | /* record pn and cindex for leaf walking */ |
2065 | pn = n; |
2066 | cindex = 1ul << n->bits; |
2067 | |
2068 | continue; |
2069 | } |
2070 | |
2071 | hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { |
2072 | struct fib_info *fi = fa->fa_info; |
2073 | |
2074 | if (!fi || tb->tb_id != fa->tb_id || |
2075 | (!(fi->fib_flags & RTNH_F_DEAD) && |
2076 | !fib_props[fa->fa_type].error)) { |
2077 | slen = fa->fa_slen; |
2078 | continue; |
2079 | } |
2080 | |
2081 | /* Do not flush error routes if network namespace is |
2082 | * not being dismantled |
2083 | */ |
2084 | if (!flush_all && fib_props[fa->fa_type].error) { |
2085 | slen = fa->fa_slen; |
2086 | continue; |
2087 | } |
2088 | |
			fib_notify_alias_delete(net, n->key, &n->leaf, fa,
						NULL);
			hlist_del_rcu(&fa->fa_list);
2092 | fib_release_info(fa->fa_info); |
2093 | alias_free_mem_rcu(fa); |
2094 | found++; |
2095 | } |
2096 | |
2097 | /* update leaf slen */ |
2098 | n->slen = slen; |
2099 | |
		if (hlist_empty(&n->leaf)) {
			put_child_root(pn, n->key, NULL);
2102 | node_free(n); |
2103 | } |
2104 | } |
2105 | |
2106 | pr_debug("trie_flush found=%d\n" , found); |
2107 | return found; |
2108 | } |
2109 | |
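/* Walk one table and emit an RTM_NEWROUTE replace message for every
 * alias whose fib_info was touched by a nexthop update.
 */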
2110 | /* derived from fib_trie_free */ |
2111 | static void __fib_info_notify_update(struct net *net, struct fib_table *tb, |
2112 | struct nl_info *info) |
2113 | { |
2114 | struct trie *t = (struct trie *)tb->tb_data; |
2115 | struct key_vector *pn = t->kv; |
2116 | unsigned long cindex = 1; |
2117 | struct fib_alias *fa; |
2118 | |
2119 | for (;;) { |
2120 | struct key_vector *n; |
2121 | |
2122 | if (!(cindex--)) { |
2123 | t_key pkey = pn->key; |
2124 | |
2125 | if (IS_TRIE(pn)) |
2126 | break; |
2127 | |
2128 | pn = node_parent(pn); |
			cindex = get_index(pkey, pn);
2130 | continue; |
2131 | } |
2132 | |
2133 | /* grab the next available node */ |
2134 | n = get_child(pn, cindex); |
2135 | if (!n) |
2136 | continue; |
2137 | |
2138 | if (IS_TNODE(n)) { |
2139 | /* record pn and cindex for leaf walking */ |
2140 | pn = n; |
2141 | cindex = 1ul << n->bits; |
2142 | |
2143 | continue; |
2144 | } |
2145 | |
2146 | hlist_for_each_entry(fa, &n->leaf, fa_list) { |
2147 | struct fib_info *fi = fa->fa_info; |
2148 | |
2149 | if (!fi || !fi->nh_updated || fa->tb_id != tb->tb_id) |
2150 | continue; |
2151 | |
2152 | rtmsg_fib(RTM_NEWROUTE, htonl(n->key), fa, |
2153 | KEYLENGTH - fa->fa_slen, tb->tb_id, |
2154 | info, NLM_F_REPLACE); |
2155 | } |
2156 | } |
2157 | } |
2158 | |
2159 | void fib_info_notify_update(struct net *net, struct nl_info *info) |
2160 | { |
2161 | unsigned int h; |
2162 | |
2163 | for (h = 0; h < FIB_TABLE_HASHSZ; h++) { |
2164 | struct hlist_head *head = &net->ipv4.fib_table_hash[h]; |
2165 | struct fib_table *tb; |
2166 | |
2167 | hlist_for_each_entry_rcu(tb, head, tb_hlist, |
2168 | lockdep_rtnl_is_held()) |
2169 | __fib_info_notify_update(net, tb, info); |
2170 | } |
2171 | } |
2172 | |
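/* Replay the contents of one leaf to a newly registered notifier.
 * Aliases are ordered by suffix length, so only the first (preferred)
 * alias of each prefix length is reported.
 */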
2173 | static int fib_leaf_notify(struct key_vector *l, struct fib_table *tb, |
2174 | struct notifier_block *nb, |
2175 | struct netlink_ext_ack *extack) |
2176 | { |
2177 | struct fib_alias *fa; |
2178 | int last_slen = -1; |
2179 | int err; |
2180 | |
2181 | hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { |
2182 | struct fib_info *fi = fa->fa_info; |
2183 | |
2184 | if (!fi) |
2185 | continue; |
2186 | |
2187 | /* local and main table can share the same trie, |
2188 | * so don't notify twice for the same entry. |
2189 | */ |
2190 | if (tb->tb_id != fa->tb_id) |
2191 | continue; |
2192 | |
2193 | if (fa->fa_slen == last_slen) |
2194 | continue; |
2195 | |
2196 | last_slen = fa->fa_slen; |
		err = call_fib_entry_notifier(nb, FIB_EVENT_ENTRY_REPLACE,
					      l->key, KEYLENGTH - fa->fa_slen,
					      fa, extack);
2200 | if (err) |
2201 | return err; |
2202 | } |
2203 | return 0; |
2204 | } |
2205 | |
2206 | static int fib_table_notify(struct fib_table *tb, struct notifier_block *nb, |
2207 | struct netlink_ext_ack *extack) |
2208 | { |
2209 | struct trie *t = (struct trie *)tb->tb_data; |
2210 | struct key_vector *l, *tp = t->kv; |
2211 | t_key key = 0; |
2212 | int err; |
2213 | |
	while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
2215 | err = fib_leaf_notify(l, tb, nb, extack); |
2216 | if (err) |
2217 | return err; |
2218 | |
2219 | key = l->key + 1; |
2220 | /* stop in case of wrap around */ |
2221 | if (key < l->key) |
2222 | break; |
2223 | } |
2224 | return 0; |
2225 | } |
2226 | |
2227 | int fib_notify(struct net *net, struct notifier_block *nb, |
2228 | struct netlink_ext_ack *extack) |
2229 | { |
2230 | unsigned int h; |
2231 | int err; |
2232 | |
2233 | for (h = 0; h < FIB_TABLE_HASHSZ; h++) { |
2234 | struct hlist_head *head = &net->ipv4.fib_table_hash[h]; |
2235 | struct fib_table *tb; |
2236 | |
2237 | hlist_for_each_entry_rcu(tb, head, tb_hlist) { |
2238 | err = fib_table_notify(tb, nb, extack); |
2239 | if (err) |
2240 | return err; |
2241 | } |
2242 | } |
2243 | return 0; |
2244 | } |
2245 | |
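/* RCU callback that releases a table once all readers are done; the
 * per-cpu stats are freed only when the table owns its trie rather
 * than sharing an alias table's.
 */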
2246 | static void __trie_free_rcu(struct rcu_head *head) |
2247 | { |
2248 | struct fib_table *tb = container_of(head, struct fib_table, rcu); |
2249 | #ifdef CONFIG_IP_FIB_TRIE_STATS |
2250 | struct trie *t = (struct trie *)tb->tb_data; |
2251 | |
2252 | if (tb->tb_data == tb->__data) |
2253 | free_percpu(t->stats); |
2254 | #endif /* CONFIG_IP_FIB_TRIE_STATS */ |
	kfree(tb);
2256 | } |
2257 | |
2258 | void fib_free_table(struct fib_table *tb) |
2259 | { |
	call_rcu(&tb->rcu, __trie_free_rcu);
2261 | } |
2262 | |
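/* Dump the aliases of one leaf into a netlink message. cb->args[4] and
 * cb->args[5] record how far a previously interrupted dump got, so the
 * walk can resume at the same alias on the next invocation.
 */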
2263 | static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb, |
2264 | struct sk_buff *skb, struct netlink_callback *cb, |
2265 | struct fib_dump_filter *filter) |
2266 | { |
2267 | unsigned int flags = NLM_F_MULTI; |
2268 | __be32 xkey = htonl(l->key); |
2269 | int i, s_i, i_fa, s_fa, err; |
2270 | struct fib_alias *fa; |
2271 | |
2272 | if (filter->filter_set || |
2273 | !filter->dump_exceptions || !filter->dump_routes) |
2274 | flags |= NLM_F_DUMP_FILTERED; |
2275 | |
2276 | s_i = cb->args[4]; |
2277 | s_fa = cb->args[5]; |
2278 | i = 0; |
2279 | |
	/* rcu_read_lock is held by caller */
2281 | hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { |
2282 | struct fib_info *fi = fa->fa_info; |
2283 | |
2284 | if (i < s_i) |
2285 | goto next; |
2286 | |
2287 | i_fa = 0; |
2288 | |
2289 | if (tb->tb_id != fa->tb_id) |
2290 | goto next; |
2291 | |
2292 | if (filter->filter_set) { |
2293 | if (filter->rt_type && fa->fa_type != filter->rt_type) |
2294 | goto next; |
2295 | |
2296 | if ((filter->protocol && |
2297 | fi->fib_protocol != filter->protocol)) |
2298 | goto next; |
2299 | |
2300 | if (filter->dev && |
			    !fib_info_nh_uses_dev(fi, filter->dev))
2302 | goto next; |
2303 | } |
2304 | |
2305 | if (filter->dump_routes) { |
2306 | if (!s_fa) { |
2307 | struct fib_rt_info fri; |
2308 | |
2309 | fri.fi = fi; |
2310 | fri.tb_id = tb->tb_id; |
2311 | fri.dst = xkey; |
2312 | fri.dst_len = KEYLENGTH - fa->fa_slen; |
2313 | fri.dscp = fa->fa_dscp; |
2314 | fri.type = fa->fa_type; |
2315 | fri.offload = READ_ONCE(fa->offload); |
2316 | fri.trap = READ_ONCE(fa->trap); |
2317 | fri.offload_failed = READ_ONCE(fa->offload_failed); |
				err = fib_dump_info(skb,
						    NETLINK_CB(cb->skb).portid,
						    cb->nlh->nlmsg_seq,
						    RTM_NEWROUTE, &fri, flags);
2322 | if (err < 0) |
2323 | goto stop; |
2324 | } |
2325 | |
2326 | i_fa++; |
2327 | } |
2328 | |
2329 | if (filter->dump_exceptions) { |
			err = fib_dump_info_fnhe(skb, cb, tb->tb_id, fi,
						 &i_fa, s_fa, flags);
2332 | if (err < 0) |
2333 | goto stop; |
2334 | } |
2335 | |
2336 | next: |
2337 | i++; |
2338 | } |
2339 | |
2340 | cb->args[4] = i; |
2341 | return skb->len; |
2342 | |
2343 | stop: |
2344 | cb->args[4] = i; |
2345 | cb->args[5] = i_fa; |
2346 | return err; |
2347 | } |
2348 | |
/* rcu_read_lock must be held by the caller on the read side */
2350 | int fib_table_dump(struct fib_table *tb, struct sk_buff *skb, |
2351 | struct netlink_callback *cb, struct fib_dump_filter *filter) |
2352 | { |
2353 | struct trie *t = (struct trie *)tb->tb_data; |
2354 | struct key_vector *l, *tp = t->kv; |
2355 | /* Dump starting at last key. |
	 * Note: 0.0.0.0/0 (i.e. the default route) is the first key.
2357 | */ |
2358 | int count = cb->args[2]; |
2359 | t_key key = cb->args[3]; |
2360 | |
2361 | /* First time here, count and key are both always 0. Count > 0 |
2362 | * and key == 0 means the dump has wrapped around and we are done. |
2363 | */ |
2364 | if (count && !key) |
2365 | return skb->len; |
2366 | |
	while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
2368 | int err; |
2369 | |
2370 | err = fn_trie_dump_leaf(l, tb, skb, cb, filter); |
2371 | if (err < 0) { |
2372 | cb->args[3] = key; |
2373 | cb->args[2] = count; |
2374 | return err; |
2375 | } |
2376 | |
2377 | ++count; |
2378 | key = l->key + 1; |
2379 | |
		memset(&cb->args[4], 0,
		       sizeof(cb->args) - 4*sizeof(cb->args[0]));
2382 | |
2383 | /* stop loop if key wrapped back to 0 */ |
2384 | if (key < l->key) |
2385 | break; |
2386 | } |
2387 | |
2388 | cb->args[3] = key; |
2389 | cb->args[2] = count; |
2390 | |
2391 | return skb->len; |
2392 | } |
2393 | |
2394 | void __init fib_trie_init(void) |
2395 | { |
	fn_alias_kmem = kmem_cache_create("ip_fib_alias",
					  sizeof(struct fib_alias),
					  0, SLAB_PANIC | SLAB_ACCOUNT, NULL);

	trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
					   LEAF_SIZE,
					   0, SLAB_PANIC | SLAB_ACCOUNT, NULL);
2403 | } |
2404 | |
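/* Allocate a table. When @alias is given, the new table shares the
 * trie embedded in the alias table (used to merge the local table into
 * main); otherwise the trie lives in the new table's own __data area.
 */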
2405 | struct fib_table *fib_trie_table(u32 id, struct fib_table *alias) |
2406 | { |
2407 | struct fib_table *tb; |
2408 | struct trie *t; |
2409 | size_t sz = sizeof(*tb); |
2410 | |
2411 | if (!alias) |
2412 | sz += sizeof(struct trie); |
2413 | |
	tb = kzalloc(sz, GFP_KERNEL);
2415 | if (!tb) |
2416 | return NULL; |
2417 | |
2418 | tb->tb_id = id; |
2419 | tb->tb_num_default = 0; |
2420 | tb->tb_data = (alias ? alias->__data : tb->__data); |
2421 | |
2422 | if (alias) |
2423 | return tb; |
2424 | |
2425 | t = (struct trie *) tb->tb_data; |
2426 | t->kv[0].pos = KEYLENGTH; |
2427 | t->kv[0].slen = KEYLENGTH; |
2428 | #ifdef CONFIG_IP_FIB_TRIE_STATS |
2429 | t->stats = alloc_percpu(struct trie_use_stats); |
2430 | if (!t->stats) { |
2431 | kfree(tb); |
2432 | tb = NULL; |
2433 | } |
2434 | #endif |
2435 | |
2436 | return tb; |
2437 | } |
2438 | |
2439 | #ifdef CONFIG_PROC_FS |
2440 | /* Depth first Trie walk iterator */ |
2441 | struct fib_trie_iter { |
2442 | struct seq_net_private p; |
2443 | struct fib_table *tb; |
2444 | struct key_vector *tnode; |
2445 | unsigned int index; |
2446 | unsigned int depth; |
2447 | }; |
2448 | |
2449 | static struct key_vector *fib_trie_get_next(struct fib_trie_iter *iter) |
2450 | { |
2451 | unsigned long cindex = iter->index; |
2452 | struct key_vector *pn = iter->tnode; |
2453 | t_key pkey; |
2454 | |
2455 | pr_debug("get_next iter={node=%p index=%d depth=%d}\n" , |
2456 | iter->tnode, iter->index, iter->depth); |
2457 | |
2458 | while (!IS_TRIE(pn)) { |
2459 | while (cindex < child_length(pn)) { |
2460 | struct key_vector *n = get_child_rcu(pn, cindex++); |
2461 | |
2462 | if (!n) |
2463 | continue; |
2464 | |
2465 | if (IS_LEAF(n)) { |
2466 | iter->tnode = pn; |
2467 | iter->index = cindex; |
2468 | } else { |
2469 | /* push down one level */ |
2470 | iter->tnode = n; |
2471 | iter->index = 0; |
2472 | ++iter->depth; |
2473 | } |
2474 | |
2475 | return n; |
2476 | } |
2477 | |
2478 | /* Current node exhausted, pop back up */ |
2479 | pkey = pn->key; |
2480 | pn = node_parent_rcu(pn); |
2481 | cindex = get_index(pkey, pn) + 1; |
2482 | --iter->depth; |
2483 | } |
2484 | |
2485 | /* record root node so further searches know we are done */ |
2486 | iter->tnode = pn; |
2487 | iter->index = 0; |
2488 | |
2489 | return NULL; |
2490 | } |
2491 | |
2492 | static struct key_vector *fib_trie_get_first(struct fib_trie_iter *iter, |
2493 | struct trie *t) |
2494 | { |
2495 | struct key_vector *n, *pn; |
2496 | |
2497 | if (!t) |
2498 | return NULL; |
2499 | |
2500 | pn = t->kv; |
2501 | n = rcu_dereference(pn->tnode[0]); |
2502 | if (!n) |
2503 | return NULL; |
2504 | |
2505 | if (IS_TNODE(n)) { |
2506 | iter->tnode = n; |
2507 | iter->index = 0; |
2508 | iter->depth = 1; |
2509 | } else { |
2510 | iter->tnode = pn; |
2511 | iter->index = 0; |
2512 | iter->depth = 0; |
2513 | } |
2514 | |
2515 | return n; |
2516 | } |
2517 | |
2518 | static void trie_collect_stats(struct trie *t, struct trie_stat *s) |
2519 | { |
2520 | struct key_vector *n; |
2521 | struct fib_trie_iter iter; |
2522 | |
2523 | memset(s, 0, sizeof(*s)); |
2524 | |
2525 | rcu_read_lock(); |
2526 | for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) { |
2527 | if (IS_LEAF(n)) { |
2528 | struct fib_alias *fa; |
2529 | |
2530 | s->leaves++; |
2531 | s->totdepth += iter.depth; |
2532 | if (iter.depth > s->maxdepth) |
2533 | s->maxdepth = iter.depth; |
2534 | |
2535 | hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) |
2536 | ++s->prefixes; |
2537 | } else { |
2538 | s->tnodes++; |
2539 | if (n->bits < MAX_STAT_DEPTH) |
2540 | s->nodesizes[n->bits]++; |
2541 | s->nullpointers += tn_info(n)->empty_children; |
2542 | } |
2543 | } |
2544 | rcu_read_unlock(); |
2545 | } |
2546 | |
2547 | /* |
2548 | * This outputs /proc/net/fib_triestats |
2549 | */ |
2550 | static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat) |
2551 | { |
2552 | unsigned int i, max, pointers, bytes, avdepth; |
2553 | |
2554 | if (stat->leaves) |
2555 | avdepth = stat->totdepth*100 / stat->leaves; |
2556 | else |
2557 | avdepth = 0; |
2558 | |
2559 | seq_printf(seq, "\tAver depth: %u.%02d\n" , |
2560 | avdepth / 100, avdepth % 100); |
2561 | seq_printf(seq, "\tMax depth: %u\n" , stat->maxdepth); |
2562 | |
2563 | seq_printf(seq, "\tLeaves: %u\n" , stat->leaves); |
2564 | bytes = LEAF_SIZE * stat->leaves; |
2565 | |
2566 | seq_printf(seq, "\tPrefixes: %u\n" , stat->prefixes); |
2567 | bytes += sizeof(struct fib_alias) * stat->prefixes; |
2568 | |
2569 | seq_printf(seq, "\tInternal nodes: %u\n\t" , stat->tnodes); |
2570 | bytes += TNODE_SIZE(0) * stat->tnodes; |
2571 | |
2572 | max = MAX_STAT_DEPTH; |
2573 | while (max > 0 && stat->nodesizes[max-1] == 0) |
2574 | max--; |
2575 | |
2576 | pointers = 0; |
2577 | for (i = 1; i < max; i++) |
2578 | if (stat->nodesizes[i] != 0) { |
2579 | seq_printf(seq, " %u: %u" , i, stat->nodesizes[i]); |
2580 | pointers += (1<<i) * stat->nodesizes[i]; |
2581 | } |
2582 | seq_putc(seq, '\n'); |
2583 | seq_printf(seq, "\tPointers: %u\n" , pointers); |
2584 | |
2585 | bytes += sizeof(struct key_vector *) * pointers; |
2586 | seq_printf(seq, "Null ptrs: %u\n" , stat->nullpointers); |
2587 | seq_printf(seq, "Total size: %u kB\n" , (bytes + 1023) / 1024); |
2588 | } |
2589 | |
2590 | #ifdef CONFIG_IP_FIB_TRIE_STATS |
2591 | static void trie_show_usage(struct seq_file *seq, |
2592 | const struct trie_use_stats __percpu *stats) |
2593 | { |
2594 | struct trie_use_stats s = { 0 }; |
2595 | int cpu; |
2596 | |
2597 | /* loop through all of the CPUs and gather up the stats */ |
2598 | for_each_possible_cpu(cpu) { |
2599 | const struct trie_use_stats *pcpu = per_cpu_ptr(stats, cpu); |
2600 | |
2601 | s.gets += pcpu->gets; |
2602 | s.backtrack += pcpu->backtrack; |
2603 | s.semantic_match_passed += pcpu->semantic_match_passed; |
2604 | s.semantic_match_miss += pcpu->semantic_match_miss; |
2605 | s.null_node_hit += pcpu->null_node_hit; |
2606 | s.resize_node_skipped += pcpu->resize_node_skipped; |
2607 | } |
2608 | |
2609 | seq_printf(seq, "\nCounters:\n---------\n" ); |
2610 | seq_printf(seq, "gets = %u\n" , s.gets); |
2611 | seq_printf(seq, "backtracks = %u\n" , s.backtrack); |
2612 | seq_printf(seq, "semantic match passed = %u\n" , |
2613 | s.semantic_match_passed); |
2614 | seq_printf(seq, "semantic match miss = %u\n" , s.semantic_match_miss); |
2615 | seq_printf(seq, "null node hit= %u\n" , s.null_node_hit); |
2616 | seq_printf(seq, "skipped node resize = %u\n\n" , s.resize_node_skipped); |
2617 | } |
2618 | #endif /* CONFIG_IP_FIB_TRIE_STATS */ |
2619 | |
2620 | static void fib_table_print(struct seq_file *seq, struct fib_table *tb) |
2621 | { |
2622 | if (tb->tb_id == RT_TABLE_LOCAL) |
2623 | seq_puts(seq, "Local:\n" ); |
2624 | else if (tb->tb_id == RT_TABLE_MAIN) |
2625 | seq_puts(seq, "Main:\n" ); |
2626 | else |
2627 | seq_printf(seq, "Id %d:\n" , tb->tb_id); |
2628 | } |
2629 | |
2630 | |
2631 | static int fib_triestat_seq_show(struct seq_file *seq, void *v) |
2632 | { |
2633 | struct net *net = seq->private; |
2634 | unsigned int h; |
2635 | |
	seq_printf(seq,
		   "Basic info: size of leaf:"
		   " %zd bytes, size of tnode: %zd bytes.\n",
		   LEAF_SIZE, TNODE_SIZE(0));
2640 | |
2641 | rcu_read_lock(); |
2642 | for (h = 0; h < FIB_TABLE_HASHSZ; h++) { |
2643 | struct hlist_head *head = &net->ipv4.fib_table_hash[h]; |
2644 | struct fib_table *tb; |
2645 | |
2646 | hlist_for_each_entry_rcu(tb, head, tb_hlist) { |
2647 | struct trie *t = (struct trie *) tb->tb_data; |
2648 | struct trie_stat stat; |
2649 | |
2650 | if (!t) |
2651 | continue; |
2652 | |
2653 | fib_table_print(seq, tb); |
2654 | |
2655 | trie_collect_stats(t, &stat); |
2656 | trie_show_stats(seq, &stat); |
2657 | #ifdef CONFIG_IP_FIB_TRIE_STATS |
2658 | trie_show_usage(seq, t->stats); |
2659 | #endif |
2660 | } |
2661 | cond_resched_rcu(); |
2662 | } |
2663 | rcu_read_unlock(); |
2664 | |
2665 | return 0; |
2666 | } |
2667 | |
2668 | static struct key_vector *fib_trie_get_idx(struct seq_file *seq, loff_t pos) |
2669 | { |
2670 | struct fib_trie_iter *iter = seq->private; |
2671 | struct net *net = seq_file_net(seq); |
2672 | loff_t idx = 0; |
2673 | unsigned int h; |
2674 | |
2675 | for (h = 0; h < FIB_TABLE_HASHSZ; h++) { |
2676 | struct hlist_head *head = &net->ipv4.fib_table_hash[h]; |
2677 | struct fib_table *tb; |
2678 | |
2679 | hlist_for_each_entry_rcu(tb, head, tb_hlist) { |
2680 | struct key_vector *n; |
2681 | |
2682 | for (n = fib_trie_get_first(iter, |
2683 | (struct trie *) tb->tb_data); |
2684 | n; n = fib_trie_get_next(iter)) |
2685 | if (pos == idx++) { |
2686 | iter->tb = tb; |
2687 | return n; |
2688 | } |
2689 | } |
2690 | } |
2691 | |
2692 | return NULL; |
2693 | } |
2694 | |
2695 | static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos) |
2696 | __acquires(RCU) |
2697 | { |
2698 | rcu_read_lock(); |
2699 | return fib_trie_get_idx(seq, *pos); |
2700 | } |
2701 | |
2702 | static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
2703 | { |
2704 | struct fib_trie_iter *iter = seq->private; |
2705 | struct net *net = seq_file_net(seq); |
2706 | struct fib_table *tb = iter->tb; |
2707 | struct hlist_node *tb_node; |
2708 | unsigned int h; |
2709 | struct key_vector *n; |
2710 | |
2711 | ++*pos; |
2712 | /* next node in same table */ |
2713 | n = fib_trie_get_next(iter); |
2714 | if (n) |
2715 | return n; |
2716 | |
2717 | /* walk rest of this hash chain */ |
2718 | h = tb->tb_id & (FIB_TABLE_HASHSZ - 1); |
2719 | while ((tb_node = rcu_dereference(hlist_next_rcu(&tb->tb_hlist)))) { |
2720 | tb = hlist_entry(tb_node, struct fib_table, tb_hlist); |
2721 | n = fib_trie_get_first(iter, (struct trie *) tb->tb_data); |
2722 | if (n) |
2723 | goto found; |
2724 | } |
2725 | |
2726 | /* new hash chain */ |
2727 | while (++h < FIB_TABLE_HASHSZ) { |
2728 | struct hlist_head *head = &net->ipv4.fib_table_hash[h]; |
2729 | hlist_for_each_entry_rcu(tb, head, tb_hlist) { |
2730 | n = fib_trie_get_first(iter, (struct trie *) tb->tb_data); |
2731 | if (n) |
2732 | goto found; |
2733 | } |
2734 | } |
2735 | return NULL; |
2736 | |
2737 | found: |
2738 | iter->tb = tb; |
2739 | return n; |
2740 | } |
2741 | |
2742 | static void fib_trie_seq_stop(struct seq_file *seq, void *v) |
2743 | __releases(RCU) |
2744 | { |
2745 | rcu_read_unlock(); |
2746 | } |
2747 | |
2748 | static void seq_indent(struct seq_file *seq, int n) |
2749 | { |
2750 | while (n-- > 0) |
2751 | seq_puts(seq, " " ); |
2752 | } |
2753 | |
2754 | static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s) |
2755 | { |
2756 | switch (s) { |
	case RT_SCOPE_UNIVERSE: return "universe";
	case RT_SCOPE_SITE: return "site";
	case RT_SCOPE_LINK: return "link";
	case RT_SCOPE_HOST: return "host";
	case RT_SCOPE_NOWHERE: return "nowhere";
	default:
		snprintf(buf, len, "scope=%d", s);
		return buf;
2765 | } |
2766 | } |
2767 | |
2768 | static const char *const rtn_type_names[__RTN_MAX] = { |
2769 | [RTN_UNSPEC] = "UNSPEC" , |
2770 | [RTN_UNICAST] = "UNICAST" , |
2771 | [RTN_LOCAL] = "LOCAL" , |
2772 | [RTN_BROADCAST] = "BROADCAST" , |
2773 | [RTN_ANYCAST] = "ANYCAST" , |
2774 | [RTN_MULTICAST] = "MULTICAST" , |
2775 | [RTN_BLACKHOLE] = "BLACKHOLE" , |
2776 | [RTN_UNREACHABLE] = "UNREACHABLE" , |
2777 | [RTN_PROHIBIT] = "PROHIBIT" , |
2778 | [RTN_THROW] = "THROW" , |
2779 | [RTN_NAT] = "NAT" , |
2780 | [RTN_XRESOLVE] = "XRESOLVE" , |
2781 | }; |
2782 | |
2783 | static inline const char *rtn_type(char *buf, size_t len, unsigned int t) |
2784 | { |
2785 | if (t < __RTN_MAX && rtn_type_names[t]) |
2786 | return rtn_type_names[t]; |
2787 | snprintf(buf, len, "type %u" , t); |
2788 | return buf; |
2789 | } |
2790 | |
2791 | /* Pretty print the trie */ |
2792 | static int fib_trie_seq_show(struct seq_file *seq, void *v) |
2793 | { |
2794 | const struct fib_trie_iter *iter = seq->private; |
2795 | struct key_vector *n = v; |
2796 | |
2797 | if (IS_TRIE(node_parent_rcu(n))) |
2798 | fib_table_print(seq, iter->tb); |
2799 | |
2800 | if (IS_TNODE(n)) { |
2801 | __be32 prf = htonl(n->key); |
2802 | |
2803 | seq_indent(seq, iter->depth-1); |
2804 | seq_printf(seq, " +-- %pI4/%zu %u %u %u\n" , |
2805 | &prf, KEYLENGTH - n->pos - n->bits, n->bits, |
2806 | tn_info(n)->full_children, |
2807 | tn_info(n)->empty_children); |
2808 | } else { |
2809 | __be32 val = htonl(n->key); |
2810 | struct fib_alias *fa; |
2811 | |
2812 | seq_indent(seq, iter->depth); |
2813 | seq_printf(seq, " |-- %pI4\n" , &val); |
2814 | |
2815 | hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) { |
2816 | char buf1[32], buf2[32]; |
2817 | |
2818 | seq_indent(seq, iter->depth + 1); |
2819 | seq_printf(seq, " /%zu %s %s" , |
2820 | KEYLENGTH - fa->fa_slen, |
2821 | rtn_scope(buf1, sizeof(buf1), |
2822 | fa->fa_info->fib_scope), |
2823 | rtn_type(buf2, sizeof(buf2), |
2824 | fa->fa_type)); |
2825 | if (fa->fa_dscp) |
2826 | seq_printf(seq, " tos=%d" , |
2827 | inet_dscp_to_dsfield(fa->fa_dscp)); |
2828 | seq_putc(seq, '\n'); |
2829 | } |
2830 | } |
2831 | |
2832 | return 0; |
2833 | } |
2834 | |
2835 | static const struct seq_operations fib_trie_seq_ops = { |
2836 | .start = fib_trie_seq_start, |
2837 | .next = fib_trie_seq_next, |
2838 | .stop = fib_trie_seq_stop, |
2839 | .show = fib_trie_seq_show, |
2840 | }; |
2841 | |
2842 | struct fib_route_iter { |
2843 | struct seq_net_private p; |
2844 | struct fib_table *main_tb; |
2845 | struct key_vector *tnode; |
2846 | loff_t pos; |
2847 | t_key key; |
2848 | }; |
2849 | |
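/* Position the /proc/net/route iterator @pos leaves into the trie,
 * resuming from the cached key of the previous read when possible
 * instead of rewalking from the start.
 */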
2850 | static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter, |
2851 | loff_t pos) |
2852 | { |
2853 | struct key_vector *l, **tp = &iter->tnode; |
2854 | t_key key; |
2855 | |
2856 | /* use cached location of previously found key */ |
2857 | if (iter->pos > 0 && pos >= iter->pos) { |
2858 | key = iter->key; |
2859 | } else { |
2860 | iter->pos = 1; |
2861 | key = 0; |
2862 | } |
2863 | |
2864 | pos -= iter->pos; |
2865 | |
2866 | while ((l = leaf_walk_rcu(tp, key)) && (pos-- > 0)) { |
2867 | key = l->key + 1; |
2868 | iter->pos++; |
2869 | l = NULL; |
2870 | |
2871 | /* handle unlikely case of a key wrap */ |
2872 | if (!key) |
2873 | break; |
2874 | } |
2875 | |
2876 | if (l) |
2877 | iter->key = l->key; /* remember it */ |
2878 | else |
2879 | iter->pos = 0; /* forget it */ |
2880 | |
2881 | return l; |
2882 | } |
2883 | |
2884 | static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos) |
2885 | __acquires(RCU) |
2886 | { |
2887 | struct fib_route_iter *iter = seq->private; |
2888 | struct fib_table *tb; |
2889 | struct trie *t; |
2890 | |
2891 | rcu_read_lock(); |
2892 | |
2893 | tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN); |
2894 | if (!tb) |
2895 | return NULL; |
2896 | |
2897 | iter->main_tb = tb; |
2898 | t = (struct trie *)tb->tb_data; |
2899 | iter->tnode = t->kv; |
2900 | |
2901 | if (*pos != 0) |
2902 | return fib_route_get_idx(iter, *pos); |
2903 | |
2904 | iter->pos = 0; |
2905 | iter->key = KEY_MAX; |
2906 | |
2907 | return SEQ_START_TOKEN; |
2908 | } |
2909 | |
2910 | static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
2911 | { |
2912 | struct fib_route_iter *iter = seq->private; |
2913 | struct key_vector *l = NULL; |
2914 | t_key key = iter->key + 1; |
2915 | |
2916 | ++*pos; |
2917 | |
2918 | /* only allow key of 0 for start of sequence */ |
2919 | if ((v == SEQ_START_TOKEN) || key) |
2920 | l = leaf_walk_rcu(&iter->tnode, key); |
2921 | |
2922 | if (l) { |
2923 | iter->key = l->key; |
2924 | iter->pos++; |
2925 | } else { |
2926 | iter->pos = 0; |
2927 | } |
2928 | |
2929 | return l; |
2930 | } |
2931 | |
2932 | static void fib_route_seq_stop(struct seq_file *seq, void *v) |
2933 | __releases(RCU) |
2934 | { |
2935 | rcu_read_unlock(); |
2936 | } |
2937 | |
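/* Translate a route into the legacy RTF_* flag bits that the
 * /proc/net/route format expects.
 */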
2938 | static unsigned int fib_flag_trans(int type, __be32 mask, struct fib_info *fi) |
2939 | { |
2940 | unsigned int flags = 0; |
2941 | |
2942 | if (type == RTN_UNREACHABLE || type == RTN_PROHIBIT) |
2943 | flags = RTF_REJECT; |
2944 | if (fi) { |
2945 | const struct fib_nh_common *nhc = fib_info_nhc(fi, 0); |
2946 | |
2947 | if (nhc->nhc_gw.ipv4) |
2948 | flags |= RTF_GATEWAY; |
2949 | } |
2950 | if (mask == htonl(0xFFFFFFFF)) |
2951 | flags |= RTF_HOST; |
2952 | flags |= RTF_UP; |
2953 | return flags; |
2954 | } |
2955 | |
2956 | /* |
2957 | * This outputs /proc/net/route. |
2958 | * The format of the file is not supposed to be changed |
 * and needs to be the same as the fib_hash output to avoid breaking
 * legacy utilities.
2961 | */ |
2962 | static int fib_route_seq_show(struct seq_file *seq, void *v) |
2963 | { |
2964 | struct fib_route_iter *iter = seq->private; |
2965 | struct fib_table *tb = iter->main_tb; |
2966 | struct fib_alias *fa; |
2967 | struct key_vector *l = v; |
2968 | __be32 prefix; |
2969 | |
2970 | if (v == SEQ_START_TOKEN) { |
2971 | seq_printf(seq, "%-127s\n" , "Iface\tDestination\tGateway " |
2972 | "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU" |
2973 | "\tWindow\tIRTT" ); |
2974 | return 0; |
2975 | } |
2976 | |
2977 | prefix = htonl(l->key); |
2978 | |
2979 | hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { |
2980 | struct fib_info *fi = fa->fa_info; |
2981 | __be32 mask = inet_make_mask(KEYLENGTH - fa->fa_slen); |
2982 | unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi); |
2983 | |
2984 | if ((fa->fa_type == RTN_BROADCAST) || |
2985 | (fa->fa_type == RTN_MULTICAST)) |
2986 | continue; |
2987 | |
2988 | if (fa->tb_id != tb->tb_id) |
2989 | continue; |
2990 | |
2991 | seq_setwidth(seq, 127); |
2992 | |
2993 | if (fi) { |
2994 | struct fib_nh_common *nhc = fib_info_nhc(fi, 0); |
2995 | __be32 gw = 0; |
2996 | |
2997 | if (nhc->nhc_gw_family == AF_INET) |
2998 | gw = nhc->nhc_gw.ipv4; |
2999 | |
			seq_printf(seq,
				   "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
				   "%d\t%08X\t%d\t%u\t%u",
				   nhc->nhc_dev ? nhc->nhc_dev->name : "*",
3004 | prefix, gw, flags, 0, 0, |
3005 | fi->fib_priority, |
3006 | mask, |
3007 | (fi->fib_advmss ? |
3008 | fi->fib_advmss + 40 : 0), |
3009 | fi->fib_window, |
3010 | fi->fib_rtt >> 3); |
3011 | } else { |
			seq_printf(seq,
				   "*\t%08X\t%08X\t%04X\t%d\t%u\t"
				   "%d\t%08X\t%d\t%u\t%u",
3015 | prefix, 0, flags, 0, 0, 0, |
3016 | mask, 0, 0, 0); |
3017 | } |
3018 | seq_pad(seq, '\n'); |
3019 | } |
3020 | |
3021 | return 0; |
3022 | } |
3023 | |
3024 | static const struct seq_operations fib_route_seq_ops = { |
3025 | .start = fib_route_seq_start, |
3026 | .next = fib_route_seq_next, |
3027 | .stop = fib_route_seq_stop, |
3028 | .show = fib_route_seq_show, |
3029 | }; |
3030 | |
3031 | int __net_init fib_proc_init(struct net *net) |
3032 | { |
3033 | if (!proc_create_net("fib_trie" , 0444, net->proc_net, &fib_trie_seq_ops, |
3034 | sizeof(struct fib_trie_iter))) |
3035 | goto out1; |
3036 | |
3037 | if (!proc_create_net_single("fib_triestat" , 0444, net->proc_net, |
3038 | fib_triestat_seq_show, NULL)) |
3039 | goto out2; |
3040 | |
3041 | if (!proc_create_net("route" , 0444, net->proc_net, &fib_route_seq_ops, |
3042 | sizeof(struct fib_route_iter))) |
3043 | goto out3; |
3044 | |
3045 | return 0; |
3046 | |
3047 | out3: |
3048 | remove_proc_entry("fib_triestat" , net->proc_net); |
3049 | out2: |
3050 | remove_proc_entry("fib_trie" , net->proc_net); |
3051 | out1: |
3052 | return -ENOMEM; |
3053 | } |
3054 | |
3055 | void __net_exit fib_proc_exit(struct net *net) |
3056 | { |
3057 | remove_proc_entry("fib_trie" , net->proc_net); |
3058 | remove_proc_entry("fib_triestat" , net->proc_net); |
3059 | remove_proc_entry("route" , net->proc_net); |
3060 | } |
3061 | |
3062 | #endif /* CONFIG_PROC_FS */ |
3063 | |