1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * Copyright (C) 2001 Momchil Velikov |
4 | * Portions Copyright (C) 2001 Christoph Hellwig |
5 | * Copyright (C) 2005 SGI, Christoph Lameter |
6 | * Copyright (C) 2006 Nick Piggin |
7 | * Copyright (C) 2012 Konstantin Khlebnikov |
8 | * Copyright (C) 2016 Intel, Matthew Wilcox |
9 | * Copyright (C) 2016 Intel, Ross Zwisler |
10 | */ |
11 | |
12 | #include <linux/bitmap.h> |
13 | #include <linux/bitops.h> |
14 | #include <linux/bug.h> |
15 | #include <linux/cpu.h> |
16 | #include <linux/errno.h> |
17 | #include <linux/export.h> |
18 | #include <linux/idr.h> |
19 | #include <linux/init.h> |
20 | #include <linux/kernel.h> |
21 | #include <linux/kmemleak.h> |
22 | #include <linux/percpu.h> |
23 | #include <linux/preempt.h> /* in_interrupt() */ |
24 | #include <linux/radix-tree.h> |
25 | #include <linux/rcupdate.h> |
26 | #include <linux/slab.h> |
27 | #include <linux/string.h> |
28 | #include <linux/xarray.h> |
29 | |
30 | #include "radix-tree.h" |
31 | |
32 | /* |
33 | * Radix tree node cache. |
34 | */ |
35 | struct kmem_cache *radix_tree_node_cachep; |
36 | |
37 | /* |
38 | * The radix tree is variable-height, so an insert operation not only has |
39 | * to build the branch to its corresponding item, it also has to build the |
40 | * branch to existing items if the size has to be increased (by |
41 | * radix_tree_extend). |
42 | * |
43 | * The worst case is a zero height tree with just a single item at index 0, |
44 | * and then inserting an item at index ULONG_MAX. This requires 2 new branches |
45 | * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared. |
46 | * Hence: |
47 | */ |
48 | #define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1) |
49 | |
50 | /* |
51 | * The IDR does not have to be as high as the radix tree since it uses |
52 | * signed integers, not unsigned longs. |
53 | */ |
54 | #define IDR_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(int) - 1) |
55 | #define IDR_MAX_PATH (DIV_ROUND_UP(IDR_INDEX_BITS, \ |
56 | RADIX_TREE_MAP_SHIFT)) |
57 | #define IDR_PRELOAD_SIZE (IDR_MAX_PATH * 2 - 1) |
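
/*
 * Worked example, assuming the common configuration of 64-bit longs and
 * RADIX_TREE_MAP_SHIFT == 6: RADIX_TREE_MAX_PATH == DIV_ROUND_UP(64, 6) == 11,
 * so RADIX_TREE_PRELOAD_SIZE == 2 * 11 - 1 == 21 nodes per CPU, while
 * IDR_MAX_PATH == DIV_ROUND_UP(31, 6) == 6 and IDR_PRELOAD_SIZE == 11.
 */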
58 | |
59 | /* |
60 | * Per-cpu pool of preloaded nodes |
61 | */ |
62 | DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { |
63 | .lock = INIT_LOCAL_LOCK(lock), |
64 | }; |
65 | EXPORT_PER_CPU_SYMBOL_GPL(radix_tree_preloads); |
66 | |
67 | static inline struct radix_tree_node *entry_to_node(void *ptr) |
68 | { |
69 | return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE); |
70 | } |
71 | |
72 | static inline void *node_to_entry(void *ptr) |
73 | { |
74 | return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE); |
75 | } |
76 | |
77 | #define RADIX_TREE_RETRY XA_RETRY_ENTRY |
78 | |
79 | static inline unsigned long |
80 | get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot) |
81 | { |
82 | return parent ? slot - parent->slots : 0; |
83 | } |
84 | |
85 | static unsigned int radix_tree_descend(const struct radix_tree_node *parent, |
86 | struct radix_tree_node **nodep, unsigned long index) |
87 | { |
88 | unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK; |
89 | void __rcu **entry = rcu_dereference_raw(parent->slots[offset]); |
90 | |
91 | *nodep = (void *)entry; |
92 | return offset; |
93 | } |
94 | |
95 | static inline gfp_t root_gfp_mask(const struct radix_tree_root *root) |
96 | { |
97 | return root->xa_flags & (__GFP_BITS_MASK & ~GFP_ZONEMASK); |
98 | } |
99 | |
100 | static inline void tag_set(struct radix_tree_node *node, unsigned int tag, |
101 | int offset) |
102 | { |
103 | __set_bit(offset, node->tags[tag]); |
104 | } |
105 | |
106 | static inline void tag_clear(struct radix_tree_node *node, unsigned int tag, |
107 | int offset) |
108 | { |
109 | __clear_bit(offset, node->tags[tag]); |
110 | } |
111 | |
112 | static inline int tag_get(const struct radix_tree_node *node, unsigned int tag, |
113 | int offset) |
114 | { |
115 | return test_bit(offset, node->tags[tag]); |
116 | } |
117 | |
118 | static inline void root_tag_set(struct radix_tree_root *root, unsigned tag) |
119 | { |
120 | root->xa_flags |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT)); |
121 | } |
122 | |
123 | static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag) |
124 | { |
125 | root->xa_flags &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT)); |
126 | } |
127 | |
128 | static inline void root_tag_clear_all(struct radix_tree_root *root) |
129 | { |
130 | root->xa_flags &= (__force gfp_t)((1 << ROOT_TAG_SHIFT) - 1); |
131 | } |
132 | |
133 | static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag) |
134 | { |
135 | return (__force int)root->xa_flags & (1 << (tag + ROOT_TAG_SHIFT)); |
136 | } |
137 | |
138 | static inline unsigned root_tags_get(const struct radix_tree_root *root) |
139 | { |
140 | return (__force unsigned)root->xa_flags >> ROOT_TAG_SHIFT; |
141 | } |
142 | |
143 | static inline bool is_idr(const struct radix_tree_root *root) |
144 | { |
145 | return !!(root->xa_flags & ROOT_IS_IDR); |
146 | } |
147 | |
148 | /* |
149 | * Returns 1 if any slot in the node has this tag set. |
150 | * Otherwise returns 0. |
151 | */ |
152 | static inline int any_tag_set(const struct radix_tree_node *node, |
153 | unsigned int tag) |
154 | { |
155 | unsigned idx; |
156 | for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { |
157 | if (node->tags[tag][idx]) |
158 | return 1; |
159 | } |
160 | return 0; |
161 | } |
162 | |
163 | static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag) |
164 | { |
165 | bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE); |
166 | } |
167 | |
168 | /** |
169 | * radix_tree_find_next_bit - find the next set bit in a memory region |
170 | * |
171 | * @node: where to begin the search |
172 | * @tag: the tag index |
173 | * @offset: the bitnumber to start searching at |
174 | * |
175 | * Unrollable variant of find_next_bit() for constant size arrays. |
176 | * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero. |
177 | * Returns next bit offset, or size if nothing found. |
178 | */ |
179 | static __always_inline unsigned long |
180 | radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag, |
181 | unsigned long offset) |
182 | { |
183 | const unsigned long *addr = node->tags[tag]; |
184 | |
185 | if (offset < RADIX_TREE_MAP_SIZE) { |
186 | unsigned long tmp; |
187 | |
188 | addr += offset / BITS_PER_LONG; |
189 | tmp = *addr >> (offset % BITS_PER_LONG); |
190 | if (tmp) |
191 | return __ffs(tmp) + offset; |
192 | offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1); |
193 | while (offset < RADIX_TREE_MAP_SIZE) { |
194 | tmp = *++addr; |
195 | if (tmp) |
196 | return __ffs(tmp) + offset; |
197 | offset += BITS_PER_LONG; |
198 | } |
199 | } |
200 | return RADIX_TREE_MAP_SIZE; |
201 | } |
202 | |
203 | static unsigned int iter_offset(const struct radix_tree_iter *iter) |
204 | { |
205 | return iter->index & RADIX_TREE_MAP_MASK; |
206 | } |
207 | |
208 | /* |
209 | * The maximum index which can be stored in a radix tree |
210 | */ |
211 | static inline unsigned long shift_maxindex(unsigned int shift) |
212 | { |
213 | return (RADIX_TREE_MAP_SIZE << shift) - 1; |
214 | } |
215 | |
216 | static inline unsigned long node_maxindex(const struct radix_tree_node *node) |
217 | { |
218 | return shift_maxindex(node->shift); |
219 | } |
220 | |
221 | static unsigned long next_index(unsigned long index, |
222 | const struct radix_tree_node *node, |
223 | unsigned long offset) |
224 | { |
225 | return (index & ~node_maxindex(node)) + (offset << node->shift); |
226 | } |
227 | |
228 | /* |
229 | * This assumes that the caller has performed appropriate preallocation, and |
230 | * that the caller has pinned this thread of control to the current CPU. |
231 | */ |
232 | static struct radix_tree_node * |
233 | radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent, |
234 | struct radix_tree_root *root, |
235 | unsigned int shift, unsigned int offset, |
236 | unsigned int count, unsigned int nr_values) |
237 | { |
238 | struct radix_tree_node *ret = NULL; |
239 | |
240 | /* |
241 | * Preload code isn't irq safe and it doesn't make sense to use |
242 | * preloading during an interrupt anyway as all the allocations have |
243 | * to be atomic. So just do normal allocation when in interrupt. |
244 | */ |
245 | if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) { |
246 | struct radix_tree_preload *rtp; |
247 | |
248 | /* |
249 | * Even if the caller has preloaded, try to allocate from the |
250 | * cache first for the new node to get accounted to the memory |
251 | * cgroup. |
252 | */ |
253 | ret = kmem_cache_alloc(radix_tree_node_cachep, |
254 | gfp_mask | __GFP_NOWARN); |
255 | if (ret) |
256 | goto out; |
257 | |
258 | /* |
259 | * Provided the caller has preloaded here, we will always |
260 | * succeed in getting a node here (and never reach |
261 | * kmem_cache_alloc) |
262 | */ |
263 | rtp = this_cpu_ptr(&radix_tree_preloads); |
264 | if (rtp->nr) { |
265 | ret = rtp->nodes; |
266 | rtp->nodes = ret->parent; |
267 | rtp->nr--; |
268 | } |
269 | /* |
270 | * Update the allocation stack trace as this is more useful |
271 | * for debugging. |
272 | */ |
273 | kmemleak_update_trace(ret); |
274 | goto out; |
275 | } |
276 | ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); |
277 | out: |
278 | BUG_ON(radix_tree_is_internal_node(ret)); |
279 | if (ret) { |
280 | ret->shift = shift; |
281 | ret->offset = offset; |
282 | ret->count = count; |
283 | ret->nr_values = nr_values; |
284 | ret->parent = parent; |
285 | ret->array = root; |
286 | } |
287 | return ret; |
288 | } |
289 | |
290 | void radix_tree_node_rcu_free(struct rcu_head *head) |
291 | { |
292 | struct radix_tree_node *node = |
293 | container_of(head, struct radix_tree_node, rcu_head); |
294 | |
295 | /* |
296 | * Must only free zeroed nodes into the slab. We can be left with |
297 | * non-NULL entries by radix_tree_free_nodes, so clear the entries |
298 | * and tags here. |
299 | */ |
300 | memset(node->slots, 0, sizeof(node->slots)); |
301 | memset(node->tags, 0, sizeof(node->tags)); |
302 | INIT_LIST_HEAD(&node->private_list); |
303 | |
304 | kmem_cache_free(radix_tree_node_cachep, node); |
305 | } |
306 | |
307 | static inline void |
308 | radix_tree_node_free(struct radix_tree_node *node) |
309 | { |
310 | call_rcu(&node->rcu_head, radix_tree_node_rcu_free); |
311 | } |
312 | |
313 | /* |
314 | * Load up this CPU's radix_tree_node buffer with sufficient objects to |
315 | * ensure that the addition of a single element in the tree cannot fail. On |
316 | * success, return zero, with preemption disabled. On error, return -ENOMEM |
317 | * with preemption not disabled. |
318 | * |
319 | * To make use of this facility, the radix tree must be initialised without |
320 | * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE(). |
321 | */ |
322 | static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr) |
323 | { |
324 | struct radix_tree_preload *rtp; |
325 | struct radix_tree_node *node; |
326 | int ret = -ENOMEM; |
327 | |
328 | /* |
329 | * Nodes preloaded by one cgroup can be used by another cgroup, so |
330 | * they should never be accounted to any particular memory cgroup. |
331 | */ |
332 | gfp_mask &= ~__GFP_ACCOUNT; |
333 | |
334 | local_lock(&radix_tree_preloads.lock); |
335 | rtp = this_cpu_ptr(&radix_tree_preloads); |
336 | while (rtp->nr < nr) { |
337 | local_unlock(&radix_tree_preloads.lock); |
338 | node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); |
339 | if (node == NULL) |
340 | goto out; |
341 | local_lock(&radix_tree_preloads.lock); |
342 | rtp = this_cpu_ptr(&radix_tree_preloads); |
343 | if (rtp->nr < nr) { |
344 | node->parent = rtp->nodes; |
345 | rtp->nodes = node; |
346 | rtp->nr++; |
347 | } else { |
348 | kmem_cache_free(radix_tree_node_cachep, node); |
349 | } |
350 | } |
351 | ret = 0; |
352 | out: |
353 | return ret; |
354 | } |
355 | |
356 | /* |
357 | * Load up this CPU's radix_tree_node buffer with sufficient objects to |
358 | * ensure that the addition of a single element in the tree cannot fail. On |
359 | * success, return zero, with preemption disabled. On error, return -ENOMEM |
360 | * with preemption not disabled. |
361 | * |
362 | * To make use of this facility, the radix tree must be initialised without |
363 | * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE(). |
364 | */ |
365 | int radix_tree_preload(gfp_t gfp_mask) |
366 | { |
367 | /* Warn on non-sensical use... */ |
368 | WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask)); |
369 | return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE); |
370 | } |
371 | EXPORT_SYMBOL(radix_tree_preload); |
372 | |
373 | /* |
374 | * The same as above function, except we don't guarantee preloading happens. |
375 | * We do it, if we decide it helps. On success, return zero with preemption |
376 | * disabled. On error, return -ENOMEM with preemption not disabled. |
377 | */ |
378 | int radix_tree_maybe_preload(gfp_t gfp_mask) |
379 | { |
380 | if (gfpflags_allow_blocking(gfp_mask)) |
381 | return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE); |
382 | /* Preloading doesn't help anything with this gfp mask, skip it */ |
383 | local_lock(&radix_tree_preloads.lock); |
384 | return 0; |
385 | } |
386 | EXPORT_SYMBOL(radix_tree_maybe_preload); |
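
/*
 * Typical preload pattern (a minimal sketch; my_tree and my_lock are
 * hypothetical names, and the tree must be initialised with a non-blocking
 * gfp mask such as GFP_ATOMIC so the insert itself never sleeps).  The
 * blocking allocation happens in radix_tree_preload(); the insert under the
 * spinlock then draws nodes from the per-cpu pool.  Requires
 * <linux/spinlock.h> in addition to the includes above.
 */
static RADIX_TREE(my_tree, GFP_ATOMIC);
static DEFINE_SPINLOCK(my_lock);

static int example_store(unsigned long index, void *item)
{
	int err = radix_tree_preload(GFP_KERNEL);  /* may sleep; disables preemption */

	if (err)
		return err;

	spin_lock(&my_lock);
	err = radix_tree_insert(&my_tree, index, item);
	spin_unlock(&my_lock);

	radix_tree_preload_end();                  /* re-enables preemption */
	return err;
}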
387 | |
388 | static unsigned radix_tree_load_root(const struct radix_tree_root *root, |
389 | struct radix_tree_node **nodep, unsigned long *maxindex) |
390 | { |
391 | struct radix_tree_node *node = rcu_dereference_raw(root->xa_head); |
392 | |
393 | *nodep = node; |
394 | |
395 | if (likely(radix_tree_is_internal_node(node))) { |
396 | node = entry_to_node(node); |
397 | *maxindex = node_maxindex(node); |
398 | return node->shift + RADIX_TREE_MAP_SHIFT; |
399 | } |
400 | |
401 | *maxindex = 0; |
402 | return 0; |
403 | } |
404 | |
405 | /* |
406 | * Extend a radix tree so it can store key @index. |
407 | */ |
408 | static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp, |
409 | unsigned long index, unsigned int shift) |
410 | { |
411 | void *entry; |
412 | unsigned int maxshift; |
413 | int tag; |
414 | |
415 | /* Figure out what the shift should be. */ |
416 | maxshift = shift; |
417 | while (index > shift_maxindex(maxshift)) |
418 | maxshift += RADIX_TREE_MAP_SHIFT; |
419 | |
420 | entry = rcu_dereference_raw(root->xa_head); |
421 | if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE))) |
422 | goto out; |
423 | |
424 | do { |
425 | struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL, |
426 | root, shift, 0, 1, 0); |
427 | if (!node) |
428 | return -ENOMEM; |
429 | |
430 | if (is_idr(root)) { |
431 | all_tag_set(node, IDR_FREE); |
432 | if (!root_tag_get(root, IDR_FREE)) { |
433 | tag_clear(node, IDR_FREE, 0); |
434 | root_tag_set(root, IDR_FREE); |
435 | } |
436 | } else { |
437 | /* Propagate the aggregated tag info to the new child */ |
438 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { |
439 | if (root_tag_get(root, tag)) |
440 | tag_set(node, tag, 0); |
441 | } |
442 | } |
443 | |
444 | BUG_ON(shift > BITS_PER_LONG); |
445 | if (radix_tree_is_internal_node(entry)) { |
446 | entry_to_node(entry)->parent = node; |
447 | } else if (xa_is_value(entry)) { |
448 | /* Moving a value entry root->xa_head to a node */ |
449 | node->nr_values = 1; |
450 | } |
451 | /* |
452 | * entry was already in the radix tree, so we do not need |
453 | * rcu_assign_pointer here |
454 | */ |
455 | node->slots[0] = (void __rcu *)entry; |
456 | entry = node_to_entry(node); |
457 | rcu_assign_pointer(root->xa_head, entry); |
458 | shift += RADIX_TREE_MAP_SHIFT; |
459 | } while (shift <= maxshift); |
460 | out: |
461 | return maxshift + RADIX_TREE_MAP_SHIFT; |
462 | } |
463 | |
464 | /** |
465 | * radix_tree_shrink - shrink radix tree to minimum height |
466 | * @root: radix tree root |
467 | */ |
468 | static inline bool radix_tree_shrink(struct radix_tree_root *root) |
469 | { |
470 | bool shrunk = false; |
471 | |
472 | for (;;) { |
473 | struct radix_tree_node *node = rcu_dereference_raw(root->xa_head); |
474 | struct radix_tree_node *child; |
475 | |
476 | if (!radix_tree_is_internal_node(node)) |
477 | break; |
478 | node = entry_to_node(node); |
479 | |
480 | /* |
481 | * If the candidate node has more than one child, or its child |
482 | * is not at the leftmost slot, we cannot shrink. |
483 | */ |
484 | if (node->count != 1) |
485 | break; |
486 | child = rcu_dereference_raw(node->slots[0]); |
487 | if (!child) |
488 | break; |
489 | |
490 | /* |
491 | * For an IDR, we must not shrink entry 0 into the root in |
492 | * case somebody calls idr_replace() with a pointer that |
493 | * appears to be an internal entry |
494 | */ |
495 | if (!node->shift && is_idr(root)) |
496 | break; |
497 | |
498 | if (radix_tree_is_internal_node(child)) |
499 | entry_to_node(child)->parent = NULL; |
500 | |
501 | /* |
502 | * We don't need rcu_assign_pointer(), since we are simply |
503 | * moving the node from one part of the tree to another: if it |
504 | * was safe to dereference the old pointer to it |
505 | * (node->slots[0]), it will be safe to dereference the new |
506 | * one (root->xa_head) as far as dependent read barriers go. |
507 | */ |
508 | root->xa_head = (void __rcu *)child; |
509 | if (is_idr(root) && !tag_get(node, IDR_FREE, 0)) |
510 | root_tag_clear(root, IDR_FREE); |
511 | |
512 | /* |
513 | * We have a dilemma here. The node's slot[0] must not be |
514 | * NULLed in case there are concurrent lookups expecting to |
515 | * find the item. However if this was a bottom-level node, |
516 | * then it may be subject to the slot pointer being visible |
517 | * to callers dereferencing it. If item corresponding to |
518 | * slot[0] is subsequently deleted, these callers would expect |
519 | * their slot to become empty sooner or later. |
520 | * |
521 | * For example, lockless pagecache will look up a slot, deref |
522 | * the page pointer, and if the page has 0 refcount it means it |
523 | * was concurrently deleted from pagecache so try the deref |
524 | * again. Fortunately there is already a requirement for logic |
525 | * to retry the entire slot lookup -- the indirect pointer |
526 | * problem (replacing direct root node with an indirect pointer |
527 | * also results in a stale slot). So tag the slot as indirect |
528 | * to force callers to retry. |
529 | */ |
530 | node->count = 0; |
531 | if (!radix_tree_is_internal_node(child)) { |
532 | node->slots[0] = (void __rcu *)RADIX_TREE_RETRY; |
533 | } |
534 | |
535 | WARN_ON_ONCE(!list_empty(&node->private_list)); |
536 | radix_tree_node_free(node); |
537 | shrunk = true; |
538 | } |
539 | |
540 | return shrunk; |
541 | } |
542 | |
543 | static bool delete_node(struct radix_tree_root *root, |
544 | struct radix_tree_node *node) |
545 | { |
546 | bool deleted = false; |
547 | |
548 | do { |
549 | struct radix_tree_node *parent; |
550 | |
551 | if (node->count) { |
552 | if (node_to_entry(node) == |
553 | rcu_dereference_raw(root->xa_head)) |
554 | deleted |= radix_tree_shrink(root); |
555 | return deleted; |
556 | } |
557 | |
558 | parent = node->parent; |
559 | if (parent) { |
560 | parent->slots[node->offset] = NULL; |
561 | parent->count--; |
562 | } else { |
563 | /* |
564 | * Shouldn't the tags already have all been cleared |
565 | * by the caller? |
566 | */ |
567 | if (!is_idr(root)) |
568 | root_tag_clear_all(root); |
569 | root->xa_head = NULL; |
570 | } |
571 | |
572 | WARN_ON_ONCE(!list_empty(&node->private_list)); |
573 | radix_tree_node_free(node); |
574 | deleted = true; |
575 | |
576 | node = parent; |
577 | } while (node); |
578 | |
579 | return deleted; |
580 | } |
581 | |
582 | /** |
583 | * __radix_tree_create - create a slot in a radix tree |
584 | * @root: radix tree root |
585 | * @index: index key |
586 | * @nodep: returns node |
587 | * @slotp: returns slot |
588 | * |
589 | * Create, if necessary, and return the node and slot for an item |
590 | * at position @index in the radix tree @root. |
591 | * |
592 | * Until there is more than one item in the tree, no nodes are |
593 | * allocated and @root->xa_head is used as a direct slot instead of |
594 | * pointing to a node, in which case *@nodep will be NULL. |
595 | * |
596 | * Returns -ENOMEM, or 0 for success. |
597 | */ |
598 | static int __radix_tree_create(struct radix_tree_root *root, |
599 | unsigned long index, struct radix_tree_node **nodep, |
600 | void __rcu ***slotp) |
601 | { |
602 | struct radix_tree_node *node = NULL, *child; |
603 | void __rcu **slot = (void __rcu **)&root->xa_head; |
604 | unsigned long maxindex; |
605 | unsigned int shift, offset = 0; |
606 | unsigned long max = index; |
607 | gfp_t gfp = root_gfp_mask(root); |
608 | |
609 | shift = radix_tree_load_root(root, &child, &maxindex); |
610 | |
611 | /* Make sure the tree is high enough. */ |
612 | if (max > maxindex) { |
613 | int error = radix_tree_extend(root, gfp, max, shift); |
614 | if (error < 0) |
615 | return error; |
616 | shift = error; |
617 | child = rcu_dereference_raw(root->xa_head); |
618 | } |
619 | |
620 | while (shift > 0) { |
621 | shift -= RADIX_TREE_MAP_SHIFT; |
622 | if (child == NULL) { |
623 | /* Have to add a child node. */ |
624 | child = radix_tree_node_alloc(gfp, node, root, shift, |
625 | offset, 0, 0); |
626 | if (!child) |
627 | return -ENOMEM; |
628 | rcu_assign_pointer(*slot, node_to_entry(child)); |
629 | if (node) |
630 | node->count++; |
631 | } else if (!radix_tree_is_internal_node(child)) |
632 | break; |
633 | |
634 | /* Go a level down */ |
635 | node = entry_to_node(child); |
636 | offset = radix_tree_descend(node, &child, index); |
637 | slot = &node->slots[offset]; |
638 | } |
639 | |
640 | if (nodep) |
641 | *nodep = node; |
642 | if (slotp) |
643 | *slotp = slot; |
644 | return 0; |
645 | } |
646 | |
647 | /* |
648 | * Free any nodes below this node. The tree is presumed to not need |
649 | * shrinking, and any user data in the tree is presumed to not need a |
650 | * destructor called on it. If we need to add a destructor, we can |
651 | * add that functionality later. Note that we may not clear tags or |
652 | * slots from the tree as an RCU walker may still have a pointer into |
653 | * this subtree. We could replace the entries with RADIX_TREE_RETRY, |
654 | * but we'll still have to clear those in rcu_free. |
655 | */ |
656 | static void radix_tree_free_nodes(struct radix_tree_node *node) |
657 | { |
658 | unsigned offset = 0; |
659 | struct radix_tree_node *child = entry_to_node(node); |
660 | |
661 | for (;;) { |
662 | void *entry = rcu_dereference_raw(child->slots[offset]); |
663 | if (xa_is_node(entry) && child->shift) { |
664 | child = entry_to_node(entry); |
665 | offset = 0; |
666 | continue; |
667 | } |
668 | offset++; |
669 | while (offset == RADIX_TREE_MAP_SIZE) { |
670 | struct radix_tree_node *old = child; |
671 | offset = child->offset + 1; |
672 | child = child->parent; |
673 | WARN_ON_ONCE(!list_empty(&old->private_list)); |
674 | radix_tree_node_free(old); |
675 | if (old == entry_to_node(node)) |
676 | return; |
677 | } |
678 | } |
679 | } |
680 | |
681 | static inline int insert_entries(struct radix_tree_node *node, |
682 | void __rcu **slot, void *item) |
683 | { |
684 | if (*slot) |
685 | return -EEXIST; |
686 | rcu_assign_pointer(*slot, item); |
687 | if (node) { |
688 | node->count++; |
689 | if (xa_is_value(item)) |
690 | node->nr_values++; |
691 | } |
692 | return 1; |
693 | } |
694 | |
695 | /** |
696 | * radix_tree_insert - insert into a radix tree |
697 | * @root: radix tree root |
698 | * @index: index key |
699 | * @item: item to insert |
700 | * |
701 | * Insert an item into the radix tree at position @index. |
702 | */ |
703 | int radix_tree_insert(struct radix_tree_root *root, unsigned long index, |
704 | void *item) |
705 | { |
706 | struct radix_tree_node *node; |
707 | void __rcu **slot; |
708 | int error; |
709 | |
710 | BUG_ON(radix_tree_is_internal_node(item)); |
711 | |
712 | error = __radix_tree_create(root, index, &node, &slot); |
713 | if (error) |
714 | return error; |
715 | |
716 | error = insert_entries(node, slot, item); |
717 | if (error < 0) |
718 | return error; |
719 | |
720 | if (node) { |
721 | unsigned offset = get_slot_offset(node, slot); |
722 | BUG_ON(tag_get(node, 0, offset)); |
723 | BUG_ON(tag_get(node, 1, offset)); |
724 | BUG_ON(tag_get(node, 2, offset)); |
725 | } else { |
726 | BUG_ON(root_tags_get(root)); |
727 | } |
728 | |
729 | return 0; |
730 | } |
731 | EXPORT_SYMBOL(radix_tree_insert); |
732 | |
733 | /** |
734 | * __radix_tree_lookup - lookup an item in a radix tree |
735 | * @root: radix tree root |
736 | * @index: index key |
737 | * @nodep: returns node |
738 | * @slotp: returns slot |
739 | * |
740 | * Lookup and return the item at position @index in the radix |
741 | * tree @root. |
742 | * |
743 | * Until there is more than one item in the tree, no nodes are |
744 | * allocated and @root->xa_head is used as a direct slot instead of |
745 | * pointing to a node, in which case *@nodep will be NULL. |
746 | */ |
747 | void *__radix_tree_lookup(const struct radix_tree_root *root, |
748 | unsigned long index, struct radix_tree_node **nodep, |
749 | void __rcu ***slotp) |
750 | { |
751 | struct radix_tree_node *node, *parent; |
752 | unsigned long maxindex; |
753 | void __rcu **slot; |
754 | |
755 | restart: |
756 | parent = NULL; |
757 | slot = (void __rcu **)&root->xa_head; |
758 | radix_tree_load_root(root, &node, &maxindex); |
759 | if (index > maxindex) |
760 | return NULL; |
761 | |
762 | while (radix_tree_is_internal_node(node)) { |
763 | unsigned offset; |
764 | |
765 | parent = entry_to_node(node); |
766 | offset = radix_tree_descend(parent, &node, index); |
767 | slot = parent->slots + offset; |
768 | if (node == RADIX_TREE_RETRY) |
769 | goto restart; |
770 | if (parent->shift == 0) |
771 | break; |
772 | } |
773 | |
774 | if (nodep) |
775 | *nodep = parent; |
776 | if (slotp) |
777 | *slotp = slot; |
778 | return node; |
779 | } |
780 | |
781 | /** |
782 | * radix_tree_lookup_slot - lookup a slot in a radix tree |
783 | * @root: radix tree root |
784 | * @index: index key |
785 | * |
786 | * Returns: the slot corresponding to the position @index in the |
787 | * radix tree @root. This is useful for update-if-exists operations. |
788 | * |
789 | * This function can be called under rcu_read_lock iff the slot is not |
790 | * modified by radix_tree_replace_slot, otherwise it must be called |
791 | * exclusive from other writers. Any dereference of the slot must be done |
792 | * using radix_tree_deref_slot. |
793 | */ |
794 | void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *root, |
795 | unsigned long index) |
796 | { |
797 | void __rcu **slot; |
798 | |
799 | if (!__radix_tree_lookup(root, index, NULL, &slot)) |
800 | return NULL; |
801 | return slot; |
802 | } |
803 | EXPORT_SYMBOL(radix_tree_lookup_slot); |
804 | |
805 | /** |
806 | * radix_tree_lookup - perform lookup operation on a radix tree |
807 | * @root: radix tree root |
808 | * @index: index key |
809 | * |
810 | * Lookup the item at the position @index in the radix tree @root. |
811 | * |
812 | * This function can be called under rcu_read_lock, however the caller |
813 | * must manage lifetimes of leaf nodes (eg. RCU may also be used to free |
814 | * them safely). No RCU barriers are required to access or modify the |
815 | * returned item, however. |
816 | */ |
817 | void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index) |
818 | { |
819 | return __radix_tree_lookup(root, index, NULL, NULL); |
820 | } |
821 | EXPORT_SYMBOL(radix_tree_lookup); |
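
/*
 * Lookup sketch: readers only need rcu_read_lock() around the walk; the
 * caller must guarantee the lifetime of the returned object itself, for
 * example by taking a reference before dropping the RCU read lock.
 * my_tree is the hypothetical tree from the preload sketch above.
 */
static void *example_find(unsigned long index)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(&my_tree, index);
	/* take a reference on item here if it can be freed concurrently */
	rcu_read_unlock();

	return item;
}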
822 | |
823 | static void replace_slot(void __rcu **slot, void *item, |
824 | struct radix_tree_node *node, int count, int values) |
825 | { |
826 | if (node && (count || values)) { |
827 | node->count += count; |
828 | node->nr_values += values; |
829 | } |
830 | |
831 | rcu_assign_pointer(*slot, item); |
832 | } |
833 | |
834 | static bool node_tag_get(const struct radix_tree_root *root, |
835 | const struct radix_tree_node *node, |
836 | unsigned int tag, unsigned int offset) |
837 | { |
838 | if (node) |
839 | return tag_get(node, tag, offset); |
840 | return root_tag_get(root, tag); |
841 | } |
842 | |
843 | /* |
844 | * IDR users want to be able to store NULL in the tree, so if the slot isn't |
845 | * free, don't adjust the count, even if it's transitioning between NULL and |
846 | * non-NULL. For the IDA, we mark slots as being IDR_FREE while they still |
847 | * have empty bits, but it only stores NULL in slots when they're being |
848 | * deleted. |
849 | */ |
850 | static int calculate_count(struct radix_tree_root *root, |
851 | struct radix_tree_node *node, void __rcu **slot, |
852 | void *item, void *old) |
853 | { |
854 | if (is_idr(root)) { |
855 | unsigned offset = get_slot_offset(node, slot); |
856 | bool free = node_tag_get(root, node, IDR_FREE, offset); |
857 | if (!free) |
858 | return 0; |
859 | if (!old) |
860 | return 1; |
861 | } |
862 | return !!item - !!old; |
863 | } |
864 | |
865 | /** |
866 | * __radix_tree_replace - replace item in a slot |
867 | * @root: radix tree root |
868 | * @node: pointer to tree node |
869 | * @slot: pointer to slot in @node |
870 | * @item: new item to store in the slot. |
871 | * |
872 | * For use with __radix_tree_lookup(). Caller must hold tree write locked |
873 | * across slot lookup and replacement. |
874 | */ |
875 | void __radix_tree_replace(struct radix_tree_root *root, |
876 | struct radix_tree_node *node, |
877 | void __rcu **slot, void *item) |
878 | { |
879 | void *old = rcu_dereference_raw(*slot); |
880 | int values = !!xa_is_value(item) - !!xa_is_value(old); |
881 | int count = calculate_count(root, node, slot, item, old); |
882 | |
883 | /* |
884 | * This function supports replacing value entries and |
885 | * deleting entries, but that needs accounting against the |
886 | * node unless the slot is root->xa_head. |
887 | */ |
888 | WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->xa_head) && |
889 | (count || values)); |
890 | replace_slot(slot, item, node, count, values); |
891 | |
892 | if (!node) |
893 | return; |
894 | |
895 | delete_node(root, node); |
896 | } |
897 | |
898 | /** |
899 | * radix_tree_replace_slot - replace item in a slot |
900 | * @root: radix tree root |
901 | * @slot: pointer to slot |
902 | * @item: new item to store in the slot. |
903 | * |
904 | * For use with radix_tree_lookup_slot() and |
905 | * radix_tree_gang_lookup_tag_slot(). Caller must hold tree write locked |
906 | * across slot lookup and replacement. |
907 | * |
908 | * NOTE: This cannot be used to switch between non-entries (empty slots), |
909 | * regular entries, and value entries, as that requires accounting |
910 | * inside the radix tree node. When switching from one type of entry or |
911 | * deleting, use __radix_tree_lookup() and __radix_tree_replace() or |
912 | * radix_tree_iter_replace(). |
913 | */ |
914 | void radix_tree_replace_slot(struct radix_tree_root *root, |
915 | void __rcu **slot, void *item) |
916 | { |
917 | __radix_tree_replace(root, NULL, slot, item); |
918 | } |
919 | EXPORT_SYMBOL(radix_tree_replace_slot); |
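
/*
 * Update-if-exists sketch: look the slot up and replace it in place.  As
 * noted above, this must not be used to switch between empty, value and
 * regular entries, so new_item is assumed to be a regular non-NULL pointer.
 * my_tree and my_lock are the hypothetical names from the preload sketch;
 * the caller-side spinlock stands in for "tree write locked".
 */
static bool example_update(unsigned long index, void *new_item)
{
	void __rcu **slot;
	bool replaced = false;

	spin_lock(&my_lock);
	slot = radix_tree_lookup_slot(&my_tree, index);
	if (slot) {
		radix_tree_replace_slot(&my_tree, slot, new_item);
		replaced = true;
	}
	spin_unlock(&my_lock);

	return replaced;
}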
920 | |
921 | /** |
922 | * radix_tree_iter_replace - replace item in a slot |
923 | * @root: radix tree root |
924 | * @iter: iterator state |
925 | * @slot: pointer to slot |
926 | * @item: new item to store in the slot. |
927 | * |
928 | * For use with radix_tree_for_each_slot(). |
929 | * Caller must hold tree write locked. |
930 | */ |
931 | void radix_tree_iter_replace(struct radix_tree_root *root, |
932 | const struct radix_tree_iter *iter, |
933 | void __rcu **slot, void *item) |
934 | { |
935 | __radix_tree_replace(root, iter->node, slot, item); |
936 | } |
937 | |
938 | static void node_tag_set(struct radix_tree_root *root, |
939 | struct radix_tree_node *node, |
940 | unsigned int tag, unsigned int offset) |
941 | { |
942 | while (node) { |
943 | if (tag_get(node, tag, offset)) |
944 | return; |
945 | tag_set(node, tag, offset); |
946 | offset = node->offset; |
947 | node = node->parent; |
948 | } |
949 | |
950 | if (!root_tag_get(root, tag)) |
951 | root_tag_set(root, tag); |
952 | } |
953 | |
954 | /** |
955 | * radix_tree_tag_set - set a tag on a radix tree node |
956 | * @root: radix tree root |
957 | * @index: index key |
958 | * @tag: tag index |
959 | * |
960 | * Set the search tag (which must be < RADIX_TREE_MAX_TAGS) |
961 | * corresponding to @index in the radix tree. From |
962 | * the root all the way down to the leaf node. |
963 | * |
964 | * Returns the address of the tagged item. Setting a tag on a not-present |
965 | * item is a bug. |
966 | */ |
967 | void *radix_tree_tag_set(struct radix_tree_root *root, |
968 | unsigned long index, unsigned int tag) |
969 | { |
970 | struct radix_tree_node *node, *parent; |
971 | unsigned long maxindex; |
972 | |
973 | radix_tree_load_root(root, &node, &maxindex); |
974 | BUG_ON(index > maxindex); |
975 | |
976 | while (radix_tree_is_internal_node(node)) { |
977 | unsigned offset; |
978 | |
979 | parent = entry_to_node(node); |
980 | offset = radix_tree_descend(parent, &node, index); |
981 | BUG_ON(!node); |
982 | |
983 | if (!tag_get(parent, tag, offset)) |
984 | tag_set(parent, tag, offset); |
985 | } |
986 | |
987 | /* set the root's tag bit */ |
988 | if (!root_tag_get(root, tag)) |
989 | root_tag_set(root, tag); |
990 | |
991 | return node; |
992 | } |
993 | EXPORT_SYMBOL(radix_tree_tag_set); |
994 | |
995 | static void node_tag_clear(struct radix_tree_root *root, |
996 | struct radix_tree_node *node, |
997 | unsigned int tag, unsigned int offset) |
998 | { |
999 | while (node) { |
1000 | if (!tag_get(node, tag, offset)) |
1001 | return; |
1002 | tag_clear(node, tag, offset); |
1003 | if (any_tag_set(node, tag)) |
1004 | return; |
1005 | |
1006 | offset = node->offset; |
1007 | node = node->parent; |
1008 | } |
1009 | |
1010 | /* clear the root's tag bit */ |
1011 | if (root_tag_get(root, tag)) |
1012 | root_tag_clear(root, tag); |
1013 | } |
1014 | |
1015 | /** |
1016 | * radix_tree_tag_clear - clear a tag on a radix tree node |
1017 | * @root: radix tree root |
1018 | * @index: index key |
1019 | * @tag: tag index |
1020 | * |
1021 | * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS) |
1022 | * corresponding to @index in the radix tree. If this causes |
1023 | * the leaf node to have no tags set then clear the tag in the |
1024 | * next-to-leaf node, etc. |
1025 | * |
1026 | * Returns the address of the tagged item on success, else NULL. ie: |
1027 | * has the same return value and semantics as radix_tree_lookup(). |
1028 | */ |
1029 | void *radix_tree_tag_clear(struct radix_tree_root *root, |
1030 | unsigned long index, unsigned int tag) |
1031 | { |
1032 | struct radix_tree_node *node, *parent; |
1033 | unsigned long maxindex; |
1034 | int offset = 0; |
1035 | |
1036 | radix_tree_load_root(root, &node, &maxindex); |
1037 | if (index > maxindex) |
1038 | return NULL; |
1039 | |
1040 | parent = NULL; |
1041 | |
1042 | while (radix_tree_is_internal_node(node)) { |
1043 | parent = entry_to_node(node); |
1044 | offset = radix_tree_descend(parent, &node, index); |
1045 | } |
1046 | |
1047 | if (node) |
1048 | node_tag_clear(root, parent, tag, offset); |
1049 | |
1050 | return node; |
1051 | } |
1052 | EXPORT_SYMBOL(radix_tree_tag_clear); |
1053 | |
1054 | /** |
1055 | * radix_tree_iter_tag_clear - clear a tag on the current iterator entry |
1056 | * @root: radix tree root |
1057 | * @iter: iterator state |
1058 | * @tag: tag to clear |
1059 | */ |
1060 | void radix_tree_iter_tag_clear(struct radix_tree_root *root, |
1061 | const struct radix_tree_iter *iter, unsigned int tag) |
1062 | { |
1063 | node_tag_clear(root, iter->node, tag, iter_offset(iter)); |
1064 | } |
1065 | |
1066 | /** |
1067 | * radix_tree_tag_get - get a tag on a radix tree node |
1068 | * @root: radix tree root |
1069 | * @index: index key |
1070 | * @tag: tag index (< RADIX_TREE_MAX_TAGS) |
1071 | * |
1072 | * Return values: |
1073 | * |
1074 | * 0: tag not present or not set |
1075 | * 1: tag set |
1076 | * |
1077 | * Note that the return value of this function may not be relied on, even if |
1078 | * the RCU lock is held, unless tag modification and node deletion are excluded |
1079 | * from concurrency. |
1080 | */ |
1081 | int radix_tree_tag_get(const struct radix_tree_root *root, |
1082 | unsigned long index, unsigned int tag) |
1083 | { |
1084 | struct radix_tree_node *node, *parent; |
1085 | unsigned long maxindex; |
1086 | |
1087 | if (!root_tag_get(root, tag)) |
1088 | return 0; |
1089 | |
1090 | radix_tree_load_root(root, &node, &maxindex); |
1091 | if (index > maxindex) |
1092 | return 0; |
1093 | |
1094 | while (radix_tree_is_internal_node(node)) { |
1095 | unsigned offset; |
1096 | |
1097 | parent = entry_to_node(node); |
1098 | offset = radix_tree_descend(parent, &node, index); |
1099 | |
1100 | if (!tag_get(parent, tag, offset)) |
1101 | return 0; |
1102 | if (node == RADIX_TREE_RETRY) |
1103 | break; |
1104 | } |
1105 | |
1106 | return 1; |
1107 | } |
1108 | EXPORT_SYMBOL(radix_tree_tag_get); |
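
/*
 * Tag sketch: tags mark a subset of present entries so they can later be
 * tested or scanned cheaply.  EXAMPLE_TAG is a hypothetical tag number (it
 * must be below RADIX_TREE_MAX_TAGS); my_tree and my_lock are the
 * hypothetical names used in the earlier sketches.
 */
#define EXAMPLE_TAG 0

static void example_mark(unsigned long index)
{
	spin_lock(&my_lock);
	if (radix_tree_lookup(&my_tree, index))
		radix_tree_tag_set(&my_tree, index, EXAMPLE_TAG);
	spin_unlock(&my_lock);
}

static bool example_any_marked(void)
{
	/* Answered from the tag bits cached in the root, without a walk. */
	return radix_tree_tagged(&my_tree, EXAMPLE_TAG);
}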
1109 | |
1110 | /* Construct iter->tags bit-mask from node->tags[tag] array */ |
1111 | static void set_iter_tags(struct radix_tree_iter *iter, |
1112 | struct radix_tree_node *node, unsigned offset, |
1113 | unsigned tag) |
1114 | { |
1115 | unsigned tag_long = offset / BITS_PER_LONG; |
1116 | unsigned tag_bit = offset % BITS_PER_LONG; |
1117 | |
1118 | if (!node) { |
1119 | iter->tags = 1; |
1120 | return; |
1121 | } |
1122 | |
1123 | iter->tags = node->tags[tag][tag_long] >> tag_bit; |
1124 | |
1125 | /* This never happens if RADIX_TREE_TAG_LONGS == 1 */ |
1126 | if (tag_long < RADIX_TREE_TAG_LONGS - 1) { |
1127 | /* Pick tags from next element */ |
1128 | if (tag_bit) |
1129 | iter->tags |= node->tags[tag][tag_long + 1] << |
1130 | (BITS_PER_LONG - tag_bit); |
1131 | /* Clip chunk size, here only BITS_PER_LONG tags */ |
1132 | iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG); |
1133 | } |
1134 | } |
1135 | |
1136 | void __rcu **radix_tree_iter_resume(void __rcu **slot, |
1137 | struct radix_tree_iter *iter) |
1138 | { |
1139 | iter->index = __radix_tree_iter_add(iter, 1); |
1140 | iter->next_index = iter->index; |
1141 | iter->tags = 0; |
1142 | return NULL; |
1143 | } |
1144 | EXPORT_SYMBOL(radix_tree_iter_resume); |
1145 | |
1146 | /** |
1147 | * radix_tree_next_chunk - find next chunk of slots for iteration |
1148 | * |
1149 | * @root: radix tree root |
1150 | * @iter: iterator state |
1151 | * @flags: RADIX_TREE_ITER_* flags and tag index |
1152 | * Returns: pointer to chunk first slot, or NULL if iteration is over |
1153 | */ |
1154 | void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root, |
1155 | struct radix_tree_iter *iter, unsigned flags) |
1156 | { |
1157 | unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; |
1158 | struct radix_tree_node *node, *child; |
1159 | unsigned long index, offset, maxindex; |
1160 | |
1161 | if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag)) |
1162 | return NULL; |
1163 | |
1164 | /* |
1165 | * Catch next_index overflow after ~0UL. iter->index never overflows |
1166 | * during iterating; it can be zero only at the beginning. |
1167 | * And we cannot overflow iter->next_index in a single step, |
1168 | * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG. |
1169 | * |
1170 | * This condition also used by radix_tree_next_slot() to stop |
1171 | * contiguous iterating, and forbid switching to the next chunk. |
1172 | */ |
1173 | index = iter->next_index; |
1174 | if (!index && iter->index) |
1175 | return NULL; |
1176 | |
1177 | restart: |
1178 | radix_tree_load_root(root, &child, &maxindex); |
1179 | if (index > maxindex) |
1180 | return NULL; |
1181 | if (!child) |
1182 | return NULL; |
1183 | |
1184 | if (!radix_tree_is_internal_node(child)) { |
1185 | /* Single-slot tree */ |
1186 | iter->index = index; |
1187 | iter->next_index = maxindex + 1; |
1188 | iter->tags = 1; |
1189 | iter->node = NULL; |
1190 | return (void __rcu **)&root->xa_head; |
1191 | } |
1192 | |
1193 | do { |
1194 | node = entry_to_node(child); |
1195 | offset = radix_tree_descend(node, &child, index); |
1196 | |
1197 | if ((flags & RADIX_TREE_ITER_TAGGED) ? |
1198 | !tag_get(node, tag, offset) : !child) { |
1199 | /* Hole detected */ |
1200 | if (flags & RADIX_TREE_ITER_CONTIG) |
1201 | return NULL; |
1202 | |
1203 | if (flags & RADIX_TREE_ITER_TAGGED) |
1204 | offset = radix_tree_find_next_bit(node, tag, |
1205 | offset + 1); |
1206 | else |
1207 | while (++offset < RADIX_TREE_MAP_SIZE) { |
1208 | void *slot = rcu_dereference_raw( |
1209 | node->slots[offset]); |
1210 | if (slot) |
1211 | break; |
1212 | } |
1213 | index &= ~node_maxindex(node); |
1214 | index += offset << node->shift; |
1215 | /* Overflow after ~0UL */ |
1216 | if (!index) |
1217 | return NULL; |
1218 | if (offset == RADIX_TREE_MAP_SIZE) |
1219 | goto restart; |
1220 | child = rcu_dereference_raw(node->slots[offset]); |
1221 | } |
1222 | |
1223 | if (!child) |
1224 | goto restart; |
1225 | if (child == RADIX_TREE_RETRY) |
1226 | break; |
1227 | } while (node->shift && radix_tree_is_internal_node(child)); |
1228 | |
1229 | /* Update the iterator state */ |
1230 | iter->index = (index &~ node_maxindex(node)) | offset; |
1231 | iter->next_index = (index | node_maxindex(node)) + 1; |
1232 | iter->node = node; |
1233 | |
1234 | if (flags & RADIX_TREE_ITER_TAGGED) |
1235 | set_iter_tags(iter, node, offset, tag); |
1236 | |
1237 | return node->slots + offset; |
1238 | } |
1239 | EXPORT_SYMBOL(radix_tree_next_chunk); |
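
/*
 * Iteration sketch: radix_tree_next_chunk() is normally used through the
 * radix_tree_for_each_slot() macro rather than called directly.  A reader
 * walks under rcu_read_lock(), loads entries with radix_tree_deref_slot()
 * and retries slots that turn into internal entries underneath it.
 * my_tree is the hypothetical tree from the earlier sketches.
 */
static unsigned long example_count_entries(void)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned long count = 0;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
		void *entry = radix_tree_deref_slot(slot);

		if (radix_tree_deref_retry(entry)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		count++;
	}
	rcu_read_unlock();

	return count;
}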
1240 | |
1241 | /** |
1242 | * radix_tree_gang_lookup - perform multiple lookup on a radix tree |
1243 | * @root: radix tree root |
1244 | * @results: where the results of the lookup are placed |
1245 | * @first_index: start the lookup from this key |
1246 | * @max_items: place up to this many items at *results |
1247 | * |
1248 | * Performs an index-ascending scan of the tree for present items. Places |
1249 | * them at *@results and returns the number of items which were placed at |
1250 | * *@results. |
1251 | * |
1252 | * The implementation is naive. |
1253 | * |
1254 | * Like radix_tree_lookup, radix_tree_gang_lookup may be called under |
1255 | * rcu_read_lock. In this case, rather than the returned results being |
1256 | * an atomic snapshot of the tree at a single point in time, the |
1257 | * semantics of an RCU protected gang lookup are as though multiple |
1258 | * radix_tree_lookups have been issued in individual locks, and results |
1259 | * stored in 'results'. |
1260 | */ |
1261 | unsigned int |
1262 | radix_tree_gang_lookup(const struct radix_tree_root *root, void **results, |
1263 | unsigned long first_index, unsigned int max_items) |
1264 | { |
1265 | struct radix_tree_iter iter; |
1266 | void __rcu **slot; |
1267 | unsigned int ret = 0; |
1268 | |
1269 | if (unlikely(!max_items)) |
1270 | return 0; |
1271 | |
1272 | radix_tree_for_each_slot(slot, root, &iter, first_index) { |
1273 | results[ret] = rcu_dereference_raw(*slot); |
1274 | if (!results[ret]) |
1275 | continue; |
1276 | if (radix_tree_is_internal_node(results[ret])) { |
1277 | slot = radix_tree_iter_retry(&iter); |
1278 | continue; |
1279 | } |
1280 | if (++ret == max_items) |
1281 | break; |
1282 | } |
1283 | |
1284 | return ret; |
1285 | } |
1286 | EXPORT_SYMBOL(radix_tree_gang_lookup); |
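
/*
 * Gang lookup sketch: pull up to a small batch of entries starting at a
 * given index in one walk.  Continuing a scan past one batch requires the
 * caller to derive the next start index from the objects returned (as the
 * page cache does), or to use the iterator macros instead.  my_tree is the
 * hypothetical tree from the earlier sketches; the batch size of 16 is an
 * arbitrary choice and results[] must have at least that many slots.
 */
static unsigned int example_first_batch(void **results)
{
	unsigned int nr;

	rcu_read_lock();
	nr = radix_tree_gang_lookup(&my_tree, results, 0, 16);
	rcu_read_unlock();

	return nr;	/* number of entries stored in results[] */
}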
1287 | |
1288 | /** |
1289 | * radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree |
1290 | * based on a tag |
1291 | * @root: radix tree root |
1292 | * @results: where the results of the lookup are placed |
1293 | * @first_index: start the lookup from this key |
1294 | * @max_items: place up to this many items at *results |
1295 | * @tag: the tag index (< RADIX_TREE_MAX_TAGS) |
1296 | * |
1297 | * Performs an index-ascending scan of the tree for present items which |
1298 | * have the tag indexed by @tag set. Places the items at *@results and |
1299 | * returns the number of items which were placed at *@results. |
1300 | */ |
1301 | unsigned int |
1302 | radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results, |
1303 | unsigned long first_index, unsigned int max_items, |
1304 | unsigned int tag) |
1305 | { |
1306 | struct radix_tree_iter iter; |
1307 | void __rcu **slot; |
1308 | unsigned int ret = 0; |
1309 | |
1310 | if (unlikely(!max_items)) |
1311 | return 0; |
1312 | |
1313 | radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { |
1314 | results[ret] = rcu_dereference_raw(*slot); |
1315 | if (!results[ret]) |
1316 | continue; |
1317 | if (radix_tree_is_internal_node(results[ret])) { |
1318 | slot = radix_tree_iter_retry(&iter); |
1319 | continue; |
1320 | } |
1321 | if (++ret == max_items) |
1322 | break; |
1323 | } |
1324 | |
1325 | return ret; |
1326 | } |
1327 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag); |
1328 | |
1329 | /** |
1330 | * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a |
1331 | * radix tree based on a tag |
1332 | * @root: radix tree root |
1333 | * @results: where the results of the lookup are placed |
1334 | * @first_index: start the lookup from this key |
1335 | * @max_items: place up to this many items at *results |
1336 | * @tag: the tag index (< RADIX_TREE_MAX_TAGS) |
1337 | * |
1338 | * Performs an index-ascending scan of the tree for present items which |
1339 | * have the tag indexed by @tag set. Places the slots at *@results and |
1340 | * returns the number of slots which were placed at *@results. |
1341 | */ |
1342 | unsigned int |
1343 | radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root, |
1344 | void __rcu ***results, unsigned long first_index, |
1345 | unsigned int max_items, unsigned int tag) |
1346 | { |
1347 | struct radix_tree_iter iter; |
1348 | void __rcu **slot; |
1349 | unsigned int ret = 0; |
1350 | |
1351 | if (unlikely(!max_items)) |
1352 | return 0; |
1353 | |
1354 | radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { |
1355 | results[ret] = slot; |
1356 | if (++ret == max_items) |
1357 | break; |
1358 | } |
1359 | |
1360 | return ret; |
1361 | } |
1362 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot); |
1363 | |
1364 | static bool __radix_tree_delete(struct radix_tree_root *root, |
1365 | struct radix_tree_node *node, void __rcu **slot) |
1366 | { |
1367 | void *old = rcu_dereference_raw(*slot); |
1368 | int values = xa_is_value(old) ? -1 : 0; |
1369 | unsigned offset = get_slot_offset(node, slot); |
1370 | int tag; |
1371 | |
1372 | if (is_idr(root)) |
1373 | node_tag_set(root, node, IDR_FREE, offset); |
1374 | else |
1375 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) |
1376 | node_tag_clear(root, node, tag, offset); |
1377 | |
1378 | replace_slot(slot, NULL, node, -1, values); |
1379 | return node && delete_node(root, node); |
1380 | } |
1381 | |
1382 | /** |
1383 | * radix_tree_iter_delete - delete the entry at this iterator position |
1384 | * @root: radix tree root |
1385 | * @iter: iterator state |
1386 | * @slot: pointer to slot |
1387 | * |
1388 | * Delete the entry at the position currently pointed to by the iterator. |
1389 | * This may result in the current node being freed; if it is, the iterator |
1390 | * is advanced so that it will not reference the freed memory. This |
1391 | * function may be called without any locking if there are no other threads |
1392 | * which can access this tree. |
1393 | */ |
1394 | void radix_tree_iter_delete(struct radix_tree_root *root, |
1395 | struct radix_tree_iter *iter, void __rcu **slot) |
1396 | { |
1397 | if (__radix_tree_delete(root, iter->node, slot)) |
1398 | iter->index = iter->next_index; |
1399 | } |
1400 | EXPORT_SYMBOL(radix_tree_iter_delete); |
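
/*
 * Deletion-during-iteration sketch: radix_tree_iter_delete() keeps the
 * iterator valid even when the node it points into is freed, so a whole
 * tree can be emptied from inside the loop.  Assumes the caller is the
 * only user of the (hypothetical) my_tree, as the comment above allows.
 */
static void example_clear_tree(void)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, &my_tree, &iter, 0)
		radix_tree_iter_delete(&my_tree, &iter, slot);
}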
1401 | |
1402 | /** |
1403 | * radix_tree_delete_item - delete an item from a radix tree |
1404 | * @root: radix tree root |
1405 | * @index: index key |
1406 | * @item: expected item |
1407 | * |
1408 | * Remove @item at @index from the radix tree rooted at @root. |
1409 | * |
1410 | * Return: the deleted entry, or %NULL if it was not present |
1411 | * or the entry at the given @index was not @item. |
1412 | */ |
1413 | void *radix_tree_delete_item(struct radix_tree_root *root, |
1414 | unsigned long index, void *item) |
1415 | { |
1416 | struct radix_tree_node *node = NULL; |
1417 | void __rcu **slot = NULL; |
1418 | void *entry; |
1419 | |
1420 | entry = __radix_tree_lookup(root, index, &node, &slot); |
1421 | if (!slot) |
1422 | return NULL; |
1423 | if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE, |
1424 | get_slot_offset(node, slot)))) |
1425 | return NULL; |
1426 | |
1427 | if (item && entry != item) |
1428 | return NULL; |
1429 | |
1430 | __radix_tree_delete(root, node, slot); |
1431 | |
1432 | return entry; |
1433 | } |
1434 | EXPORT_SYMBOL(radix_tree_delete_item); |
1435 | |
1436 | /** |
1437 | * radix_tree_delete - delete an entry from a radix tree |
1438 | * @root: radix tree root |
1439 | * @index: index key |
1440 | * |
1441 | * Remove the entry at @index from the radix tree rooted at @root. |
1442 | * |
1443 | * Return: The deleted entry, or %NULL if it was not present. |
1444 | */ |
1445 | void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) |
1446 | { |
1447 | return radix_tree_delete_item(root, index, NULL); |
1448 | } |
1449 | EXPORT_SYMBOL(radix_tree_delete); |
1450 | |
1451 | /** |
1452 | * radix_tree_tagged - test whether any items in the tree are tagged |
1453 | * @root: radix tree root |
1454 | * @tag: tag to test |
1455 | */ |
1456 | int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag) |
1457 | { |
1458 | return root_tag_get(root, tag); |
1459 | } |
1460 | EXPORT_SYMBOL(radix_tree_tagged); |
1461 | |
1462 | /** |
1463 | * idr_preload - preload for idr_alloc() |
1464 | * @gfp_mask: allocation mask to use for preloading |
1465 | * |
1466 | * Preallocate memory to use for the next call to idr_alloc(). This function |
1467 | * returns with preemption disabled. It will be enabled by idr_preload_end(). |
1468 | */ |
1469 | void idr_preload(gfp_t gfp_mask) |
1470 | { |
1471 | if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE)) |
1472 | local_lock(&radix_tree_preloads.lock); |
1473 | } |
1474 | EXPORT_SYMBOL(idr_preload); |
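
/*
 * idr_alloc() pairing sketch: preload outside the lock with a sleeping
 * allocation, then allocate the id itself with GFP_NOWAIT so the locked
 * section never blocks.  my_idr, my_idr_lock and example_assign_id are
 * hypothetical names.
 */
static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_idr_lock);

static int example_assign_id(void *object)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep; disables preemption */
	spin_lock(&my_idr_lock);
	id = idr_alloc(&my_idr, object, 0, 0, GFP_NOWAIT);	/* end == 0: no upper limit */
	spin_unlock(&my_idr_lock);
	idr_preload_end();

	return id;	/* a valid id >= 0, or -ENOMEM / -ENOSPC */
}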
1475 | |
1476 | void __rcu **idr_get_free(struct radix_tree_root *root, |
1477 | struct radix_tree_iter *iter, gfp_t gfp, |
1478 | unsigned long max) |
1479 | { |
1480 | struct radix_tree_node *node = NULL, *child; |
1481 | void __rcu **slot = (void __rcu **)&root->xa_head; |
1482 | unsigned long maxindex, start = iter->next_index; |
1483 | unsigned int shift, offset = 0; |
1484 | |
1485 | grow: |
1486 | shift = radix_tree_load_root(root, &child, &maxindex); |
1487 | if (!radix_tree_tagged(root, IDR_FREE)) |
1488 | start = max(start, maxindex + 1); |
1489 | if (start > max) |
1490 | return ERR_PTR(-ENOSPC); |
1491 | |
1492 | if (start > maxindex) { |
1493 | int error = radix_tree_extend(root, gfp, start, shift); |
1494 | if (error < 0) |
1495 | return ERR_PTR(error); |
1496 | shift = error; |
1497 | child = rcu_dereference_raw(root->xa_head); |
1498 | } |
1499 | if (start == 0 && shift == 0) |
1500 | shift = RADIX_TREE_MAP_SHIFT; |
1501 | |
1502 | while (shift) { |
1503 | shift -= RADIX_TREE_MAP_SHIFT; |
1504 | if (child == NULL) { |
1505 | /* Have to add a child node. */ |
1506 | child = radix_tree_node_alloc(gfp, node, root, shift, |
1507 | offset, 0, 0); |
1508 | if (!child) |
1509 | return ERR_PTR(-ENOMEM); |
1510 | all_tag_set(child, IDR_FREE); |
1511 | rcu_assign_pointer(*slot, node_to_entry(child)); |
1512 | if (node) |
1513 | node->count++; |
1514 | } else if (!radix_tree_is_internal_node(child)) |
1515 | break; |
1516 | |
1517 | node = entry_to_node(child); |
1518 | offset = radix_tree_descend(node, &child, start); |
1519 | if (!tag_get(node, IDR_FREE, offset)) { |
1520 | offset = radix_tree_find_next_bit(node, IDR_FREE, |
1521 | offset + 1); |
1522 | start = next_index(start, node, offset); |
1523 | if (start > max || start == 0) |
1524 | return ERR_PTR(-ENOSPC); |
1525 | while (offset == RADIX_TREE_MAP_SIZE) { |
1526 | offset = node->offset + 1; |
1527 | node = node->parent; |
1528 | if (!node) |
1529 | goto grow; |
1530 | shift = node->shift; |
1531 | } |
1532 | child = rcu_dereference_raw(node->slots[offset]); |
1533 | } |
1534 | slot = &node->slots[offset]; |
1535 | } |
1536 | |
1537 | iter->index = start; |
1538 | if (node) |
1539 | iter->next_index = 1 + min(max, (start | node_maxindex(node))); |
1540 | else |
1541 | iter->next_index = 1; |
1542 | iter->node = node; |
1543 | set_iter_tags(iter, node, offset, IDR_FREE); |
1544 | |
1545 | return slot; |
1546 | } |
1547 | |
1548 | /** |
1549 | * idr_destroy - release all internal memory from an IDR |
1550 | * @idr: idr handle |
1551 | * |
1552 | * After this function is called, the IDR is empty, and may be reused or |
1553 | * the data structure containing it may be freed. |
1554 | * |
1555 | * A typical clean-up sequence for objects stored in an idr tree will use |
1556 | * idr_for_each() to free all objects, if necessary, then idr_destroy() to |
1557 | * free the memory used to keep track of those objects. |
1558 | */ |
1559 | void idr_destroy(struct idr *idr) |
1560 | { |
1561 | struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.xa_head); |
1562 | if (radix_tree_is_internal_node(node)) |
1563 | radix_tree_free_nodes(node); |
1564 | idr->idr_rt.xa_head = NULL; |
1565 | root_tag_set(&idr->idr_rt, IDR_FREE); |
1566 | } |
1567 | EXPORT_SYMBOL(idr_destroy); |
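
/*
 * Teardown sketch following the sequence described above: free the stored
 * objects with idr_for_each_entry(), then release the IDR's own nodes.
 * my_idr is the hypothetical IDR from the previous sketch, and its entries
 * are assumed to have been allocated with kmalloc().
 */
static void example_teardown(void)
{
	void *entry;
	int id;

	idr_for_each_entry(&my_idr, entry, id)
		kfree(entry);
	idr_destroy(&my_idr);
}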
1568 | |
1569 | static void |
1570 | radix_tree_node_ctor(void *arg) |
1571 | { |
1572 | struct radix_tree_node *node = arg; |
1573 | |
1574 | memset(node, 0, sizeof(*node)); |
1575 | INIT_LIST_HEAD(&node->private_list); |
1576 | } |
1577 | |
1578 | static int radix_tree_cpu_dead(unsigned int cpu) |
1579 | { |
1580 | struct radix_tree_preload *rtp; |
1581 | struct radix_tree_node *node; |
1582 | |
1583 | /* Free per-cpu pool of preloaded nodes */ |
1584 | rtp = &per_cpu(radix_tree_preloads, cpu); |
1585 | while (rtp->nr) { |
1586 | node = rtp->nodes; |
1587 | rtp->nodes = node->parent; |
1588 | kmem_cache_free(radix_tree_node_cachep, node); |
1589 | rtp->nr--; |
1590 | } |
1591 | return 0; |
1592 | } |
1593 | |
1594 | void __init radix_tree_init(void) |
1595 | { |
1596 | int ret; |
1597 | |
1598 | BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32); |
1599 | BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK); |
1600 | BUILD_BUG_ON(XA_CHUNK_SIZE > 255); |
1601 | radix_tree_node_cachep = kmem_cache_create("radix_tree_node", |
1602 | sizeof(struct radix_tree_node), 0, |
1603 | SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, |
1604 | radix_tree_node_ctor); |
1605 | ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead", |
1606 | NULL, radix_tree_cpu_dead); |
1607 | WARN_ON(ret < 0); |
1608 | } |
1609 |