/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
  Red Black Trees
  (C) 1999 Andrea Arcangeli <andrea@suse.de>

  linux/include/linux/rbtree.h

  To use rbtrees you'll have to implement your own insert and search cores.
  This avoids callbacks, which would drastically hurt performance.  It's not
  the cleanest approach, but it is how you get both performance and
  genericity in C (as opposed to C++).  An illustrative insert/search sketch
  appears in a comment near rb_link_node() below.

  See Documentation/core-api/rbtree.rst for documentation and samples.
*/

#ifndef	_LINUX_RBTREE_H
#define	_LINUX_RBTREE_H

#include <linux/container_of.h>
#include <linux/rbtree_types.h>

#include <linux/stddef.h>
#include <linux/rcupdate.h>

#define rb_parent(r)	((struct rb_node *)((r)->__rb_parent_color & ~3))

#define rb_entry(ptr, type, member) container_of(ptr, type, member)

#define RB_EMPTY_ROOT(root)	(READ_ONCE((root)->rb_node) == NULL)

/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
#define RB_EMPTY_NODE(node)  \
	((node)->__rb_parent_color == (unsigned long)(node))
#define RB_CLEAR_NODE(node)  \
	((node)->__rb_parent_color = (unsigned long)(node))
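/*
 * Illustrative sketch (not part of this header): 'struct example_obj',
 * used by the example comments throughout this file, is hypothetical.
 * A node is marked 'empty' before insertion so that later code can tell
 * whether the object is currently linked into a tree.
 *
 *	struct example_obj {
 *		struct rb_node node;
 *		unsigned long key;
 *	};
 *
 *	static void example_obj_init(struct example_obj *obj)
 *	{
 *		RB_CLEAR_NODE(&obj->node);
 *	}
 *
 *	static bool example_obj_linked(struct example_obj *obj)
 *	{
 *		return !RB_EMPTY_NODE(&obj->node);
 *	}
 */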


extern void rb_insert_color(struct rb_node *, struct rb_root *);
extern void rb_erase(struct rb_node *, struct rb_root *);


/* Find logical next and previous nodes in a tree */
extern struct rb_node *rb_next(const struct rb_node *);
extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);
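/*
 * Illustrative sketch (hypothetical 'struct example_obj' as above, with an
 * embedded 'struct rb_node node' and an 'unsigned long key'):
 * rb_first()/rb_next() visit the nodes in ascending key order.
 *
 *	static unsigned long example_sum_keys(struct rb_root *root)
 *	{
 *		struct rb_node *n;
 *		unsigned long sum = 0;
 *
 *		for (n = rb_first(root); n; n = rb_next(n))
 *			sum += rb_entry(n, struct example_obj, node)->key;
 *
 *		return sum;
 *	}
 */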

/* Postorder iteration - always visit the parent after its children */
extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);

/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
			    struct rb_root *root);
extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
				struct rb_root *root);
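/*
 * Illustrative sketch: substituting a new copy of an element for an old one
 * in place.  This is only valid when the replacement sorts exactly like the
 * victim, since no rebalancing is performed ('struct example_obj' as in the
 * sketches above is hypothetical).
 *
 *	static void example_replace(struct rb_root *root,
 *				    struct example_obj *old,
 *				    struct example_obj *new)
 *	{
 *		new->key = old->key;
 *		rb_replace_node(&old->node, &new->node, root);
 *	}
 */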

static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
				struct rb_node **rb_link)
{
	node->__rb_parent_color = (unsigned long)parent;
	node->rb_left = node->rb_right = NULL;

	*rb_link = node;
}

static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
				    struct rb_node **rb_link)
{
	node->__rb_parent_color = (unsigned long)parent;
	node->rb_left = node->rb_right = NULL;

	rcu_assign_pointer(*rb_link, node);
}
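/*
 * Illustrative sketch of the "write your own insert and search cores"
 * pattern mentioned at the top of this file, in the same shape as the
 * samples in Documentation/core-api/rbtree.rst.  'struct example_obj',
 * example_search() and example_insert() are hypothetical.
 *
 *	static struct example_obj *example_search(struct rb_root *root,
 *						  unsigned long key)
 *	{
 *		struct rb_node *n = root->rb_node;
 *
 *		while (n) {
 *			struct example_obj *obj =
 *				rb_entry(n, struct example_obj, node);
 *
 *			if (key < obj->key)
 *				n = n->rb_left;
 *			else if (key > obj->key)
 *				n = n->rb_right;
 *			else
 *				return obj;
 *		}
 *		return NULL;
 *	}
 *
 *	static void example_insert(struct rb_root *root, struct example_obj *new)
 *	{
 *		struct rb_node **link = &root->rb_node, *parent = NULL;
 *
 *		while (*link) {
 *			struct example_obj *obj =
 *				rb_entry(*link, struct example_obj, node);
 *
 *			parent = *link;
 *			if (new->key < obj->key)
 *				link = &parent->rb_left;
 *			else
 *				link = &parent->rb_right;
 *		}
 *
 *		rb_link_node(&new->node, parent, link);
 *		rb_insert_color(&new->node, root);
 *	}
 */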

#define rb_entry_safe(ptr, type, member) \
	({ typeof(ptr) ____ptr = (ptr); \
	   ____ptr ? rb_entry(____ptr, type, member) : NULL; \
	})

/**
 * rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of
 * given type allowing the backing memory of @pos to be invalidated
 *
 * @pos: the 'type *' to use as a loop cursor.
 * @n: another 'type *' to use as temporary storage
 * @root: 'rb_root *' of the rbtree.
 * @field: the name of the rb_node field within 'type'.
 *
 * rbtree_postorder_for_each_entry_safe() provides a similar guarantee as
 * list_for_each_entry_safe() and allows the iteration to continue independent
 * of changes to @pos by the body of the loop.
 *
 * Note, however, that it cannot handle other modifications that re-order the
 * rbtree it is iterating over. This includes calling rb_erase() on @pos, as
 * rb_erase() may rebalance the tree, causing us to miss some nodes.
 */
#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
	for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
	     pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
			typeof(*pos), field); 1; }); \
	     pos = n)
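/*
 * Illustrative sketch: tearing down a whole tree and freeing its elements.
 * Post-order visits children before their parent, so freeing @pos inside
 * the loop body is safe.  Assumes a hypothetical 'struct rb_root root'
 * holding 'struct example_obj' elements allocated with kmalloc()
 * (linux/slab.h).
 *
 *	struct example_obj *obj, *tmp;
 *
 *	rbtree_postorder_for_each_entry_safe(obj, tmp, &root, node)
 *		kfree(obj);
 *	root = RB_ROOT;
 */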

/* Same as rb_first(), but O(1) */
#define rb_first_cached(root) (root)->rb_leftmost

static inline void rb_insert_color_cached(struct rb_node *node,
					  struct rb_root_cached *root,
					  bool leftmost)
{
	if (leftmost)
		root->rb_leftmost = node;
	rb_insert_color(node, &root->rb_root);
}


static inline struct rb_node *
rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
{
	struct rb_node *leftmost = NULL;

	if (root->rb_leftmost == node)
		leftmost = root->rb_leftmost = rb_next(node);

	rb_erase(node, &root->rb_root);

	return leftmost;
}

static inline void rb_replace_node_cached(struct rb_node *victim,
					  struct rb_node *new,
					  struct rb_root_cached *root)
{
	if (root->rb_leftmost == victim)
		root->rb_leftmost = new;
	rb_replace_node(victim, new, &root->rb_root);
}
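/*
 * Illustrative sketch: a cached-root tree keeps an O(1) pointer to its
 * leftmost (smallest) node, which suits "pop the minimum" patterns.
 * RB_ROOT_CACHED comes from linux/rbtree_types.h; 'struct example_obj' is
 * hypothetical as above.
 *
 *	struct rb_root_cached example_tree = RB_ROOT_CACHED;
 *
 *	static struct example_obj *example_pop_min(struct rb_root_cached *tree)
 *	{
 *		struct rb_node *first = rb_first_cached(tree);
 *
 *		if (!first)
 *			return NULL;
 *		rb_erase_cached(first, tree);
 *		return rb_entry(first, struct example_obj, node);
 *	}
 */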

/*
 * The below helper functions use 2 operators with 3 different
 * calling conventions. The operators are related like:
 *
 *	comp(a->key,b) < 0  := less(a,b)
 *	comp(a->key,b) > 0  := less(b,a)
 *	comp(a->key,b) == 0 := !less(a,b) && !less(b,a)
 *
 * If these operators define a partial order on the elements we make no
 * guarantee on which of the elements matching the key is found. See
 * rb_find().
 *
 * The reason for this is to allow the find() interface without requiring an
 * on-stack dummy object, which might not be feasible due to object size.
 */
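/*
 * Illustrative sketch: a matched pair of operators obeying the relation
 * above for the hypothetical 'struct example_obj' keyed on 'key'.
 * example_less() feeds the rb_add*() helpers below, example_cmp() the
 * rb_find*() helpers.
 *
 *	static bool example_less(struct rb_node *a, const struct rb_node *b)
 *	{
 *		return rb_entry(a, struct example_obj, node)->key <
 *		       rb_entry(b, struct example_obj, node)->key;
 *	}
 *
 *	static int example_cmp(const void *key, const struct rb_node *n)
 *	{
 *		unsigned long k = *(const unsigned long *)key;
 *		unsigned long nk = rb_entry(n, struct example_obj, node)->key;
 *
 *		if (k < nk)
 *			return -1;
 *		if (k > nk)
 *			return 1;
 *		return 0;
 *	}
 */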

/**
 * rb_add_cached() - insert @node into the leftmost cached tree @tree
 * @node: node to insert
 * @tree: leftmost cached tree to insert @node into
 * @less: operator defining the (partial) node order
 *
 * Returns @node when it is the new leftmost, or NULL.
 */
static __always_inline struct rb_node *
rb_add_cached(struct rb_node *node, struct rb_root_cached *tree,
	      bool (*less)(struct rb_node *, const struct rb_node *))
{
	struct rb_node **link = &tree->rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	while (*link) {
		parent = *link;
		if (less(node, parent)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(node, parent, link);
	rb_insert_color_cached(node, tree, leftmost);

	return leftmost ? node : NULL;
}
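/*
 * Illustrative sketch: inserting into a cached-root tree with
 * rb_add_cached() and the hypothetical example_less() from the sketch
 * above.  The return value tells the caller whether the new element
 * became the leftmost (smallest) one.
 *
 *	static bool example_add_cached(struct rb_root_cached *tree,
 *				       struct example_obj *obj)
 *	{
 *		return rb_add_cached(&obj->node, tree, example_less) != NULL;
 *	}
 */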

/**
 * rb_add() - insert @node into @tree
 * @node: node to insert
 * @tree: tree to insert @node into
 * @less: operator defining the (partial) node order
 */
static __always_inline void
rb_add(struct rb_node *node, struct rb_root *tree,
       bool (*less)(struct rb_node *, const struct rb_node *))
{
	struct rb_node **link = &tree->rb_node;
	struct rb_node *parent = NULL;

	while (*link) {
		parent = *link;
		if (less(node, parent))
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}

	rb_link_node(node, parent, link);
	rb_insert_color(node, tree);
}
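/*
 * Illustrative sketch: plain insertion with rb_add() and the hypothetical
 * example_less() from the sketch above; elements comparing equal are
 * simply placed to the right of the existing ones.
 *
 *	static void example_add(struct rb_root *tree, struct example_obj *obj)
 *	{
 *		rb_add(&obj->node, tree, example_less);
 *	}
 */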

/**
 * rb_find_add() - find equivalent @node in @tree, or add @node
 * @node: node to look-for / insert
 * @tree: tree to search / modify
 * @cmp: operator defining the node order
 *
 * Returns the rb_node matching @node, or NULL when no match is found and @node
 * is inserted.
 */
static __always_inline struct rb_node *
rb_find_add(struct rb_node *node, struct rb_root *tree,
	    int (*cmp)(struct rb_node *, const struct rb_node *))
{
	struct rb_node **link = &tree->rb_node;
	struct rb_node *parent = NULL;
	int c;

	while (*link) {
		parent = *link;
		c = cmp(node, parent);

		if (c < 0)
			link = &parent->rb_left;
		else if (c > 0)
			link = &parent->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, link);
	rb_insert_color(node, tree);
	return NULL;
}
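/*
 * Illustrative sketch: insert-if-absent with rb_find_add(); the helper
 * returns the already-present element, or NULL when @obj was inserted.
 * Note the comparator here takes two nodes, unlike the key-based
 * example_cmp() above; example_node_cmp() is hypothetical.
 *
 *	static int example_node_cmp(struct rb_node *a, const struct rb_node *b)
 *	{
 *		unsigned long ka = rb_entry(a, struct example_obj, node)->key;
 *		unsigned long kb = rb_entry(b, struct example_obj, node)->key;
 *
 *		if (ka < kb)
 *			return -1;
 *		if (ka > kb)
 *			return 1;
 *		return 0;
 *	}
 *
 *	static struct example_obj *example_insert_unique(struct rb_root *tree,
 *							 struct example_obj *obj)
 *	{
 *		struct rb_node *old;
 *
 *		old = rb_find_add(&obj->node, tree, example_node_cmp);
 *		return old ? rb_entry(old, struct example_obj, node) : NULL;
 *	}
 */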

/**
 * rb_find() - find @key in tree @tree
 * @key: key to match
 * @tree: tree to search
 * @cmp: operator defining the node order
 *
 * Returns the rb_node matching @key or NULL.
 */
static __always_inline struct rb_node *
rb_find(const void *key, const struct rb_root *tree,
	int (*cmp)(const void *key, const struct rb_node *))
{
	struct rb_node *node = tree->rb_node;

	while (node) {
		int c = cmp(key, node);

		if (c < 0)
			node = node->rb_left;
		else if (c > 0)
			node = node->rb_right;
		else
			return node;
	}

	return NULL;
}
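/*
 * Illustrative sketch: lookup by key with rb_find() and the hypothetical
 * example_cmp() from the sketch above.  The key is passed by pointer, so
 * no dummy on-stack object is needed.
 *
 *	static struct example_obj *example_lookup(struct rb_root *tree,
 *						  unsigned long key)
 *	{
 *		struct rb_node *n = rb_find(&key, tree, example_cmp);
 *
 *		return n ? rb_entry(n, struct example_obj, node) : NULL;
 *	}
 */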

/**
 * rb_find_first() - find the first @key in @tree
 * @key: key to match
 * @tree: tree to search
 * @cmp: operator defining node order
 *
 * Returns the leftmost node matching @key, or NULL.
 */
static __always_inline struct rb_node *
rb_find_first(const void *key, const struct rb_root *tree,
	      int (*cmp)(const void *key, const struct rb_node *))
{
	struct rb_node *node = tree->rb_node;
	struct rb_node *match = NULL;

	while (node) {
		int c = cmp(key, node);

		if (c <= 0) {
			if (!c)
				match = node;
			node = node->rb_left;
		} else if (c > 0) {
			node = node->rb_right;
		}
	}

	return match;
}

/**
 * rb_next_match() - find the next node matching @key, if any
 * @key: key to match
 * @node: node to continue the search from, typically a previous match
 * @cmp: operator defining node order
 *
 * Returns the next node matching @key, or NULL.
 */
static __always_inline struct rb_node *
rb_next_match(const void *key, struct rb_node *node,
	      int (*cmp)(const void *key, const struct rb_node *))
{
	node = rb_next(node);
	if (node && cmp(key, node))
		node = NULL;
	return node;
}

/**
 * rb_for_each() - iterates a subtree matching @key
 * @node: iterator
 * @key: key to match
 * @tree: tree to search
 * @cmp: operator defining node order
 */
#define rb_for_each(node, key, tree, cmp) \
	for ((node) = rb_find_first((key), (tree), (cmp)); \
	     (node); (node) = rb_next_match((key), (node), (cmp)))
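/*
 * Illustrative sketch: visiting every node that compares equal to a key,
 * e.g. when the tree deliberately stores duplicates, using the
 * hypothetical example_cmp() from the sketch above.
 *
 *	static unsigned int example_count_matches(struct rb_root *tree,
 *						  unsigned long key)
 *	{
 *		struct rb_node *n;
 *		unsigned int count = 0;
 *
 *		rb_for_each(n, &key, tree, example_cmp)
 *			count++;
 *
 *		return count;
 *	}
 */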

#endif	/* _LINUX_RBTREE_H */