/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCULIST_H
#define _LINUX_RCULIST_H

#ifdef __KERNEL__

/*
 * RCU-protected list version
 */
#include <linux/list.h>
#include <linux/rcupdate.h>

/*
 * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
 * @list: list to be initialized
 *
 * You should instead use INIT_LIST_HEAD() for normal initialization and
 * cleanup tasks, when readers have no access to the list being initialized.
 * However, if the list being initialized is visible to readers, you
 * need to keep the compiler from being too mischievous.
 */
static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
{
	WRITE_ONCE(list->next, list);
	WRITE_ONCE(list->prev, list);
}

/*
 * Return the ->next pointer of a list_head in an RCU-safe
 * way; it must not be accessed directly.
 */
#define list_next_rcu(list)	(*((struct list_head __rcu **)(&(list)->next)))

/**
 * list_tail_rcu - returns the prev pointer of the head of the list
 * @head: the head of the list
 *
 * Note: This should only be used with the list header, and even then
 * only if list_del() and similar primitives are not also used on the
 * list header.
 */
#define list_tail_rcu(head)	(*((struct list_head __rcu **)(&(head)->prev)))

/*
 * Check during list traversal that we are within an RCU reader
 */

#define check_arg_count_one(dummy)

#ifdef CONFIG_PROVE_RCU_LIST
#define __list_check_rcu(dummy, cond, extra...)				\
	({								\
	check_arg_count_one(extra);					\
	RCU_LOCKDEP_WARN(!(cond) && !rcu_read_lock_any_held(),		\
			 "RCU-list traversed in non-reader section!");	\
	})

#define __list_check_srcu(cond)						\
	({								\
	RCU_LOCKDEP_WARN(!(cond),					\
		"RCU-list traversed without holding the required lock!");\
	})
#else
#define __list_check_rcu(dummy, cond, extra...)				\
	({ check_arg_count_one(extra); })

#define __list_check_srcu(cond) ({ })
#endif

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head *new,
		struct list_head *prev, struct list_head *next)
{
	if (!__list_add_valid(new, prev, next))
		return;

	new->next = next;
	new->prev = prev;
	rcu_assign_pointer(list_next_rcu(prev), new);
	next->prev = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
	__list_add_rcu(new, head, head->next);
}
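
/*
 * Example (illustrative sketch only; struct foo, foo_list, foo_lock, and
 * foo_push() are hypothetical names, not part of this API): a writer
 * excludes other updaters with a spinlock while readers traverse the
 * list concurrently under rcu_read_lock().
 *
 *	struct foo {
 *		int data;
 *		struct list_head list;
 *	};
 *
 *	static LIST_HEAD(foo_list);
 *	static DEFINE_SPINLOCK(foo_lock);
 *
 *	static void foo_push(struct foo *p)
 *	{
 *		spin_lock(&foo_lock);
 *		list_add_rcu(&p->list, &foo_list);
 *		spin_unlock(&foo_lock);
 *	}
 */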

/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
					struct list_head *head)
{
	__list_add_rcu(new, head->prev, head);
}

/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on entry does not return true after this;
 * the entry is in an undefined state. It is useful for RCU-based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry. Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
	__list_del_entry(entry);
	entry->prev = LIST_POISON2;
}
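
/*
 * Example (illustrative sketch only; struct foo, foo_lock, foo_remove(),
 * and the struct rcu_head member named "rcu" are hypothetical): delete an
 * entry under the update-side lock, then defer its freeing until after a
 * grace period using kfree_rcu() (synchronize_rcu() followed by kfree()
 * would also work, at the cost of blocking).
 *
 *	static void foo_remove(struct foo *p)
 *	{
 *		spin_lock(&foo_lock);
 *		list_del_rcu(&p->list);
 *		spin_unlock(&foo_lock);
 *		kfree_rcu(p, rcu);
 *	}
 */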

/**
 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on the node returns true after this. It is
 * useful for RCU-based lockfree read traversal if the writer side
 * must know whether the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we can not poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so hlist_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_add_head_rcu() or
 * hlist_del_rcu(), running on this same list. However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_for_each_entry_rcu().
 */
static inline void hlist_del_init_rcu(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		WRITE_ONCE(n->pprev, NULL);
	}
}

/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 * Note: @old should not be empty.
 */
static inline void list_replace_rcu(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->prev = old->prev;
	rcu_assign_pointer(list_next_rcu(new->prev), new);
	new->next->prev = new;
	old->prev = LIST_POISON2;
}
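
/*
 * Example (illustrative sketch only; old_foo, new_foo, and foo_lock are
 * hypothetical): replace an entry in place, then free the old one only
 * after a grace period has elapsed.
 *
 *	spin_lock(&foo_lock);
 *	list_replace_rcu(&old_foo->list, &new_foo->list);
 *	spin_unlock(&foo_lock);
 *	synchronize_rcu();
 *	kfree(old_foo);
 */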

/**
 * __list_splice_init_rcu - join an RCU-protected list into an existing list.
 * @list: the RCU-protected list to splice
 * @prev: points to the last element of the existing list
 * @next: points to the first element of the existing list
 * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
 *
 * The list pointed to by @prev and @next can be RCU-read traversed
 * concurrently with this function.
 *
 * Note that this function blocks.
 *
 * Important note: the caller must take whatever action is necessary to prevent
 * any other updates to the existing list. In principle, it is possible to
 * modify the list as soon as sync() begins execution. If this sort of thing
 * becomes necessary, an alternative version based on call_rcu() could be
 * created. But only if -really- needed -- there is no shortage of RCU API
 * members.
 */
static inline void __list_splice_init_rcu(struct list_head *list,
					  struct list_head *prev,
					  struct list_head *next,
					  void (*sync)(void))
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;

	/*
	 * "first" and "last" now track the list being spliced, so it is
	 * safe to re-initialize @list.  RCU readers have access to this
	 * list, so we must use INIT_LIST_HEAD_RCU() instead of
	 * INIT_LIST_HEAD().
	 */

	INIT_LIST_HEAD_RCU(list);

	/*
	 * At this point, the list body still points to the source list.
	 * Wait for any readers to finish using the list before splicing
	 * the list body into the new list. Any new readers will see
	 * an empty list.
	 */

	sync();
	ASSERT_EXCLUSIVE_ACCESS(*first);
	ASSERT_EXCLUSIVE_ACCESS(*last);

	/*
	 * Readers are finished with the source list, so perform splice.
	 * The order is important if the new list is global and accessible
	 * to concurrent RCU readers. Note that RCU readers are not
	 * permitted to traverse the prev pointers without excluding
	 * this function.
	 */

	last->next = next;
	rcu_assign_pointer(list_next_rcu(prev), first);
	first->prev = prev;
	next->prev = last;
}

/**
 * list_splice_init_rcu - splice an RCU-protected list into an existing list,
 * designed for stacks.
 * @list: the RCU-protected list to splice
 * @head: the place in the existing list to splice the first list into
 * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
 */
static inline void list_splice_init_rcu(struct list_head *list,
					struct list_head *head,
					void (*sync)(void))
{
	if (!list_empty(list))
		__list_splice_init_rcu(list, head, head->next, sync);
}
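
/*
 * Example (illustrative sketch only; "source" and "destination" are
 * hypothetical list heads): splice the entries of source onto the front
 * of destination, passing synchronize_rcu() as the @sync function.
 * Note that the call blocks for a full grace period.
 *
 *	list_splice_init_rcu(&source, &destination, synchronize_rcu);
 */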

/**
 * list_splice_tail_init_rcu - splice an RCU-protected list into an existing
 * list, designed for queues.
 * @list: the RCU-protected list to splice
 * @head: the place in the existing list to splice the first list into
 * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
 */
static inline void list_splice_tail_init_rcu(struct list_head *list,
					     struct list_head *head,
					     void (*sync)(void))
{
	if (!list_empty(list))
		__list_splice_init_rcu(list, head->prev, head, sync);
}

/**
 * list_entry_rcu - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_entry_rcu(ptr, type, member) \
	container_of(READ_ONCE(ptr), type, member)

/*
 * Where are list_empty_rcu() and list_first_entry_rcu()?
 *
 * They do not exist because they would lead to subtle race conditions:
 *
 *	if (!list_empty_rcu(mylist)) {
 *		struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
 *		do_something(bar);
 *	}
 *
 * The list might be non-empty when list_empty_rcu() checks it, but it
 * might have become empty by the time that list_first_entry_rcu() rereads
 * the ->next pointer, which would result in a SEGV.
 *
 * When not using RCU, it is OK for list_first_entry() to re-read that
 * pointer because both functions should be protected by some lock that
 * blocks writers.
 *
 * When using RCU, list_empty() uses READ_ONCE() to fetch the
 * RCU-protected ->next pointer and then compares it to the address of the
 * list head. However, it neither dereferences this pointer nor provides
 * this pointer to its caller. Thus, READ_ONCE() suffices (that is,
 * rcu_dereference() is not needed), which means that list_empty() can be
 * used anywhere you would want to use list_empty_rcu(). Just don't
 * expect anything useful to happen if you do a subsequent lockless
 * call to list_first_entry_rcu()!!!
 *
 * See list_first_or_null_rcu for an alternative.
 */

/**
 * list_first_or_null_rcu - get the first element from a list
 * @ptr: the list head to take the element from.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * Note that if the list is empty, it returns NULL.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_first_or_null_rcu(ptr, type, member) \
({ \
	struct list_head *__ptr = (ptr); \
	struct list_head *__next = READ_ONCE(__ptr->next); \
	likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})
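
/*
 * Example (illustrative sketch only; struct foo, foo_list, and
 * do_something() are hypothetical): fetch the first entry, if any,
 * within a single RCU read-side critical section, avoiding the
 * list_empty()/list_first_entry() race described above.
 *
 *	rcu_read_lock();
 *	p = list_first_or_null_rcu(&foo_list, struct foo, list);
 *	if (p)
 *		do_something(p);
 *	rcu_read_unlock();
 */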

/**
 * list_next_or_null_rcu - get the next element from a list
 * @head: the head for the list.
 * @ptr: the list head to take the next element from.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * Note that if the ptr is at the end of the list, NULL is returned.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_next_or_null_rcu(head, ptr, type, member) \
({ \
	struct list_head *__head = (head); \
	struct list_head *__ptr = (ptr); \
	struct list_head *__next = READ_ONCE(__ptr->next); \
	likely(__next != __head) ? list_entry_rcu(__next, type, \
						  member) : NULL; \
})

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 * @cond: optional lockdep expression if called from non-RCU protection.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member, cond...)		\
	for (__list_check_rcu(dummy, ## cond, 0),			\
	     pos = list_entry_rcu((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
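
/*
 * Example (illustrative sketch only; struct foo, foo_list, foo_lock, and
 * do_something() are hypothetical): readers traverse under
 * rcu_read_lock(); an updater already holding foo_lock can instead pass
 * a lockdep expression as the optional @cond argument.
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(p, &foo_list, list)
 *		do_something(p);
 *	rcu_read_unlock();
 *
 *	list_for_each_entry_rcu(p, &foo_list, list,
 *				lockdep_is_held(&foo_lock))
 *		do_something(p);
 */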

/**
 * list_for_each_entry_srcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 * @cond: lockdep expression for the lock required to traverse the list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by srcu_read_lock().
 * The lockdep expression srcu_read_lock_held() can be passed as the
 * cond argument from read side.
 */
#define list_for_each_entry_srcu(pos, head, member, cond)		\
	for (__list_check_srcu(cond),					\
	     pos = list_entry_rcu((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = list_entry_rcu(pos->member.next, typeof(*pos), member))

/**
 * list_entry_lockless - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu
 * list-mutation primitives such as list_add_rcu(), but requires some
 * implicit RCU read-side guarding. One example is running within a special
 * exception-time environment where preemption is disabled and where lockdep
 * cannot be invoked. Another example is when items are added to the list,
 * but never deleted.
 */
#define list_entry_lockless(ptr, type, member) \
	container_of((typeof(ptr))READ_ONCE(ptr), type, member)

/**
 * list_for_each_entry_lockless - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu
 * list-mutation primitives such as list_add_rcu(), but requires some
 * implicit RCU read-side guarding. One example is running within a special
 * exception-time environment where preemption is disabled and where lockdep
 * cannot be invoked. Another example is when items are added to the list,
 * but never deleted.
 */
#define list_for_each_entry_lockless(pos, head, member) \
	for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = list_entry_lockless(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_continue_rcu - continue iteration over list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position which must have been in the list when the RCU read
 * lock was taken.
 * This would typically require either that you obtained the node from a
 * previous walk of the list in the same RCU read-side critical section, or
 * that you held some sort of non-RCU reference (such as a reference count)
 * to keep the node alive *and* in the list.
 *
 * This iterator is similar to list_for_each_entry_from_rcu() except
 * that this one starts after the given position while that one starts
 * at the given position.
 */
#define list_for_each_entry_continue_rcu(pos, head, member)		\
	for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head);					\
	     pos = list_entry_rcu(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_from_rcu - iterate over a list from current point
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_node within the struct.
 *
 * Iterate over the tail of a list starting from a given position,
 * which must have been in the list when the RCU read lock was taken.
 * This would typically require either that you obtained the node from a
 * previous walk of the list in the same RCU read-side critical section, or
 * that you held some sort of non-RCU reference (such as a reference count)
 * to keep the node alive *and* in the list.
 *
 * This iterator is similar to list_for_each_entry_continue_rcu() except
 * that this one starts from the given position while that one starts from
 * the position after the given position.
 */
#define list_for_each_entry_from_rcu(pos, head, member)		\
	for (; &(pos)->member != (head);				\
	     pos = list_entry_rcu(pos->member.next, typeof(*(pos)), member))

/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on entry does not return true after this;
 * the entry is in an undefined state. It is useful for RCU-based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
	__hlist_del(n);
	WRITE_ONCE(n->pprev, LIST_POISON2);
}

/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
				     struct hlist_node *new)
{
	struct hlist_node *next = old->next;

	new->next = next;
	WRITE_ONCE(new->pprev, old->pprev);
	rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
	if (next)
		WRITE_ONCE(new->next->pprev, &new->next);
	WRITE_ONCE(old->pprev, LIST_POISON2);
}

/**
 * hlists_swap_heads_rcu - swap the lists the hlist heads point to
 * @left: The hlist head on the left
 * @right: The hlist head on the right
 *
 * The lists start out as [@left  ][node1 ... ] and
 *                        [@right ][node2 ... ]
 * The lists end up as    [@left  ][node2 ... ]
 *                        [@right ][node1 ... ]
 */
static inline void hlists_swap_heads_rcu(struct hlist_head *left, struct hlist_head *right)
{
	struct hlist_node *node1 = left->first;
	struct hlist_node *node2 = right->first;

	rcu_assign_pointer(left->first, node2);
	rcu_assign_pointer(right->first, node1);
	WRITE_ONCE(node2->pprev, &left->first);
	WRITE_ONCE(node1->pprev, &right->first);
}

/*
 * Return the first or the next element in an RCU-protected hlist.
 */
#define hlist_first_rcu(head)	(*((struct hlist_node __rcu **)(&(head)->first)))
#define hlist_next_rcu(node)	(*((struct hlist_node __rcu **)(&(node)->next)))
#define hlist_pprev_rcu(node)	(*((struct hlist_node __rcu **)((node)->pprev)))

/**
 * hlist_add_head_rcu - add an element to the beginning of an RCU-protected hlist
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
				      struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	n->next = first;
	WRITE_ONCE(n->pprev, &h->first);
	rcu_assign_pointer(hlist_first_rcu(h), n);
	if (first)
		WRITE_ONCE(first->pprev, &n->next);
}
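
/*
 * Example (illustrative sketch only; struct foo, foo_buckets[], foo_lock,
 * and foo_hashfn() are hypothetical): insert into one bucket of a simple
 * RCU-protected hash table.
 *
 *	struct foo {
 *		int key;
 *		struct hlist_node node;
 *	};
 *
 *	static struct hlist_head foo_buckets[16];
 *
 *	static void foo_hash_insert(struct foo *p)
 *	{
 *		spin_lock(&foo_lock);
 *		hlist_add_head_rcu(&p->node, &foo_buckets[foo_hashfn(p->key)]);
 *		spin_unlock(&foo_lock);
 *	}
 */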

/**
 * hlist_add_tail_rcu - add an element to the end of an RCU-protected hlist
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_tail_rcu(struct hlist_node *n,
				      struct hlist_head *h)
{
	struct hlist_node *i, *last = NULL;

	/* Note: write side code, so rcu accessors are not needed. */
	for (i = h->first; i; i = i->next)
		last = i;

	if (last) {
		n->next = last->next;
		WRITE_ONCE(n->pprev, &last->next);
		rcu_assign_pointer(hlist_next_rcu(last), n);
	} else {
		hlist_add_head_rcu(n, h);
	}
}

/**
 * hlist_add_before_rcu - add an element before a given element in an RCU-protected hlist
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
					struct hlist_node *next)
{
	WRITE_ONCE(n->pprev, next->pprev);
	n->next = next;
	rcu_assign_pointer(hlist_pprev_rcu(n), n);
	WRITE_ONCE(next->pprev, &n->next);
}

/**
 * hlist_add_behind_rcu - add an element after a given element in an RCU-protected hlist
 * @n: the new element to add to the hash list.
 * @prev: the existing element to add the new element after.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_behind_rcu(struct hlist_node *n,
					struct hlist_node *prev)
{
	n->next = prev->next;
	WRITE_ONCE(n->pprev, &prev->next);
	rcu_assign_pointer(hlist_next_rcu(prev), n);
	if (n->next)
		WRITE_ONCE(n->next->pprev, &n->next);
}

#define __hlist_for_each_rcu(pos, head)				\
	for (pos = rcu_dereference(hlist_first_rcu(head));	\
	     pos;						\
	     pos = rcu_dereference(hlist_next_rcu(pos)))

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 * @cond: optional lockdep expression if called from non-RCU protection.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(pos, head, member, cond...)		\
	for (__list_check_rcu(dummy, ## cond, 0),			\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(	\
			&(pos)->member)), typeof(*(pos)), member))

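/*
 * Example (illustrative sketch only; struct foo, foo_buckets[],
 * foo_hashfn(), and do_something() are hypothetical): look up a key in
 * an RCU-protected hash bucket.
 *
 *	rcu_read_lock();
 *	hlist_for_each_entry_rcu(p, &foo_buckets[foo_hashfn(key)], node) {
 *		if (p->key == key) {
 *			do_something(p);
 *			break;
 *		}
 *	}
 *	rcu_read_unlock();
 */
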
/**
 * hlist_for_each_entry_srcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 * @cond: lockdep expression for the lock required to traverse the list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by srcu_read_lock().
 * The lockdep expression srcu_read_lock_held() can be passed as the
 * cond argument from read side.
 */
#define hlist_for_each_entry_srcu(pos, head, member, cond)		\
	for (__list_check_srcu(cond),					\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(	\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * This is the same as hlist_for_each_entry_rcu() except that it does
 * not do any RCU debugging or tracing.
 */
#define hlist_for_each_entry_rcu_notrace(pos, head, member)		\
	for (pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu_bh(pos, head, member)			\
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(	\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
 * @pos: the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu(pos, member)			\
	for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member);	\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
 * @pos: the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu_bh(pos, member)		\
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(	\
			&(pos)->member)), typeof(*(pos)), member);	\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(	\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
 * @pos: the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from_rcu(pos, member)			\
	for (; pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member))

#endif	/* __KERNEL__ */
#endif
806 | |