/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <linux/slab.h>
#include <trace/events/rcu.h>

/*
 * Grace-period counter management.
 *
 * The two least significant bits contain the control flags.
 * The most significant bits contain the grace-period sequence counter.
 *
 * When both control flags are zero, no grace period is in progress.
 * When either bit is non-zero, a grace period has started and is in
 * progress. When the grace period completes, the control flags are reset
 * to 0 and the grace-period sequence counter is incremented.
 *
 * However, some specific RCU usages make use of custom values.
 *
 * SRCU special control values:
 *
 *	SRCU_SNP_INIT_SEQ	: Invalid/init value set when SRCU node
 *				  is initialized.
 *
 *	SRCU_STATE_IDLE		: No SRCU gp is in progress.
 *
 *	SRCU_STATE_SCAN1	: State set by rcu_seq_start(). Indicates
 *				  we are scanning the readers on the slot
 *				  defined as inactive (there might well
 *				  be pending readers that will use that
 *				  index, but their number is bounded).
 *
 *	SRCU_STATE_SCAN2	: State set manually via rcu_seq_set_state().
 *				  Indicates we are flipping the readers
 *				  index and then scanning the readers on the
 *				  slot newly designated as inactive (again,
 *				  the number of pending readers that will use
 *				  this inactive index is bounded).
 *
 * RCU polled GP special control value:
 *
 *	RCU_GET_STATE_COMPLETED	: State value indicating that the polled
 *				  GP has already completed. This value
 *				  covers both the state and the counter
 *				  of the grace-period sequence number.
 */

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

/* Low-order bit definition for polled grace-period APIs. */
#define RCU_GET_STATE_COMPLETED	0x1

extern int sysctl_sched_rt_runtime;

/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}
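
/*
 * Illustrative example (not part of the API): with RCU_SEQ_CTR_SHIFT == 2,
 * the value 0x9 (binary 1001) decomposes as rcu_seq_ctr(0x9) == 2 and
 * rcu_seq_state(0x9) == 1, that is, grace-period counter 2 with a grace
 * period currently in progress.
 */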

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
	WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
	WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
	WRITE_ONCE(*sp, *sp + 1);
	smp_mb(); /* Ensure update-side operation after counter increment. */
	WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Compute the end-of-grace-period value for the specified sequence number. */
static inline unsigned long rcu_seq_endval(unsigned long *sp)
{
	return (*sp | RCU_SEQ_STATE_MASK) + 1;
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
	smp_mb(); /* Ensure update-side operation before counter increment. */
	WARN_ON_ONCE(!rcu_seq_state(*sp));
	WRITE_ONCE(*sp, rcu_seq_endval(sp));
}
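
/*
 * Worked example (illustrative): starting from an idle *sp == 0x8
 * (counter 2, state 0), rcu_seq_start() advances it to 0x9 (counter 2,
 * state 1), and rcu_seq_end() then advances it to rcu_seq_endval() ==
 * (0x9 | 0x3) + 1 == 0xc (counter 3, state 0).
 */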

/*
 * rcu_seq_snap - Take a snapshot of the update side's sequence number.
 *
 * This function returns the earliest value of the grace-period sequence number
 * that will indicate that a full grace period has elapsed since the current
 * time. Once the grace-period sequence number has reached this value, it will
 * be safe to invoke all callbacks that have been registered prior to the
 * current time. This value is the current grace-period number plus two to the
 * power of the number of low-order bits reserved for state, then rounded up to
 * the next value in which the state bits are all zero.
 */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
	unsigned long s;

	s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
	smp_mb(); /* Above access must not bleed into critical section. */
	return s;
}
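
/*
 * Worked example (illustrative): with *sp == 0x9 (counter 2, grace period
 * in progress), rcu_seq_snap() returns (0x9 + 0x7) & ~0x3 == 0x10, that
 * is, counter 4. The in-progress grace period (which ends at counter 3)
 * might have started before the snapshot was taken, so a full grace
 * period is guaranteed only once counter 4's grace period has completed.
 */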

/* Return the current value of the update side's sequence number, no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
	return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not the
 * corresponding update-side operation has started.
 */
static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
}
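
/*
 * For example (illustrative): a snapshot s == 0xc taken while *sp was
 * idle at 0x8 yields (s - 1) & ~RCU_SEQ_STATE_MASK == 0x8, so the
 * corresponding grace period is deemed started as soon as *sp advances
 * past 0x8, that is, once rcu_seq_start() bumps it to 0x9.
 */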

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(READ_ONCE(*sp), s);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred, but do not allow the
 * (ULONG_MAX / 2) safety-factor/guard-band.
 */
static inline bool rcu_seq_done_exact(unsigned long *sp, unsigned long s)
{
	unsigned long cur_s = READ_ONCE(*sp);

	return ULONG_CMP_GE(cur_s, s) || ULONG_CMP_LT(cur_s, s - (2 * RCU_SEQ_STATE_MASK + 1));
}
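
/*
 * Reading of the second clause above (illustrative): 2 * RCU_SEQ_STATE_MASK
 * + 1 is the maximum distance that rcu_seq_snap() can place s ahead of the
 * counter, so if cur_s appears even further behind s than that, the counter
 * is taken to have wrapped past s and the grace period is treated as
 * completed. For example, cur_s == 0x4 with s == 0x10 is 0xc behind, which
 * exceeds 0x7, so wrap is assumed.
 */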

/*
 * Has a grace period completed since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}

/*
 * Has a grace period started since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
			    new);
}

/*
 * Roughly how many full grace periods have elapsed between the collection
 * of the two specified grace periods?
 */
static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
{
	unsigned long rnd_diff;

	if (old == new)
		return 0;
	/*
	 * Compute the number of grace periods (still shifted up), plus
	 * one if either of new and old is not an exact grace period.
	 */
	rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
		   ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
		   ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
	if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
		return 1; /* Definitely no grace period has elapsed. */
	return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
}
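
/*
 * Worked example (illustrative): old == 0x9 and new == 0x14 gives
 * rnd_diff == (0x14 - 0xc) + 1 == 0x9, which exceeds RCU_SEQ_STATE_MASK,
 * so the result is ((0x9 - 0x3 - 1) >> 2) + 2 == 3 full grace periods.
 */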

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part
 * of the RCU API. These are in rcupdate.h because they are used by all
 * RCU implementations.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY	0
# define STATE_RCU_HEAD_QUEUED	1

extern const struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	int r1;

	r1 = debug_object_activate(head, &rcuhead_debug_descr);
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_READY,
				  STATE_RCU_HEAD_QUEUED);
	return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

static inline void debug_rcu_head_callback(struct rcu_head *rhp)
{
	if (unlikely(!rhp->func))
		kmem_dump_obj(rhp);
}

extern int rcu_cpu_stall_suppress_at_boot;

static inline bool rcu_stall_is_suppressed_at_boot(void)
{
	return rcu_cpu_stall_suppress_at_boot && !rcu_inkernel_boot_has_ended();
}

extern int rcu_cpu_stall_notifiers;

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_ftrace_dump;
extern int rcu_cpu_stall_suppress;
extern int rcu_cpu_stall_timeout;
extern int rcu_exp_cpu_stall_timeout;
extern int rcu_cpu_stall_cputime;
extern bool rcu_exp_stall_task_details __read_mostly;
int rcu_jiffies_till_stall_check(void);
int rcu_exp_jiffies_till_stall_check(void);

static inline bool rcu_stall_is_suppressed(void)
{
	return rcu_stall_is_suppressed_at_boot() || rcu_cpu_stall_suppress;
}

#define rcu_ftrace_dump_stall_suppress() \
do { \
	if (!rcu_cpu_stall_suppress) \
		rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
	if (rcu_cpu_stall_suppress == 3) \
		rcu_cpu_stall_suppress = 0; \
} while (0)
#else /* #ifdef CONFIG_RCU_STALL_COMMON */

static inline bool rcu_stall_is_suppressed(void)
{
	return rcu_stall_is_suppressed_at_boot();
}
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)	tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
	\
	if (!atomic_read(&___rfd_beenhere) && \
	    !atomic_xchg(&___rfd_beenhere, 1)) { \
		tracing_off(); \
		rcu_ftrace_dump_stall_suppress(); \
		ftrace_dump(oops_dump_mode); \
		rcu_ftrace_dump_stall_unsuppress(); \
	} \
} while (0)
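
/*
 * Typical usage (illustrative): rcu_ftrace_dump(DUMP_ALL) from a stall
 * or debug path. The atomic_xchg() above makes the dump one-shot per
 * callsite even when racing CPUs reach it concurrently, and stall
 * warnings are suppressed while the buffer is dumped, since dumping can
 * take long enough to trigger spurious stall reports of its own.
 */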

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if !defined(CONFIG_TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
static bool rcu_fanout_exact;
static int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
	int i;

	for (i = 0; i < RCU_NUM_LVLS; i++)
		levelspread[i] = INT_MIN;
	if (rcu_fanout_exact) {
		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
		for (i = rcu_num_lvls - 2; i >= 0; i--)
			levelspread[i] = RCU_FANOUT;
	} else {
		int ccur;
		int cprv;

		cprv = nr_cpu_ids;
		for (i = rcu_num_lvls - 1; i >= 0; i--) {
			ccur = levelcnt[i];
			levelspread[i] = (cprv + ccur - 1) / ccur;
			cprv = ccur;
		}
	}
}
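
/*
 * Worked example (illustrative): with nr_cpu_ids == 6, two levels, and
 * levelcnt[] == { 1, 2 } (one root above two leaves), the balancing
 * branch computes levelspread[1] == (6 + 2 - 1) / 2 == 3 CPUs per leaf
 * and levelspread[0] == (2 + 1 - 1) / 1 == 2 leaves under the root.
 */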

extern void rcu_init_geometry(void);

/* Returns a pointer to the first leaf rcu_node structure. */
#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])

/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

/* Is this rcu_node the last leaf? */
#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])

/*
 * Do a full breadth-first scan of the {s,}rcu_node structures for the
 * specified state structure (for SRCU) or the only rcu_state structure
 * (for RCU).
 */
#define _rcu_for_each_node_breadth_first(sp, rnp) \
	for ((rnp) = &(sp)->node[0]; \
	     (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
#define rcu_for_each_node_breadth_first(rnp) \
	_rcu_for_each_node_breadth_first(&rcu_state, rnp)
#define srcu_for_each_node_breadth_first(ssp, rnp) \
	_rcu_for_each_node_breadth_first(ssp->srcu_sup, rnp)

/*
 * Scan the leaves of the rcu_node hierarchy for the rcu_state structure.
 * Note that if there is a singleton rcu_node tree with but one rcu_node
 * structure, this loop -will- visit the rcu_node structure. It is still
 * a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rnp) \
	for ((rnp) = rcu_first_leaf_node(); \
	     (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
	     (cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = cpumask_next((cpu), cpu_possible_mask))

/*
 * Iterate over all CPUs in a leaf RCU node's specified mask.
 */
#define rcu_find_next_bit(rnp, cpu, mask) \
	((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
	for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
	     (cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))

#endif /* !defined(CONFIG_TINY_RCU) */

#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_TASKS_RCU_GENERIC)

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on ->lock.
 */
#define raw_spin_lock_rcu_node(p) \
do { \
	raw_spin_lock(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_rcu_node(p) \
do { \
	lockdep_assert_irqs_disabled(); \
	raw_spin_unlock(&ACCESS_PRIVATE(p, lock)); \
} while (0)

#define raw_spin_lock_irq_rcu_node(p) \
do { \
	raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irq_rcu_node(p) \
do { \
	lockdep_assert_irqs_disabled(); \
	raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock)); \
} while (0)

#define raw_spin_lock_irqsave_rcu_node(p, flags) \
do { \
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags) \
do { \
	lockdep_assert_irqs_disabled(); \
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags); \
} while (0)

#define raw_spin_trylock_rcu_node(p) \
({ \
	bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock)); \
	\
	if (___locked) \
		smp_mb__after_unlock_lock(); \
	___locked; \
})

#define raw_lockdep_assert_held_rcu_node(p) \
	lockdep_assert_held(&ACCESS_PRIVATE(p, lock))
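
/*
 * Typical usage (illustrative sketch, not a specific call site):
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	...examine or update rnp under the lock, fully ordered against
 *	   the critical section that last held any rcu_node's ->lock...
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 */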

#endif // #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_TASKS_RCU_GENERIC)

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline bool rcu_async_should_hurry(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_async_hurry(void) { }
static inline void rcu_async_relax(void) { }
static inline bool rcu_cpu_online(int cpu) { return true; }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);     /* Internal RCU use. */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
bool rcu_async_should_hurry(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcu_async_hurry(void);
void rcu_async_relax(void);
void rcupdate_announce_bootup_oddness(void);
bool rcu_cpu_online(int cpu);
#ifdef CONFIG_TASKS_RCU_GENERIC
void show_rcu_tasks_gp_kthreads(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void show_rcu_tasks_gp_kthreads(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_TASKS_RCU
struct task_struct *get_rcu_tasks_gp_kthread(void);
#endif // #ifdef CONFIG_TASKS_RCU

#ifdef CONFIG_TASKS_RUDE_RCU
struct task_struct *get_rcu_tasks_rude_gp_kthread(void);
#endif // #ifdef CONFIG_TASKS_RUDE_RCU

#ifdef CONFIG_TASKS_RCU_GENERIC
void tasks_cblist_init_generic(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void tasks_cblist_init_generic(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */

#define RCU_SCHEDULER_INACTIVE	0
#define RCU_SCHEDULER_INIT	1
#define RCU_SCHEDULER_RUNNING	2

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_TASKS_FLAVOR,
	RCU_TASKS_RUDE_FLAVOR,
	RCU_TASKS_TRACING_FLAVOR,
	RCU_TRIVIAL_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_RCU_LAZY)
unsigned long rcu_get_jiffies_lazy_flush(void);
void rcu_set_jiffies_lazy_flush(unsigned long j);
#else
static inline unsigned long rcu_get_jiffies_lazy_flush(void) { return 0; }
static inline void rcu_set_jiffies_lazy_flush(unsigned long j) { }
#endif

#if defined(CONFIG_TREE_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
void rcu_gp_set_torture_wait(int duration);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags, unsigned long *gp_seq)
{
	*flags = 0;
	*gp_seq = 0;
}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
static inline void rcu_gp_set_torture_wait(int duration) { }
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
					   struct srcu_struct *sp, int *flags,
					   unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = sp->srcu_idx;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gp_seq);

#endif

#ifdef CONFIG_TINY_RCU
static inline bool rcu_dynticks_zero_in_eqs(int cpu, int *vp) { return false; }
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline bool rcu_check_boost_fail(unsigned long gp_state, int *cpup) { return true; }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
static inline void rcu_gp_slow_register(atomic_t *rgssp) { }
static inline void rcu_gp_slow_unregister(atomic_t *rgssp) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp);
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
bool rcu_check_boost_fail(unsigned long gp_state, int *cpup);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
extern struct kthread_worker *rcu_exp_gp_kworker;
void rcu_gp_slow_register(atomic_t *rgssp);
void rcu_gp_slow_unregister(atomic_t *rgssp);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
void rcu_bind_current_to_nocb(void);
#else
static inline void rcu_bind_current_to_nocb(void) { }
#endif

#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RCU)
void show_rcu_tasks_classic_gp_kthread(void);
#else
static inline void show_rcu_tasks_classic_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RUDE_RCU)
void show_rcu_tasks_rude_gp_kthread(void);
#else
static inline void show_rcu_tasks_rude_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_TRACE_RCU)
void show_rcu_tasks_trace_gp_kthread(void);
#else
static inline void show_rcu_tasks_trace_gp_kthread(void) {}
#endif

#ifdef CONFIG_TINY_RCU
static inline bool rcu_cpu_beenfullyonline(int cpu) { return true; }
#else
bool rcu_cpu_beenfullyonline(int cpu);
#endif

#if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
int rcu_stall_notifier_call_chain(unsigned long val, void *v);
#else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
static inline int rcu_stall_notifier_call_chain(unsigned long val, void *v) { return NOTIFY_DONE; }
#endif // #else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)

#endif /* __LINUX_RCU_H */