/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <linux/slab.h>
#include <trace/events/rcu.h>

/*
 * Grace-period counter management.
 *
 * The two least significant bits contain the control flags.
 * The most significant bits contain the grace-period sequence counter.
 *
 * When both control flags are zero, no grace period is in progress.
 * When either bit is non-zero, a grace period has started and is in
 * progress.  When the grace period completes, the control flags are reset
 * to 0 and the grace-period sequence counter is incremented.
 *
 * However, some specific RCU usages make use of custom values.
 *
 * SRCU special control values:
 *
 *	SRCU_SNP_INIT_SEQ	:	Invalid/init value set when SRCU node
 *					is initialized.
 *
 *	SRCU_STATE_IDLE		:	No SRCU gp is in progress.
 *
 *	SRCU_STATE_SCAN1	:	State set by rcu_seq_start().  Indicates
 *					we are scanning the readers on the slot
 *					defined as inactive (there might well
 *					be pending readers that will use that
 *					index, but their number is bounded).
 *
 *	SRCU_STATE_SCAN2	:	State set manually via rcu_seq_set_state().
 *					Indicates we are flipping the readers
 *					index and then scanning the readers on the
 *					slot newly designated as inactive (again,
 *					the number of pending readers that will use
 *					this inactive index is bounded).
 *
 * RCU polled GP special control value:
 *
 *	RCU_GET_STATE_COMPLETED	:	State value indicating that the polled
 *					GP has already completed.  This value
 *					covers both the state and the counter
 *					of the grace-period sequence number.
 */

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

/* Low-order bit definition for polled grace-period APIs. */
#define RCU_GET_STATE_COMPLETED	0x1

/* A complete grace period count */
#define RCU_SEQ_GP		(RCU_SEQ_STATE_MASK + 1)

extern int sysctl_sched_rt_runtime;

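/*
 * Illustrative example (an editorial addition, not part of the original
 * sources): with RCU_SEQ_CTR_SHIFT == 2, the value 0x9 (binary 1001)
 * encodes a counter portion of 2 (0x9 >> RCU_SEQ_CTR_SHIFT) and a state
 * portion of 1 (0x9 & RCU_SEQ_STATE_MASK), meaning that a grace period
 * is currently in progress.
 */
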
/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
	WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
	WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
	WRITE_ONCE(*sp, *sp + 1);
	smp_mb(); /* Ensure update-side operation after counter increment. */
	WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Compute the end-of-grace-period value for the specified sequence number. */
static inline unsigned long rcu_seq_endval(unsigned long *sp)
{
	return (*sp | RCU_SEQ_STATE_MASK) + 1;
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
	smp_mb(); /* Ensure update-side operation before counter increment. */
	WARN_ON_ONCE(!rcu_seq_state(*sp));
	WRITE_ONCE(*sp, rcu_seq_endval(sp));
}

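/*
 * Illustrative walk-through (an editorial addition), again assuming
 * RCU_SEQ_CTR_SHIFT == 2: starting from *sp == 0x8 (counter 2, idle),
 * rcu_seq_start() advances *sp to 0x9 (counter 2, state 1, GP in
 * progress), and the subsequent rcu_seq_end() computes
 * (0x9 | 0x3) + 1 == 0xc (counter 3, idle again).
 */
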
/*
 * rcu_seq_snap - Take a snapshot of the update side's sequence number.
 *
 * This function returns the earliest value of the grace-period sequence number
 * that will indicate that a full grace period has elapsed since the current
 * time.  Once the grace-period sequence number has reached this value, it will
 * be safe to invoke all callbacks that have been registered prior to the
 * current time.  This value is the current grace-period number plus two to the
 * power of the number of low-order bits reserved for state, then rounded up to
 * the next value in which the state bits are all zero.
 */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
	unsigned long s;

	s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
	smp_mb(); /* Above access must not bleed into critical section. */
	return s;
}
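
/*
 * Worked example (an editorial addition): with RCU_SEQ_STATE_MASK == 0x3,
 * rcu_seq_snap() computes (*sp + 0x7) & ~0x3.  If *sp == 0x8 (idle), the
 * result is 0xc, which the very next grace period's rcu_seq_end() will
 * produce.  If *sp == 0x9 (GP in progress), the result is 0x10, skipping
 * past the current grace period, which might not have observed the
 * caller's earlier accesses.
 */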

/* Return the current value of the update side's sequence number, no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
	return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not the
 * corresponding update-side operation has started.
 */
static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(READ_ONCE(*sp), s);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred, but do not allow the
 * (ULONG_MAX / 2) safety-factor/guard-band.
 *
 * The token returned by get_state_synchronize_rcu_full() is based on
 * rcu_state.gp_seq but it is tested in poll_state_synchronize_rcu_full()
 * against the root rnp->gp_seq.  Since rcu_seq_start() is first called
 * on rcu_state.gp_seq and only later reflected on the root rnp->gp_seq,
 * it is possible for rcu_seq_snap(rcu_state.gp_seq) to return a value
 * 2 full grace periods ahead of the root rnp->gp_seq.  To prevent the
 * full polling API from falsely reporting, after a counter wrap, that a
 * GP completed instantly when nothing of the sort happened, adjust for
 * those 2 GPs in the ULONG_CMP_LT() below.
 */
static inline bool rcu_seq_done_exact(unsigned long *sp, unsigned long s)
{
	unsigned long cur_s = READ_ONCE(*sp);

	return ULONG_CMP_GE(cur_s, s) || ULONG_CMP_LT(cur_s, s - (2 * RCU_SEQ_GP));
}
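
/*
 * Numeric sketch (an editorial addition): with RCU_SEQ_GP == 4, a
 * snapshot s may legitimately run up to 2 grace periods (8 counts)
 * ahead of the root rnp->gp_seq.  Values of cur_s in [s - 8, s - 1]
 * therefore report "not yet done" rather than being misread as an
 * ancient, wrapped-around counter that would make the GP appear
 * complete.
 */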

/*
 * Has a grace period completed since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}

/*
 * Has a grace period started since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
			    new);
}

/*
 * Roughly how many full grace periods have elapsed between the collection
 * of the two specified grace periods?
 */
static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
{
	unsigned long rnd_diff;

	if (old == new)
		return 0;
	/*
	 * Compute the number of grace periods (still shifted up), plus
	 * one if either of new and old is not an exact grace period.
	 */
	rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
		   ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
		   ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
	if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
		return 1; /* Definitely no grace period has elapsed. */
	return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
}
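
/*
 * Worked example (an editorial addition): with RCU_SEQ_CTR_SHIFT == 2,
 * rcu_seq_diff(0x10, 0x8) computes rnd_diff == 8 and then returns
 * ((8 - 3 - 1) >> 2) + 2 == 3, even though the two counters are exactly
 * two grace periods apart.  As the header comment says, the result is
 * only rough, and it errs on the high side.
 */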

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part
 * of the RCU API.  They live in this common header because they are used
 * by all RCU implementations.  (A usage sketch follows the definitions
 * below.)
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY	0
# define STATE_RCU_HEAD_QUEUED	1

extern const struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	int r1;

	r1 = debug_object_activate(head, &rcuhead_debug_descr);
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_READY,
				  STATE_RCU_HEAD_QUEUED);
	return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
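
/*
 * Usage sketch (an editorial addition, not taken verbatim from any
 * caller): a call_rcu()-style function queues the callback under
 * debug-objects tracking, and the callback-invocation path unqueues it
 * just before invoking the function:
 *
 *	if (debug_rcu_head_queue(head)) {
 *		... probable double call_rcu(), so leak the callback ...
 *		return;
 *	}
 *	...
 *	debug_rcu_head_unqueue(head);
 *	head->func(head);
 */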

static inline void debug_rcu_head_callback(struct rcu_head *rhp)
{
	if (unlikely(!rhp->func))
		kmem_dump_obj(rhp);
}

static inline bool rcu_barrier_cb_is_done(struct rcu_head *rhp)
{
	return rhp->next == rhp;
}

extern int rcu_cpu_stall_suppress_at_boot;

static inline bool rcu_stall_is_suppressed_at_boot(void)
{
	return rcu_cpu_stall_suppress_at_boot && !rcu_inkernel_boot_has_ended();
}

extern int rcu_cpu_stall_notifiers;

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_ftrace_dump;
extern int rcu_cpu_stall_suppress;
extern int rcu_cpu_stall_timeout;
extern int rcu_exp_cpu_stall_timeout;
extern int rcu_cpu_stall_cputime;
extern bool rcu_exp_stall_task_details __read_mostly;
int rcu_jiffies_till_stall_check(void);
int rcu_exp_jiffies_till_stall_check(void);

static inline bool rcu_stall_is_suppressed(void)
{
	return rcu_stall_is_suppressed_at_boot() || rcu_cpu_stall_suppress;
}

#define rcu_ftrace_dump_stall_suppress() \
do { \
	if (!rcu_cpu_stall_suppress) \
		rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
	if (rcu_cpu_stall_suppress == 3) \
		rcu_cpu_stall_suppress = 0; \
} while (0)

#else /* #ifdef CONFIG_RCU_STALL_COMMON */

static inline bool rcu_stall_is_suppressed(void)
{
	return rcu_stall_is_suppressed_at_boot();
}
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)	tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
	\
	if (!atomic_read(&___rfd_beenhere) && \
	    !atomic_xchg(&___rfd_beenhere, 1)) { \
		tracing_off(); \
		rcu_ftrace_dump_stall_suppress(); \
		ftrace_dump(oops_dump_mode); \
		rcu_ftrace_dump_stall_unsuppress(); \
	} \
} while (0)
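
/*
 * Usage sketch (an editorial addition): a diagnostic path that wants at
 * most one ftrace dump per boot passes a standard enum ftrace_dump_mode
 * value, for example:
 *
 *	rcu_ftrace_dump(DUMP_ALL);
 */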

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if !defined(CONFIG_TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
static bool rcu_fanout_exact;
static int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
	int i;

	for (i = 0; i < RCU_NUM_LVLS; i++)
		levelspread[i] = INT_MIN;
	if (rcu_fanout_exact) {
		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
		for (i = rcu_num_lvls - 2; i >= 0; i--)
			levelspread[i] = RCU_FANOUT;
	} else {
		int ccur;
		int cprv;

		cprv = nr_cpu_ids;
		for (i = rcu_num_lvls - 1; i >= 0; i--) {
			ccur = levelcnt[i];
			levelspread[i] = (cprv + ccur - 1) / ccur;
			cprv = ccur;
		}
	}
}
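
/*
 * Worked example (an editorial addition): for a hypothetical two-level
 * tree with nr_cpu_ids == 64 and levelcnt == {1, 4} (one root node and
 * four leaves), the balanced branch computes
 * levelspread[1] = (64 + 4 - 1) / 4 == 16 CPUs per leaf, then
 * levelspread[0] = (4 + 1 - 1) / 1 == 4 leaves under the root.
 */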

extern void rcu_init_geometry(void);

/* Returns a pointer to the first leaf rcu_node structure. */
#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])

/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

/* Is this rcu_node the last leaf? */
#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])

/*
 * Do a full breadth-first scan of the {s,}rcu_node structures for the
 * specified state structure (for SRCU) or the only rcu_state structure
 * (for RCU).
 */
#define _rcu_for_each_node_breadth_first(sp, rnp) \
	for ((rnp) = &(sp)->node[0]; \
	     (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
#define rcu_for_each_node_breadth_first(rnp) \
	_rcu_for_each_node_breadth_first(&rcu_state, rnp)
#define srcu_for_each_node_breadth_first(ssp, rnp) \
	_rcu_for_each_node_breadth_first(ssp->srcu_sup, rnp)

/*
 * Scan the leaves of the rcu_node hierarchy for the rcu_state structure.
 * Note that if there is a singleton rcu_node tree with but one rcu_node
 * structure, this loop -will- visit the rcu_node structure.  It is still
 * a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rnp) \
	for ((rnp) = rcu_first_leaf_node(); \
	     (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
	     (cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = cpumask_next((cpu), cpu_possible_mask))

/*
 * Iterate over all CPUs in a leaf RCU node's specified mask.
 */
#define rcu_find_next_bit(rnp, cpu, mask) \
	((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
	for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
	     (cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))
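
/*
 * Usage sketch (an editorial addition): visiting every possible CPU in
 * every leaf rcu_node structure:
 *
 *	struct rcu_node *rnp;
 *	int cpu;
 *
 *	rcu_for_each_leaf_node(rnp) {
 *		for_each_leaf_node_possible_cpu(rnp, cpu) {
 *			... per-CPU work ...
 *		}
 *	}
 */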

#endif /* !defined(CONFIG_TINY_RCU) */

#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_TASKS_RCU_GENERIC)

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree-traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and, most importantly, transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on
 * ->lock.  (A usage sketch follows the definitions below.)
 */
#define raw_spin_lock_rcu_node(p) \
do { \
	raw_spin_lock(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_rcu_node(p) \
do { \
	lockdep_assert_irqs_disabled(); \
	raw_spin_unlock(&ACCESS_PRIVATE(p, lock)); \
} while (0)

#define raw_spin_lock_irq_rcu_node(p) \
do { \
	raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irq_rcu_node(p) \
do { \
	lockdep_assert_irqs_disabled(); \
	raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock)); \
} while (0)

#define raw_spin_lock_irqsave_rcu_node(p, flags) \
do { \
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags) \
do { \
	lockdep_assert_irqs_disabled(); \
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags); \
} while (0)

#define raw_spin_trylock_rcu_node(p) \
({ \
	bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock)); \
	\
	if (___locked) \
		smp_mb__after_unlock_lock(); \
	___locked; \
})

#define raw_lockdep_assert_held_rcu_node(p) \
	lockdep_assert_held(&ACCESS_PRIVATE(p, lock))
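
/*
 * Usage sketch (an editorial addition): updating a leaf rcu_node
 * structure under its lock with interrupts masked:
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	raw_lockdep_assert_held_rcu_node(rnp);
 *	... update rnp fields ...
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 */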

#endif // #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_TASKS_RCU_GENERIC)

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline bool rcu_async_should_hurry(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_async_hurry(void) { }
static inline void rcu_async_relax(void) { }
static inline bool rcu_cpu_online(int cpu) { return true; }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void); /* Internal RCU use. */
bool rcu_gp_is_expedited(void); /* Internal RCU use. */
bool rcu_async_should_hurry(void); /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcu_async_hurry(void);
void rcu_async_relax(void);
void rcupdate_announce_bootup_oddness(void);
bool rcu_cpu_online(int cpu);
#ifdef CONFIG_TASKS_RCU_GENERIC
void show_rcu_tasks_gp_kthreads(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void show_rcu_tasks_gp_kthreads(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_TASKS_RCU
struct task_struct *get_rcu_tasks_gp_kthread(void);
void rcu_tasks_get_gp_data(int *flags, unsigned long *gp_seq);
#endif // # ifdef CONFIG_TASKS_RCU

#ifdef CONFIG_TASKS_RUDE_RCU
struct task_struct *get_rcu_tasks_rude_gp_kthread(void);
void rcu_tasks_rude_get_gp_data(int *flags, unsigned long *gp_seq);
#endif // # ifdef CONFIG_TASKS_RUDE_RCU

#ifdef CONFIG_TASKS_TRACE_RCU
void rcu_tasks_trace_get_gp_data(int *flags, unsigned long *gp_seq);
#endif

#ifdef CONFIG_TASKS_RCU_GENERIC
void tasks_cblist_init_generic(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void tasks_cblist_init_generic(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */

#define RCU_SCHEDULER_INACTIVE	0
#define RCU_SCHEDULER_INIT	1
#define RCU_SCHEDULER_RUNNING	2

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_TASKS_FLAVOR,
	RCU_TASKS_RUDE_FLAVOR,
	RCU_TASKS_TRACING_FLAVOR,
	RCU_TRIVIAL_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_RCU_LAZY)
unsigned long rcu_get_jiffies_lazy_flush(void);
void rcu_set_jiffies_lazy_flush(unsigned long j);
#else
static inline unsigned long rcu_get_jiffies_lazy_flush(void) { return 0; }
static inline void rcu_set_jiffies_lazy_flush(unsigned long j) { }
#endif

#if defined(CONFIG_TREE_RCU)
void rcutorture_get_gp_data(int *flags, unsigned long *gp_seq);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
void rcu_gp_set_torture_wait(int duration);
void rcu_set_gpwrap_lag(unsigned long lag);
int rcu_get_gpwrap_count(int cpu);
#else
static inline void rcutorture_get_gp_data(int *flags, unsigned long *gp_seq)
{
	*flags = 0;
	*gp_seq = 0;
}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
static inline void rcu_gp_set_torture_wait(int duration) { }
static inline void rcu_set_gpwrap_lag(unsigned long lag) { }
static inline int rcu_get_gpwrap_count(int cpu) { return 0; }
#endif
unsigned long long rcutorture_gather_gp_seqs(void);
void rcutorture_format_gp_seqs(unsigned long long seqs, char *cp, size_t len);

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(struct srcu_struct *sp, int *flags,
					   unsigned long *gp_seq)
{
	*flags = 0;
	*gp_seq = sp->srcu_idx;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(struct srcu_struct *sp, int *flags,
			     unsigned long *gp_seq);

#endif

#ifdef CONFIG_TINY_RCU
static inline bool rcu_watching_zero_in_eqs(int cpu, int *vp) { return false; }
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline bool rcu_check_boost_fail(unsigned long gp_state, int *cpup) { return true; }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
static inline void rcu_gp_slow_register(atomic_t *rgssp) { }
static inline void rcu_gp_slow_unregister(atomic_t *rgssp) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_watching_zero_in_eqs(int cpu, int *vp);
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
bool rcu_check_boost_fail(unsigned long gp_state, int *cpup);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
extern struct kthread_worker *rcu_exp_gp_kworker;
void rcu_gp_slow_register(atomic_t *rgssp);
void rcu_gp_slow_unregister(atomic_t *rgssp);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_TINY_SRCU
static inline unsigned long srcu_batches_completed(struct srcu_struct *sp) { return 0; }
#else // #ifdef CONFIG_TINY_SRCU
unsigned long srcu_batches_completed(struct srcu_struct *sp);
#endif // #else // #ifdef CONFIG_TINY_SRCU

#ifdef CONFIG_RCU_NOCB_CPU
void rcu_bind_current_to_nocb(void);
#else
static inline void rcu_bind_current_to_nocb(void) { }
#endif

#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RCU)
void show_rcu_tasks_classic_gp_kthread(void);
#else
static inline void show_rcu_tasks_classic_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RUDE_RCU)
void show_rcu_tasks_rude_gp_kthread(void);
#else
static inline void show_rcu_tasks_rude_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_TRACE_RCU)
void show_rcu_tasks_trace_gp_kthread(void);
#else
static inline void show_rcu_tasks_trace_gp_kthread(void) {}
#endif

#ifdef CONFIG_TINY_RCU
static inline bool rcu_cpu_beenfullyonline(int cpu) { return true; }
#else
bool rcu_cpu_beenfullyonline(int cpu);
#endif

#if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
int rcu_stall_notifier_call_chain(unsigned long val, void *v);
#else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
static inline int rcu_stall_notifier_call_chain(unsigned long val, void *v) { return NOTIFY_DONE; }
#endif // #else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)

#endif /* __LINUX_RCU_H */