// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	    Josh Triplett <josh@joshtriplett.org>
 *
 * See also: Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/rcu_notifier.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>
#include <linux/nmi.h>

#include "rcu.h"

MODULE_DESCRIPTION("Read-Copy Update module-based torture test facility");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

// Bits for ->extendables field, extendables param, and related definitions.
#define RCUTORTURE_RDR_SHIFT_1	8	// Put SRCU index in upper bits.
#define RCUTORTURE_RDR_MASK_1	(0xff << RCUTORTURE_RDR_SHIFT_1)
#define RCUTORTURE_RDR_SHIFT_2	16	// Put SRCU index in upper bits.
#define RCUTORTURE_RDR_MASK_2	(0xff << RCUTORTURE_RDR_SHIFT_2)
#define RCUTORTURE_RDR_BH	0x01	// Extend readers by disabling bh.
#define RCUTORTURE_RDR_IRQ	0x02	//  ... disabling interrupts.
#define RCUTORTURE_RDR_PREEMPT	0x04	//  ... disabling preemption.
#define RCUTORTURE_RDR_RBH	0x08	//  ... rcu_read_lock_bh().
#define RCUTORTURE_RDR_SCHED	0x10	//  ... rcu_read_lock_sched().
#define RCUTORTURE_RDR_RCU_1	0x20	//  ... entering another RCU reader.
#define RCUTORTURE_RDR_RCU_2	0x40	//  ... entering another RCU reader.
#define RCUTORTURE_RDR_UPDOWN	0x80	//  ... up-read from task, down-read from timer.
					//	Note: Manual start, automatic end.
#define RCUTORTURE_RDR_NBITS	8	// Number of bits defined above.
#define RCUTORTURE_MAX_EXTEND	\
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED) // Intentionally omit RCUTORTURE_RDR_UPDOWN.
#define RCUTORTURE_RDR_ALLBITS	\
	(RCUTORTURE_MAX_EXTEND | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2 | \
	 RCUTORTURE_RDR_MASK_1 | RCUTORTURE_RDR_MASK_2)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
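
/*
 * Worked example of the encoding above: a reader segment holding
 * rcu_read_lock_bh() plus one nested cur_ops->readlock() would carry
 * RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_RCU_1 in its low-order byte, with
 * the index returned by that readlock (the SRCU index, for SRCU flavors)
 * stored in the RCUTORTURE_RDR_MASK_1 byte and a second nested reader's
 * index, if any, in the RCUTORTURE_RDR_MASK_2 byte.
 */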

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
torture_param(bool, gp_cond_exp_full, false,
	      "Use conditional/async full-state expedited GP wait primitives");
torture_param(int, gp_cond_wi, 16 * USEC_PER_SEC / HZ,
	      "Wait interval for normal conditional grace periods, us (default 16 jiffies)");
torture_param(int, gp_cond_wi_exp, 128,
	      "Wait interval for expedited conditional grace periods, us (default 128 us)");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
torture_param(int, gp_poll_wi, 16 * USEC_PER_SEC / HZ,
	      "Wait interval for normal polled grace periods, us (default 16 jiffies)");
torture_param(int, gp_poll_wi_exp, 128,
	      "Wait interval for expedited polled grace periods, us (default 128 us)");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
torture_param(int, n_up_down, 32, "# of concurrent up/down hrtimer-based RCU readers");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(bool, gpwrap_lag, true, "Enable grace-period wrap lag testing");
torture_param(int, gpwrap_lag_gps, 8, "Value to set for set_gpwrap_lag during an active testing period.");
torture_param(int, gpwrap_lag_cycle_mins, 30, "Total cycle duration for gpwrap lag testing (in minutes)");
torture_param(int, gpwrap_lag_active_mins, 5, "Duration for which gpwrap lag is active within each cycle (in minutes)");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, preempt_duration, 0, "Preemption duration (ms), zero to disable");
torture_param(int, preempt_interval, MSEC_PER_SEC, "Interval between preemptions (ms)");
torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, reader_flavor, SRCU_READ_FLAVOR_NORMAL, "Reader flavors to use, one per bit.");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s).");
torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall.");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_cpu_repeat, 0, "Number of additional stalls after the first one.");
torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds.");
torture_param(int, test_boost_holdoff, 0, "Holdoff time from rcutorture start, seconds.");
torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds.");
torture_param(int, test_nmis, 0, "End-test NMI tests, 0 to disable.");
torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs");
torture_param(int, test_srcu_lockdep, 0, "Test specified SRCU deadlock scenario.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
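
/*
 * Illustrative usage (the parameter values here are examples, not
 * defaults):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=30
 *
 * or, for built-in rcutorture, the equivalent boot parameters, for
 * example rcutorture.torture_type=srcu and rcutorture.nreaders=8.
 */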

static int nrealnocbers;
static int nrealreaders;
static int nrealfakewriters;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *updown_task;
static struct task_struct **nocb_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct **fwd_prog_tasks;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;
static struct task_struct *preempt_task;

#define RCU_TORTURE_PIPE_LEN 10
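
/*
 * Each removed rcu_torture element ages through a pipeline of this many
 * grace-period stages before being freed. A reader that observes an
 * element late in the pipeline has therefore witnessed a grace period
 * that was too short, which rcutorture reports as a failure.
 */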

// Mailbox-like structure to check RCU global memory ordering.
struct rcu_torture_reader_check {
	unsigned long rtc_myloops;
	int rtc_chkrdr;
	unsigned long rtc_chkloops;
	int rtc_ready;
	struct rcu_torture_reader_check *rtc_assigner;
} ____cacheline_internodealigned_in_smp;

// Update-side data structure used to check RCU readers.
struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
	struct rcu_torture_reader_check *rtort_chkp;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_mbchk_fail;
static atomic_t n_rcu_torture_mbchk_tries;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static unsigned long start_gp_seq;
static atomic_long_t n_nocb_offload;
static atomic_long_t n_nocb_deoffload;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_GET_FULL	6
#define RTWS_COND_GET_EXP	7
#define RTWS_COND_GET_EXP_FULL	8
#define RTWS_COND_SYNC		9
#define RTWS_COND_SYNC_FULL	10
#define RTWS_COND_SYNC_EXP	11
#define RTWS_COND_SYNC_EXP_FULL	12
#define RTWS_POLL_GET		13
#define RTWS_POLL_GET_FULL	14
#define RTWS_POLL_GET_EXP	15
#define RTWS_POLL_GET_EXP_FULL	16
#define RTWS_POLL_WAIT		17
#define RTWS_POLL_WAIT_FULL	18
#define RTWS_POLL_WAIT_EXP	19
#define RTWS_POLL_WAIT_EXP_FULL	20
#define RTWS_SYNC		21
#define RTWS_STUTTER		22
#define RTWS_STOPPING		23
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_GET_FULL",
	"RTWS_COND_GET_EXP",
	"RTWS_COND_GET_EXP_FULL",
	"RTWS_COND_SYNC",
	"RTWS_COND_SYNC_FULL",
	"RTWS_COND_SYNC_EXP",
	"RTWS_COND_SYNC_EXP_FULL",
	"RTWS_POLL_GET",
	"RTWS_POLL_GET_FULL",
	"RTWS_POLL_GET_EXP",
	"RTWS_POLL_GET_EXP_FULL",
	"RTWS_POLL_WAIT",
	"RTWS_POLL_WAIT_FULL",
	"RTWS_POLL_WAIT_EXP",
	"RTWS_POLL_WAIT_EXP_FULL",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
	int rt_cpu;
	int rt_end_cpu;
	unsigned long long rt_gp_seq;
	unsigned long long rt_gp_seq_end;
	u64 rt_ts;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;
static int rt_read_preempted;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

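/* Convert trace_clock_local()'s nanoseconds to the microseconds recorded in traces. */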
#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/* and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static atomic_t rcu_fwd_cb_nodelay;	/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	int (*readlock_held)(void);	// lockdep.
	int (*readlock_nesting)(void);	// actual nesting, if available, -1 if not.
	int (*down_read)(void);
	void (*up_read)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	void (*exp_current)(void);
	unsigned long (*get_gp_state_exp)(void);
	unsigned long (*start_gp_poll_exp)(void);
	void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_gp_state_exp)(unsigned long oldstate);
	void (*cond_sync_exp)(unsigned long oldstate);
	void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*get_comp_state)(void);
	void (*get_comp_state_full)(struct rcu_gp_oldstate *rgosp);
	bool (*same_gp_state)(unsigned long oldstate1, unsigned long oldstate2);
	bool (*same_gp_state_full)(struct rcu_gp_oldstate *rgosp1, struct rcu_gp_oldstate *rgosp2);
	unsigned long (*get_gp_state)(void);
	void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*start_gp_poll)(void);
	void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_gp_state)(unsigned long oldstate);
	bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_need_2gp)(bool poll, bool poll_full);
	void (*cond_sync)(unsigned long oldstate);
	void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp);
	int poll_active;
	int poll_active_full;
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	void (*gp_kthread_dbg)(void);
	bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
	int (*stall_dur)(void);
	void (*get_gp_data)(int *flags, unsigned long *gp_seq);
	void (*gp_slow_register)(atomic_t *rgssp);
	void (*gp_slow_unregister)(atomic_t *rgssp);
	bool (*reader_blocked)(void);
	unsigned long long (*gather_gp_seqs)(void);
	void (*format_gp_seqs)(unsigned long long seqs, char *cp, size_t len);
	void (*set_gpwrap_lag)(unsigned long lag);
	int (*get_gpwrap_count)(int cpu);
	long cbflood_max;
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	int no_pi_lock;
	int debug_objects;
	int start_poll_irqsoff;
	int have_up_down;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int torture_readlock_not_held(void)
{
	return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
}

static int rcu_torture_read_lock(void)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/*
	 * We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state.
	 */
	if (!atomic_read(&rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if ((preempt_count() & HARDIRQ_MASK) || softirq_count())
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500)))
		torture_preempt_schedule(); /* QS only if preemptible. */
}

static void rcu_torture_read_unlock(int idx)
{
	rcu_read_unlock();
}

static int rcu_torture_readlock_nesting(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RCU))
		return rcu_preempt_depth();
	if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
		return (preempt_count() & PREEMPT_MASK);
	return -1;
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;
	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);

	if (rtrcp) {
		WRITE_ONCE(rp->rtort_chkp, NULL);
		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count);
	if (i + 1 >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static bool rcu_poll_need_2gp(bool poll, bool poll_full)
{
	return poll;
}

static struct rcu_torture_ops rcu_ops = {
	.ttype = RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.readlock_held = torture_readlock_not_held,
	.readlock_nesting = rcu_torture_readlock_nesting,
	.get_gp_seq = rcu_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.deferred_free = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.exp_sync = synchronize_rcu_expedited,
	.same_gp_state = same_state_synchronize_rcu,
	.same_gp_state_full = same_state_synchronize_rcu_full,
	.get_comp_state = get_completed_synchronize_rcu,
	.get_comp_state_full = get_completed_synchronize_rcu_full,
	.get_gp_state = get_state_synchronize_rcu,
	.get_gp_state_full = get_state_synchronize_rcu_full,
	.start_gp_poll = start_poll_synchronize_rcu,
	.start_gp_poll_full = start_poll_synchronize_rcu_full,
	.poll_gp_state = poll_state_synchronize_rcu,
	.poll_gp_state_full = poll_state_synchronize_rcu_full,
	.poll_need_2gp = rcu_poll_need_2gp,
	.cond_sync = cond_synchronize_rcu,
	.cond_sync_full = cond_synchronize_rcu_full,
	.poll_active = NUM_ACTIVE_RCU_POLL_OLDSTATE,
	.poll_active_full = NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE,
	.get_gp_state_exp = get_state_synchronize_rcu,
	.start_gp_poll_exp = start_poll_synchronize_rcu_expedited,
	.start_gp_poll_exp_full = start_poll_synchronize_rcu_expedited_full,
	.poll_gp_state_exp = poll_state_synchronize_rcu,
	.cond_sync_exp = cond_synchronize_rcu_expedited,
	.cond_sync_exp_full = cond_synchronize_rcu_expedited_full,
	.call = call_rcu_hurry,
	.cb_barrier = rcu_barrier,
	.fqs = rcu_force_quiescent_state,
	.gp_kthread_dbg = show_rcu_gp_kthreads,
	.check_boost_failed = rcu_check_boost_fail,
	.stall_dur = rcu_jiffies_till_stall_check,
	.get_gp_data = rcutorture_get_gp_data,
	.gp_slow_register = rcu_gp_slow_register,
	.gp_slow_unregister = rcu_gp_slow_unregister,
	.reader_blocked = IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)
			  ? has_rcu_reader_blocked
			  : NULL,
	.gather_gp_seqs = rcutorture_gather_gp_seqs,
	.format_gp_seqs = rcutorture_format_gp_seqs,
	.set_gpwrap_lag = rcu_set_gpwrap_lag,
	.get_gpwrap_count = rcu_get_gpwrap_count,
	.irq_capable = 1,
	.can_boost = IS_ENABLED(CONFIG_RCU_BOOST),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.debug_objects = 1,
	.start_poll_irqsoff = 1,
	.name = "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype = INVALID_RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock,
	.readlock_held = torture_readlock_not_held,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_busted_torture_deferred_free,
	.sync = synchronize_rcu_busted,
	.exp_sync = synchronize_rcu_busted,
	.call = call_rcu_busted,
	.gather_gp_seqs = rcutorture_gather_gp_seqs,
	.format_gp_seqs = rcutorture_format_gp_seqs,
	.irq_capable = 1,
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
DEFINE_STATIC_SRCU_FAST(srcu_ctlf);
DEFINE_STATIC_SRCU_FAST_UPDOWN(srcu_ctlfud);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;
static struct rcu_torture_ops srcud_ops;

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	if (!reader_flavor || (reader_flavor & SRCU_READ_FLAVOR_NORMAL))
		VERBOSE_TOROUT_STRING("srcu_torture_init normal SRCU");
	if (reader_flavor & SRCU_READ_FLAVOR_NMI)
		VERBOSE_TOROUT_STRING("srcu_torture_init NMI-safe SRCU");
	if (reader_flavor & SRCU_READ_FLAVOR_FAST) {
		srcu_ctlp = &srcu_ctlf;
		VERBOSE_TOROUT_STRING("srcu_torture_init fast SRCU");
	}
	if (reader_flavor & SRCU_READ_FLAVOR_FAST_UPDOWN) {
		srcu_ctlp = &srcu_ctlfud;
		VERBOSE_TOROUT_STRING("srcu_torture_init fast-up/down SRCU");
	}
}

static void srcu_get_gp_data(int *flags, unsigned long *gp_seq)
{
	srcutorture_get_gp_data(srcu_ctlp, flags, gp_seq);
}

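/*
 * The value returned by srcu_torture_read_lock() packs the index obtained
 * from each requested reader flavor into its own bit position: bit 0 for
 * normal SRCU, bit 1 for NMI-safe, bit 2 for fast, and bit 3 for
 * fast-up/down.  srcu_torture_read_unlock() decodes this same layout.
 */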
static int srcu_torture_read_lock(void)
{
	int idx;
	struct srcu_ctr __percpu *scp;
	int ret = 0;

	WARN_ON_ONCE(reader_flavor & ~SRCU_READ_FLAVOR_ALL);

	if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL)) {
		idx = srcu_read_lock(srcu_ctlp);
		WARN_ON_ONCE(idx & ~0x1);
		ret += idx;
	}
	if (reader_flavor & SRCU_READ_FLAVOR_NMI) {
		idx = srcu_read_lock_nmisafe(srcu_ctlp);
		WARN_ON_ONCE(idx & ~0x1);
		ret += idx << 1;
	}
	if (reader_flavor & SRCU_READ_FLAVOR_FAST) {
		scp = srcu_read_lock_fast(srcu_ctlp);
		idx = __srcu_ptr_to_ctr(srcu_ctlp, scp);
		WARN_ON_ONCE(idx & ~0x1);
		ret += idx << 2;
	}
	if (reader_flavor & SRCU_READ_FLAVOR_FAST_UPDOWN) {
		scp = srcu_read_lock_fast_updown(srcu_ctlp);
		idx = __srcu_ptr_to_ctr(srcu_ctlp, scp);
		WARN_ON_ONCE(idx & ~0x1);
		ret += idx << 3;
	}
	return ret;
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx)
{
	WARN_ON_ONCE((reader_flavor && (idx & ~reader_flavor)) || (!reader_flavor && (idx & ~0x1)));
	if (reader_flavor & SRCU_READ_FLAVOR_FAST_UPDOWN)
		srcu_read_unlock_fast_updown(srcu_ctlp,
					     __srcu_ctr_to_ptr(srcu_ctlp, (idx & 0x8) >> 3));
	if (reader_flavor & SRCU_READ_FLAVOR_FAST)
		srcu_read_unlock_fast(srcu_ctlp, __srcu_ctr_to_ptr(srcu_ctlp, (idx & 0x4) >> 2));
	if (reader_flavor & SRCU_READ_FLAVOR_NMI)
		srcu_read_unlock_nmisafe(srcu_ctlp, (idx & 0x2) >> 1);
	if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL))
		srcu_read_unlock(srcu_ctlp, idx & 0x1);
}

static int torture_srcu_read_lock_held(void)
{
	return srcu_read_lock_held(srcu_ctlp);
}

static bool srcu_torture_have_up_down(void)
{
	int rf = reader_flavor;

	if (!rf)
		rf = SRCU_READ_FLAVOR_NORMAL;
	return !!(cur_ops->have_up_down & rf);
}

static int srcu_torture_down_read(void)
{
	int idx;
	struct srcu_ctr __percpu *scp;

	WARN_ON_ONCE(reader_flavor & ~SRCU_READ_FLAVOR_ALL);
	WARN_ON_ONCE(reader_flavor & (reader_flavor - 1));

	if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL)) {
		idx = srcu_down_read(srcu_ctlp);
		WARN_ON_ONCE(idx & ~0x1);
		return idx;
	}
	if (reader_flavor & SRCU_READ_FLAVOR_FAST_UPDOWN) {
		scp = srcu_down_read_fast(srcu_ctlp);
		idx = __srcu_ptr_to_ctr(srcu_ctlp, scp);
		WARN_ON_ONCE(idx & ~0x1);
		return idx << 3;
	}
	WARN_ON_ONCE(1);
	return 0;
}

static void srcu_torture_up_read(int idx)
{
	WARN_ON_ONCE((reader_flavor && (idx & ~reader_flavor)) || (!reader_flavor && (idx & ~0x1)));
	if (reader_flavor & SRCU_READ_FLAVOR_FAST_UPDOWN)
		srcu_up_read_fast(srcu_ctlp, __srcu_ctr_to_ptr(srcu_ctlp, (idx & 0x8) >> 3));
	else if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) ||
		 !(reader_flavor & SRCU_READ_FLAVOR_ALL))
		srcu_up_read(srcu_ctlp, idx & 0x1);
	else
		WARN_ON_ONCE(1);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_get_gp_state(void)
{
	return get_state_synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_start_gp_poll(void)
{
	return start_poll_synchronize_srcu(srcu_ctlp);
}

static bool srcu_torture_poll_gp_state(unsigned long oldstate)
{
	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static void srcu_torture_expedite_current(void)
{
	srcu_expedite_current(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.down_read = srcu_torture_down_read,
	.up_read = srcu_torture_up_read,
	.readlock_held = torture_srcu_read_lock_held,
	.get_gp_seq = srcu_torture_completed,
	.gp_diff = rcu_seq_diff,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.exp_current = srcu_torture_expedite_current,
	.same_gp_state = same_state_synchronize_srcu,
	.get_comp_state = get_completed_synchronize_srcu,
	.get_gp_state = srcu_torture_get_gp_state,
	.start_gp_poll = srcu_torture_start_gp_poll,
	.poll_gp_state = srcu_torture_poll_gp_state,
	.poll_active = NUM_ACTIVE_SRCU_POLL_OLDSTATE,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.get_gp_data = srcu_get_gp_data,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.debug_objects = 1,
	.have_up_down = IS_ENABLED(CONFIG_TINY_SRCU)
			? 0 : SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_FAST_UPDOWN,
	.name = "srcu"
};

static void srcud_torture_init(void)
{
	rcu_sync_torture_init();
	if (!reader_flavor || (reader_flavor & SRCU_READ_FLAVOR_NORMAL)) {
		WARN_ON(init_srcu_struct(&srcu_ctld));
		VERBOSE_TOROUT_STRING("srcud_torture_init normal SRCU");
	} else if (reader_flavor & SRCU_READ_FLAVOR_NMI) {
		WARN_ON(init_srcu_struct(&srcu_ctld));
		VERBOSE_TOROUT_STRING("srcud_torture_init NMI-safe SRCU");
	} else if (reader_flavor & SRCU_READ_FLAVOR_FAST) {
		WARN_ON(init_srcu_struct_fast(&srcu_ctld));
		VERBOSE_TOROUT_STRING("srcud_torture_init fast SRCU");
	} else if (reader_flavor & SRCU_READ_FLAVOR_FAST_UPDOWN) {
		WARN_ON(init_srcu_struct_fast_updown(&srcu_ctld));
		VERBOSE_TOROUT_STRING("srcud_torture_init fast-up/down SRCU");
	} else {
		WARN_ON(init_srcu_struct(&srcu_ctld));
	}
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcud_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.readlock_held = torture_srcu_read_lock_held,
	.down_read = srcu_torture_down_read,
	.up_read = srcu_torture_up_read,
	.get_gp_seq = srcu_torture_completed,
	.gp_diff = rcu_seq_diff,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.exp_current = srcu_torture_expedite_current,
	.same_gp_state = same_state_synchronize_srcu,
	.get_comp_state = get_completed_synchronize_srcu,
	.get_gp_state = srcu_torture_get_gp_state,
	.start_gp_poll = srcu_torture_start_gp_poll,
	.poll_gp_state = srcu_torture_poll_gp_state,
	.poll_active = NUM_ACTIVE_SRCU_POLL_OLDSTATE,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.get_gp_data = srcu_get_gp_data,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.debug_objects = 1,
	.have_up_down = IS_ENABLED(CONFIG_TINY_SRCU)
			? 0 : SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_FAST_UPDOWN,
	.name = "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.readlock_held = torture_srcu_read_lock_held,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "busted_srcud"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not work well with CPU hotplug nor
 * with rcutorture's shuffling.
 */

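/*
 * Wait for a trivial-RCU grace period by running on each online CPU in
 * turn: the resulting context switch on every CPU is a quiescent state
 * for this preemption-disabled reader flavor.
 */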
static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		torture_sched_setaffinity(current->pid, cpumask_of(cpu), true);
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}

static void rcu_sync_torture_init_trivial(void)
{
	rcu_sync_torture_init();
	if (WARN_ONCE(onoff_interval || shuffle_interval,
		      "%s: Non-zero onoff_interval (%d) or shuffle_interval (%d) breaks trivial RCU, resetting to zero",
		      __func__, onoff_interval, shuffle_interval)) {
		onoff_interval = 0;
		shuffle_interval = 0;
	}
}

static int rcu_torture_read_lock_trivial(void)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype = RCU_TRIVIAL_FLAVOR,
	.init = rcu_sync_torture_init_trivial,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.readlock_held = torture_readlock_not_held,
	.get_gp_seq = rcu_no_completed,
	.sync = synchronize_rcu_trivial,
	.exp_sync = synchronize_rcu_trivial,
	.irq_capable = 1,
	.name = "trivial"
};

#ifdef CONFIG_TASKS_RCU

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
	synchronize_rcu_mult(call_rcu_tasks, call_rcu_hurry);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype = RCU_TASKS_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_torture_read_lock,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = tasks_torture_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_torture_deferred_free,
	.sync = synchronize_rcu_tasks,
	.exp_sync = synchronize_rcu_mult_test,
	.call = call_rcu_tasks,
	.cb_barrier = rcu_barrier_tasks,
	.gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread,
	.get_gp_data = rcu_tasks_get_gp_data,
	.irq_capable = 1,
	.slow_gps = 1,
	.name = "tasks"
};

#define TASKS_OPS &tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define TASKS_OPS

#endif // #else #ifdef CONFIG_TASKS_RCU


#ifdef CONFIG_TASKS_RUDE_RCU

/*
 * Definitions for rude RCU-tasks torture testing.
 */

static struct rcu_torture_ops tasks_rude_ops = {
	.ttype = RCU_TASKS_RUDE_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.get_gp_seq = rcu_no_completed,
	.sync = synchronize_rcu_tasks_rude,
	.exp_sync = synchronize_rcu_tasks_rude,
	.gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread,
	.get_gp_data = rcu_tasks_rude_get_gp_data,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.name = "tasks-rude"
};

#define TASKS_RUDE_OPS &tasks_rude_ops,

#else // #ifdef CONFIG_TASKS_RUDE_RCU

#define TASKS_RUDE_OPS

#endif // #else #ifdef CONFIG_TASKS_RUDE_RCU


#ifdef CONFIG_TASKS_TRACE_RCU

/*
 * Definitions for tracing RCU-tasks torture testing.
 */

static int tasks_tracing_torture_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_tracing_torture_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_tracing_ops = {
	.ttype = RCU_TASKS_TRACING_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_tracing_torture_read_lock,
	.read_delay = srcu_read_delay,	/* just reuse srcu's version. */
	.readunlock = tasks_tracing_torture_read_unlock,
	.readlock_held = rcu_read_lock_trace_held,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_tracing_torture_deferred_free,
	.sync = synchronize_rcu_tasks_trace,
	.exp_sync = synchronize_rcu_tasks_trace,
	.call = call_rcu_tasks_trace,
	.cb_barrier = rcu_barrier_tasks_trace,
	.gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread,
	.get_gp_data = rcu_tasks_trace_get_gp_data,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.slow_gps = 1,
	.name = "tasks-tracing"
};

#define TASKS_TRACING_OPS &tasks_tracing_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define TASKS_TRACING_OPS

#endif // #else #ifdef CONFIG_TASKS_TRACE_RCU

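/*
 * Compute how far apart two grace-period sequence numbers are, using the
 * flavor's ->gp_diff handler where sequence numbers are rcu_seq-encoded
 * and plain subtraction otherwise.
 */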
static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly starting grace periods and waiting
 * for them to complete.  If a given grace period takes too long, we assume
 * that priority inversion has occurred.
 */

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled.  This is only possible when rcutorture is built in;
	 * otherwise, the user should do this manually by setting the
	 * sched_rt_period_us and sched_rt_runtime sysctls.
	 */
| 1224 | if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1) |
| 1225 | return; |
| 1226 | |
| 1227 | old_rt_runtime = sysctl_sched_rt_runtime; |
| 1228 | sysctl_sched_rt_runtime = -1; |
| 1229 | } |
| 1230 | |
| 1231 | static void rcu_torture_enable_rt_throttle(void) |
| 1232 | { |
| 1233 | if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1) |
| 1234 | return; |
| 1235 | |
| 1236 | sysctl_sched_rt_runtime = old_rt_runtime; |
| 1237 | old_rt_runtime = -1; |
| 1238 | } |
| 1239 | |
| 1240 | static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start) |
| 1241 | { |
| 1242 | int cpu; |
| 1243 | static int dbg_done; |
| 1244 | unsigned long end = jiffies; |
| 1245 | bool gp_done; |
| 1246 | unsigned long j; |
| 1247 | static unsigned long last_persist; |
| 1248 | unsigned long lp; |
| 1249 | unsigned long mininterval = test_boost_duration * HZ - HZ / 2; |
| 1250 | |
| 1251 | if (end - *start > mininterval) { |
| 1252 | // Recheck after checking time to avoid false positives. |
| 1253 | smp_mb(); // Time check before grace-period check. |
| 1254 | if (cur_ops->poll_gp_state(gp_state)) |
| 1255 | return false; // passed, though perhaps just barely |
| 1256 | if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) { |
| 1257 | // At most one persisted message per boost test. |
| 1258 | j = jiffies; |
| 1259 | lp = READ_ONCE(last_persist); |
| 1260 | if (time_after(j, lp + mininterval) && |
| 1261 | cmpxchg(&last_persist, lp, j) == lp) { |
| 1262 | if (cpu < 0) |
| 1263 | pr_info("Boost inversion persisted: QS from all CPUs\n" ); |
| 1264 | else |
| 1265 | pr_info("Boost inversion persisted: No QS from CPU %d\n" , cpu); |
| 1266 | } |
| 1267 | return false; // passed on a technicality |
| 1268 | } |
| 1269 | VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed" ); |
| 1270 | n_rcu_torture_boost_failure++; |
| 1271 | if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) { |
| 1272 | pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n" , |
| 1273 | current->rt_priority, gp_state, end - *start); |
| 1274 | cur_ops->gp_kthread_dbg(); |
| 1275 | // Recheck after print to flag grace period ending during splat. |
| 1276 | gp_done = cur_ops->poll_gp_state(gp_state); |
| 1277 | pr_info("Boost inversion: GP %lu %s.\n" , gp_state, |
| 1278 | gp_done ? "ended already" : "still pending" ); |
| 1279 | |
| 1280 | } |
| 1281 | |
| 1282 | return true; // failed |
| 1283 | } else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) { |
| 1284 | *start = jiffies; |
| 1285 | } |
| 1286 | |
| 1287 | return false; // passed |
| 1288 | } |
| 1289 | |
| 1290 | static int rcu_torture_boost(void *arg) |
| 1291 | { |
| 1292 | unsigned long endtime; |
| 1293 | unsigned long gp_state; |
| 1294 | unsigned long gp_state_time; |
| 1295 | unsigned long oldstarttime; |
| 1296 | unsigned long booststarttime = get_torture_init_jiffies() + test_boost_holdoff * HZ; |
| 1297 | |
| 1298 | if (test_boost_holdoff <= 0 || time_after(jiffies, booststarttime)) { |
| 1299 | VERBOSE_TOROUT_STRING("rcu_torture_boost started" ); |
| 1300 | } else { |
| 1301 | VERBOSE_TOROUT_STRING("rcu_torture_boost started holdoff period" ); |
| 1302 | while (time_before(jiffies, booststarttime)) { |
| 1303 | schedule_timeout_idle(HZ); |
| 1304 | if (kthread_should_stop()) |
| 1305 | goto cleanup; |
| 1306 | } |
| 1307 | VERBOSE_TOROUT_STRING("rcu_torture_boost finished holdoff period" ); |
| 1308 | } |
| 1309 | |
| 1310 | /* Set real-time priority. */ |
| 1311 | sched_set_fifo_low(current); |
| 1312 | |
| 1313 | /* Each pass through the following loop does one boost-test cycle. */ |
| 1314 | do { |
| 1315 | bool failed = false; // Test failed already in this test interval |
| 1316 | bool gp_initiated = false; |
| 1317 | |
| 1318 | if (kthread_should_stop()) |
| 1319 | goto checkwait; |
| 1320 | |
| 1321 | /* Wait for the next test interval. */ |
| 1322 | oldstarttime = READ_ONCE(boost_starttime); |
| 1323 | while (time_before(jiffies, oldstarttime)) { |
| 1324 | schedule_timeout_interruptible(timeout: oldstarttime - jiffies); |
| 1325 | if (stutter_wait(title: "rcu_torture_boost" )) |
| 1326 | sched_set_fifo_low(current); |
| 1327 | if (torture_must_stop()) |
| 1328 | goto checkwait; |
| 1329 | } |
| 1330 | |
| 1331 | // Do one boost-test interval. |
| 1332 | endtime = oldstarttime + test_boost_duration * HZ; |
| 1333 | while (time_before(jiffies, endtime)) { |
| 1334 | // Has current GP gone too long? |
| 1335 | if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state)) |
| 1336 | failed = rcu_torture_boost_failed(gp_state, start: &gp_state_time); |
| 1337 | // If we don't have a grace period in flight, start one. |
| 1338 | if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) { |
| 1339 | gp_state = cur_ops->start_gp_poll(); |
| 1340 | gp_initiated = true; |
| 1341 | gp_state_time = jiffies; |
| 1342 | } |
| 1343 | if (stutter_wait(title: "rcu_torture_boost" )) { |
| 1344 | sched_set_fifo_low(current); |
| 1345 | // If the grace period already ended, |
| 1346 | // we don't know when that happened, so |
| 1347 | // start over. |
| 1348 | if (cur_ops->poll_gp_state(gp_state)) |
| 1349 | gp_initiated = false; |
| 1350 | } |
| 1351 | if (torture_must_stop()) |
| 1352 | goto checkwait; |
| 1353 | } |
| 1354 | |
| 1355 | // In case the grace period extended beyond the end of the loop. |
| 1356 | if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state)) |
| 1357 | rcu_torture_boost_failed(gp_state, &gp_state_time);
| 1358 | |
| 1359 | /* |
| 1360 | * Set the start time of the next test interval. |
| 1361 | * Yes, this is vulnerable to long delays, but such |
| 1362 | * delays simply cause a false negative for the next |
| 1363 | * interval. Besides, we are running at RT priority, |
| 1364 | * so delays should be relatively rare. |
| 1365 | */ |
| 1366 | while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) { |
| 1367 | if (mutex_trylock(&boost_mutex)) { |
| 1368 | if (oldstarttime == boost_starttime) { |
| 1369 | WRITE_ONCE(boost_starttime, |
| 1370 | jiffies + test_boost_interval * HZ); |
| 1371 | n_rcu_torture_boosts++; |
| 1372 | } |
| 1373 | mutex_unlock(&boost_mutex);
| 1374 | break; |
| 1375 | } |
| 1376 | schedule_timeout_uninterruptible(HZ / 20); |
| 1377 | } |
| 1378 | |
| 1379 | /* Go do the stutter. */ |
| 1380 | checkwait: if (stutter_wait("rcu_torture_boost" ))
| 1381 | sched_set_fifo_low(current); |
| 1382 | } while (!torture_must_stop()); |
| 1383 | |
| 1384 | cleanup: |
| 1385 | /* Clean up and exit. */ |
| 1386 | while (!kthread_should_stop()) { |
| 1387 | torture_shutdown_absorb("rcu_torture_boost" );
| 1388 | schedule_timeout_uninterruptible(HZ / 20); |
| 1389 | } |
| 1390 | torture_kthread_stopping("rcu_torture_boost" );
| 1391 | return 0; |
| 1392 | } |
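| | /*
| |  * A minimal sketch (vanilla RCU; the ->start_gp_poll/->poll_gp_state
| |  * ops used above map onto these primitives for the vanilla-RCU flavor)
| |  * of the polled grace-period API that the boost loop drives:
| |  *
| |  *	unsigned long gp = start_poll_synchronize_rcu(); // Start GP, get cookie.
| |  *	while (!poll_state_synchronize_rcu(gp))		 // Nonblocking "done?" check.
| |  *		schedule_timeout_idle(1);
| |  *
| |  * If the polled grace period outlives test_boost_duration, the boosted
| |  * readers were not preempted out of their critical sections in time and
| |  * rcu_torture_boost_failed() reports the priority-inversion failure.
| | */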
| 1393 | |
| 1394 | /* |
| 1395 | * RCU torture force-quiescent-state kthread. Repeatedly induces |
| 1396 | * bursts of calls to force_quiescent_state(), increasing the probability |
| 1397 | * of occurrence of some important types of race conditions. |
| 1398 | */ |
| 1399 | static int |
| 1400 | rcu_torture_fqs(void *arg) |
| 1401 | { |
| 1402 | unsigned long fqs_resume_time; |
| 1403 | int fqs_burst_remaining; |
| 1404 | int oldnice = task_nice(current); |
| 1405 | |
| 1406 | VERBOSE_TOROUT_STRING("rcu_torture_fqs task started" ); |
| 1407 | do { |
| 1408 | fqs_resume_time = jiffies + fqs_stutter * HZ; |
| 1409 | while (time_before(jiffies, fqs_resume_time) && |
| 1410 | !kthread_should_stop()) { |
| 1411 | schedule_timeout_interruptible(HZ / 20); |
| 1412 | } |
| 1413 | fqs_burst_remaining = fqs_duration; |
| 1414 | while (fqs_burst_remaining > 0 && |
| 1415 | !kthread_should_stop()) { |
| 1416 | cur_ops->fqs(); |
| 1417 | udelay(fqs_holdoff);
| 1418 | fqs_burst_remaining -= fqs_holdoff; |
| 1419 | } |
| 1420 | if (stutter_wait("rcu_torture_fqs" ))
| 1421 | sched_set_normal(current, oldnice);
| 1422 | } while (!torture_must_stop()); |
| 1423 | torture_kthread_stopping("rcu_torture_fqs" );
| 1424 | return 0; |
| 1425 | } |
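| | /*
| |  * Burst sizing above: fqs_burst_remaining starts at fqs_duration and is
| |  * decremented by fqs_holdoff per call, so each burst makes roughly
| |  * fqs_duration / fqs_holdoff calls to ->fqs() with only
| |  * udelay(fqs_holdoff) between them.  For example (hypothetical values),
| |  * fqs_duration = 100 and fqs_holdoff = 5 yield about 20 calls per
| |  * burst, with bursts spaced fqs_stutter seconds apart.
| | */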
| 1426 | |
| 1427 | // Used by writers to randomly choose from the available grace-period primitives. |
| 1428 | static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { }; |
| 1429 | static int nsynctypes; |
| 1430 | |
| 1431 | /* |
| 1432 | * Determine which grace-period primitives are available. |
| 1433 | */ |
| 1434 | static void rcu_torture_write_types(void) |
| 1435 | { |
| 1436 | bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_cond_full1 = gp_cond_full; |
| 1437 | bool gp_cond_exp_full1 = gp_cond_exp_full, gp_exp1 = gp_exp, gp_poll_exp1 = gp_poll_exp; |
| 1438 | bool gp_poll_exp_full1 = gp_poll_exp_full, gp_normal1 = gp_normal, gp_poll1 = gp_poll; |
| 1439 | bool gp_poll_full1 = gp_poll_full, gp_sync1 = gp_sync; |
| 1440 | |
| 1441 | /* Initialize synctype[] array. If none set, take default. */ |
| 1442 | if (!gp_cond1 && |
| 1443 | !gp_cond_exp1 && |
| 1444 | !gp_cond_full1 && |
| 1445 | !gp_cond_exp_full1 && |
| 1446 | !gp_exp1 && |
| 1447 | !gp_poll_exp1 && |
| 1448 | !gp_poll_exp_full1 && |
| 1449 | !gp_normal1 && |
| 1450 | !gp_poll1 && |
| 1451 | !gp_poll_full1 && |
| 1452 | !gp_sync1) { |
| 1453 | gp_cond1 = true; |
| 1454 | gp_cond_exp1 = true; |
| 1455 | gp_cond_full1 = true; |
| 1456 | gp_cond_exp_full1 = true; |
| 1457 | gp_exp1 = true; |
| 1458 | gp_poll_exp1 = true; |
| 1459 | gp_poll_exp_full1 = true; |
| 1460 | gp_normal1 = true; |
| 1461 | gp_poll1 = true; |
| 1462 | gp_poll_full1 = true; |
| 1463 | gp_sync1 = true; |
| 1464 | } |
| 1465 | if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) { |
| 1466 | synctype[nsynctypes++] = RTWS_COND_GET; |
| 1467 | pr_info("%s: Testing conditional GPs.\n" , __func__); |
| 1468 | } else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) { |
| 1469 | pr_alert("%s: gp_cond without primitives.\n" , __func__); |
| 1470 | } |
| 1471 | if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) { |
| 1472 | synctype[nsynctypes++] = RTWS_COND_GET_EXP; |
| 1473 | pr_info("%s: Testing conditional expedited GPs.\n" , __func__); |
| 1474 | } else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) { |
| 1475 | pr_alert("%s: gp_cond_exp without primitives.\n" , __func__); |
| 1476 | } |
| 1477 | if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) { |
| 1478 | synctype[nsynctypes++] = RTWS_COND_GET_FULL; |
| 1479 | pr_info("%s: Testing conditional full-state GPs.\n" , __func__); |
| 1480 | } else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) { |
| 1481 | pr_alert("%s: gp_cond_full without primitives.\n" , __func__); |
| 1482 | } |
| 1483 | if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) { |
| 1484 | synctype[nsynctypes++] = RTWS_COND_GET_EXP_FULL; |
| 1485 | pr_info("%s: Testing conditional full-state expedited GPs.\n" , __func__); |
| 1486 | } else if (gp_cond_exp_full && |
| 1487 | (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) { |
| 1488 | pr_alert("%s: gp_cond_exp_full without primitives.\n" , __func__); |
| 1489 | } |
| 1490 | if (gp_exp1 && cur_ops->exp_sync) { |
| 1491 | synctype[nsynctypes++] = RTWS_EXP_SYNC; |
| 1492 | pr_info("%s: Testing expedited GPs.\n" , __func__); |
| 1493 | } else if (gp_exp && !cur_ops->exp_sync) { |
| 1494 | pr_alert("%s: gp_exp without primitives.\n" , __func__); |
| 1495 | } |
| 1496 | if (gp_normal1 && cur_ops->deferred_free) { |
| 1497 | synctype[nsynctypes++] = RTWS_DEF_FREE; |
| 1498 | pr_info("%s: Testing asynchronous GPs.\n" , __func__); |
| 1499 | } else if (gp_normal && !cur_ops->deferred_free) { |
| 1500 | pr_alert("%s: gp_normal without primitives.\n" , __func__); |
| 1501 | } |
| 1502 | if (gp_poll1 && cur_ops->get_comp_state && cur_ops->same_gp_state && |
| 1503 | cur_ops->start_gp_poll && cur_ops->poll_gp_state) { |
| 1504 | synctype[nsynctypes++] = RTWS_POLL_GET; |
| 1505 | pr_info("%s: Testing polling GPs.\n" , __func__); |
| 1506 | } else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) { |
| 1507 | pr_alert("%s: gp_poll without primitives.\n" , __func__); |
| 1508 | } |
| 1509 | if (gp_poll_full1 && cur_ops->get_comp_state_full && cur_ops->same_gp_state_full |
| 1510 | && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) { |
| 1511 | synctype[nsynctypes++] = RTWS_POLL_GET_FULL; |
| 1512 | pr_info("%s: Testing polling full-state GPs.\n" , __func__); |
| 1513 | } else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) { |
| 1514 | pr_alert("%s: gp_poll_full without primitives.\n" , __func__); |
| 1515 | } |
| 1516 | if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) { |
| 1517 | synctype[nsynctypes++] = RTWS_POLL_GET_EXP; |
| 1518 | pr_info("%s: Testing polling expedited GPs.\n" , __func__); |
| 1519 | } else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) { |
| 1520 | pr_alert("%s: gp_poll_exp without primitives.\n" , __func__); |
| 1521 | } |
| 1522 | if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) { |
| 1523 | synctype[nsynctypes++] = RTWS_POLL_GET_EXP_FULL; |
| 1524 | pr_info("%s: Testing polling full-state expedited GPs.\n" , __func__); |
| 1525 | } else if (gp_poll_exp_full && |
| 1526 | (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) { |
| 1527 | pr_alert("%s: gp_poll_exp_full without primitives.\n" , __func__); |
| 1528 | } |
| 1529 | if (gp_sync1 && cur_ops->sync) { |
| 1530 | synctype[nsynctypes++] = RTWS_SYNC; |
| 1531 | pr_info("%s: Testing normal GPs.\n" , __func__); |
| 1532 | } else if (gp_sync && !cur_ops->sync) { |
| 1533 | pr_alert("%s: gp_sync without primitives.\n" , __func__); |
| 1534 | } |
| 1535 | pr_alert("%s: Testing %d update types.\n" , __func__, nsynctypes); |
| 1536 | pr_info("%s: gp_cond_wi %d gp_cond_wi_exp %d gp_poll_wi %d gp_poll_wi_exp %d\n" , __func__, gp_cond_wi, gp_cond_wi_exp, gp_poll_wi, gp_poll_wi_exp); |
| 1537 | } |
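| | /*
| |  * Example (hypothetical invocation): booting with rcutorture.gp_cond=1
| |  * rcutorture.gp_exp=1 restricts writers to conditional and expedited
| |  * grace periods, provided cur_ops supplies the needed primitives.  With
| |  * no gp_* parameter set, the defaulting logic above enables every
| |  * primitive that cur_ops supports.
| | */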
| 1538 | |
| 1539 | /* |
| 1540 | * Do the specified rcu_torture_writer() synchronous grace period, |
| 1541 | * while also testing out the polled APIs. Note well that the single-CPU |
| 1542 | * grace-period optimizations must be accounted for. |
| 1543 | */ |
| 1544 | static void do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void)) |
| 1545 | { |
| 1546 | unsigned long cookie; |
| 1547 | struct rcu_gp_oldstate cookie_full; |
| 1548 | bool dopoll; |
| 1549 | bool dopoll_full; |
| 1550 | unsigned long r = torture_random(trsp); |
| 1551 | |
| 1552 | dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300); |
| 1553 | dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00); |
| 1554 | if (dopoll || dopoll_full) |
| 1555 | cpus_read_lock(); |
| 1556 | if (dopoll) |
| 1557 | cookie = cur_ops->get_gp_state(); |
| 1558 | if (dopoll_full) |
| 1559 | cur_ops->get_gp_state_full(&cookie_full); |
| 1560 | if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full)) |
| 1561 | sync(); |
| 1562 | sync(); |
| 1563 | WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie), |
| 1564 | "%s: Cookie check 3 failed %pS() online %*pbl." , |
| 1565 | __func__, sync, cpumask_pr_args(cpu_online_mask)); |
| 1566 | WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full), |
| 1567 | "%s: Cookie check 4 failed %pS() online %*pbl" , |
| 1568 | __func__, sync, cpumask_pr_args(cpu_online_mask)); |
| 1569 | if (dopoll || dopoll_full) |
| 1570 | cpus_read_unlock(); |
| 1571 | } |
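| | /*
| |  * Illustrative sketch (vanilla RCU) of the cookie invariant checked
| |  * above: any full grace period must complete an earlier cookie.
| |  *
| |  *	unsigned long c = get_state_synchronize_rcu();	// Snapshot GP state.
| |  *	synchronize_rcu();				// Full grace period.
| |  *	WARN_ON_ONCE(!poll_state_synchronize_rcu(c));	// Cookie must be done.
| |  *
| |  * The extra sync() guarded by ->poll_need_2gp appears to cover cases
| |  * (such as the single-CPU optimizations noted in the header comment)
| |  * where one grace period is not guaranteed to advance the polled state
| |  * far enough to satisfy the cookie.
| | */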
| 1572 | |
| 1573 | /* |
| 1574 | * RCU torture writer kthread. Repeatedly substitutes a new structure |
| 1575 | * for that pointed to by rcu_torture_current, freeing the old structure |
| 1576 | * after a series of grace periods (the "pipeline"). |
| 1577 | */ |
| 1578 | static int |
| 1579 | rcu_torture_writer(void *arg) |
| 1580 | { |
| 1581 | bool booting_still = false; |
| 1582 | bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal(); |
| 1583 | unsigned long cookie; |
| 1584 | struct rcu_gp_oldstate cookie_full; |
| 1585 | int expediting = 0; |
| 1586 | unsigned long gp_snap; |
| 1587 | unsigned long gp_snap1; |
| 1588 | struct rcu_gp_oldstate gp_snap_full; |
| 1589 | struct rcu_gp_oldstate gp_snap1_full; |
| 1590 | int i; |
| 1591 | int idx; |
| 1592 | unsigned long j; |
| 1593 | int oldnice = task_nice(current); |
| 1594 | struct rcu_gp_oldstate *rgo = NULL; |
| 1595 | int rgo_size = 0; |
| 1596 | struct rcu_torture *rp; |
| 1597 | struct rcu_torture *old_rp; |
| 1598 | static DEFINE_TORTURE_RANDOM(rand); |
| 1599 | unsigned long stallsdone = jiffies; |
| 1600 | bool stutter_waited; |
| 1601 | unsigned long *ulo = NULL; |
| 1602 | int ulo_size = 0; |
| 1603 | |
| 1604 | // If a new stall test is added, this must be adjusted. |
| 1605 | if (stall_cpu_holdoff + stall_gp_kthread + stall_cpu) |
| 1606 | stallsdone += (stall_cpu_holdoff + stall_gp_kthread + stall_cpu + 60) * |
| 1607 | HZ * (stall_cpu_repeat + 1); |
| 1608 | VERBOSE_TOROUT_STRING("rcu_torture_writer task started" ); |
| 1609 | if (!can_expedite) |
| 1610 | pr_alert("%s" TORTURE_FLAG |
| 1611 | " GP expediting controlled from boot/sysfs for %s.\n" , |
| 1612 | torture_type, cur_ops->name); |
| 1613 | if (WARN_ONCE(nsynctypes == 0, |
| 1614 | "%s: No update-side primitives.\n" , __func__)) { |
| 1615 | /* |
| 1616 | * No update-side primitives, so don't try updating.
| 1617 | * The resulting test won't be testing much, hence the
| 1618 | * above WARN_ONCE().
| 1619 | */
| 1620 | rcu_torture_writer_state = RTWS_STOPPING;
| 1621 | torture_kthread_stopping("rcu_torture_writer" );
| 1622 | return 0; |
| 1623 | } |
| 1624 | if (cur_ops->poll_active > 0) { |
| 1625 | ulo = kcalloc(cur_ops->poll_active, sizeof(*ulo), GFP_KERNEL); |
| 1626 | if (!WARN_ON(!ulo)) |
| 1627 | ulo_size = cur_ops->poll_active; |
| 1628 | } |
| 1629 | if (cur_ops->poll_active_full > 0) { |
| 1630 | rgo = kcalloc(cur_ops->poll_active_full, sizeof(*rgo), GFP_KERNEL); |
| 1631 | if (!WARN_ON(!rgo)) |
| 1632 | rgo_size = cur_ops->poll_active_full; |
| 1633 | } |
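| | /*
| |  * The ulo[] and rgo[] arrays hold one old-state cookie per grace period
| |  * that the implementation claims can be in flight at a time
| |  * (->poll_active and ->poll_active_full).  The RTWS_POLL_GET cases
| |  * below recycle an array slot only when its cookie has completed or
| |  * matches the freshest state, and warn if no slot qualifies, checking
| |  * that no more than the advertised number of cookies is ever needed.
| | */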
| 1634 | |
| 1635 | // If the system is still booting, let it finish. |
| 1636 | j = jiffies; |
| 1637 | while (!torture_must_stop() && !rcu_inkernel_boot_has_ended()) { |
| 1638 | booting_still = true; |
| 1639 | schedule_timeout_interruptible(HZ); |
| 1640 | } |
| 1641 | if (booting_still) |
| 1642 | pr_alert("%s" TORTURE_FLAG " Waited %lu jiffies for boot to complete.\n" , |
| 1643 | torture_type, jiffies - j); |
| 1644 | |
| 1645 | do { |
| 1646 | rcu_torture_writer_state = RTWS_FIXED_DELAY; |
| 1647 | torture_hrtimeout_us(500, 1000, &rand);
| 1648 | rp = rcu_torture_alloc(); |
| 1649 | if (rp == NULL) |
| 1650 | continue; |
| 1651 | rp->rtort_pipe_count = 0; |
| 1652 | ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count); |
| 1653 | rcu_torture_writer_state = RTWS_DELAY; |
| 1654 | udelay(torture_random(&rand) & 0x3ff);
| 1655 | rcu_torture_writer_state = RTWS_REPLACE; |
| 1656 | old_rp = rcu_dereference_check(rcu_torture_current, |
| 1657 | current == writer_task); |
| 1658 | rp->rtort_mbtest = 1; |
| 1659 | rcu_assign_pointer(rcu_torture_current, rp); |
| 1660 | smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ |
| 1661 | if (old_rp) { |
| 1662 | i = old_rp->rtort_pipe_count; |
| 1663 | if (i > RCU_TORTURE_PIPE_LEN) |
| 1664 | i = RCU_TORTURE_PIPE_LEN; |
| 1665 | atomic_inc(&rcu_torture_wcount[i]);
| 1666 | WRITE_ONCE(old_rp->rtort_pipe_count, |
| 1667 | old_rp->rtort_pipe_count + 1); |
| 1668 | ASSERT_EXCLUSIVE_WRITER(old_rp->rtort_pipe_count); |
| 1669 | |
| 1670 | // Make sure readers block polled grace periods. |
| 1671 | if (cur_ops->get_gp_state && cur_ops->poll_gp_state) { |
| 1672 | idx = cur_ops->readlock(); |
| 1673 | cookie = cur_ops->get_gp_state(); |
| 1674 | WARN_ONCE(cur_ops->poll_gp_state(cookie), |
| 1675 | "%s: Cookie check 1 failed %s(%d) %lu->%lu\n" , |
| 1676 | __func__, |
| 1677 | rcu_torture_writer_state_getname(), |
| 1678 | rcu_torture_writer_state, |
| 1679 | cookie, cur_ops->get_gp_state()); |
| 1680 | if (cur_ops->get_comp_state) { |
| 1681 | cookie = cur_ops->get_comp_state(); |
| 1682 | WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie)); |
| 1683 | } |
| 1684 | cur_ops->readunlock(idx); |
| 1685 | } |
| 1686 | if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) { |
| 1687 | idx = cur_ops->readlock(); |
| 1688 | cur_ops->get_gp_state_full(&cookie_full); |
| 1689 | WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full), |
| 1690 | "%s: Cookie check 5 failed %s(%d) online %*pbl\n" , |
| 1691 | __func__, |
| 1692 | rcu_torture_writer_state_getname(), |
| 1693 | rcu_torture_writer_state, |
| 1694 | cpumask_pr_args(cpu_online_mask)); |
| 1695 | if (cur_ops->get_comp_state_full) { |
| 1696 | cur_ops->get_comp_state_full(&cookie_full); |
| 1697 | WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full)); |
| 1698 | } |
| 1699 | cur_ops->readunlock(idx); |
| 1700 | } |
| 1701 | switch (synctype[torture_random(&rand) % nsynctypes]) {
| 1702 | case RTWS_DEF_FREE: |
| 1703 | rcu_torture_writer_state = RTWS_DEF_FREE; |
| 1704 | cur_ops->deferred_free(old_rp); |
| 1705 | break; |
| 1706 | case RTWS_EXP_SYNC: |
| 1707 | rcu_torture_writer_state = RTWS_EXP_SYNC; |
| 1708 | do_rtws_sync(&rand, cur_ops->exp_sync);
| 1709 | rcu_torture_pipe_update(old_rp); |
| 1710 | break; |
| 1711 | case RTWS_COND_GET: |
| 1712 | rcu_torture_writer_state = RTWS_COND_GET; |
| 1713 | gp_snap = cur_ops->get_gp_state(); |
| 1714 | torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi,
| 1715 | 1000, &rand);
| 1716 | rcu_torture_writer_state = RTWS_COND_SYNC; |
| 1717 | cur_ops->cond_sync(gp_snap); |
| 1718 | rcu_torture_pipe_update(old_rp); |
| 1719 | break; |
| 1720 | case RTWS_COND_GET_EXP: |
| 1721 | rcu_torture_writer_state = RTWS_COND_GET_EXP; |
| 1722 | gp_snap = cur_ops->get_gp_state_exp(); |
| 1723 | torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi_exp,
| 1724 | 1000, &rand);
| 1725 | rcu_torture_writer_state = RTWS_COND_SYNC_EXP; |
| 1726 | cur_ops->cond_sync_exp(gp_snap); |
| 1727 | rcu_torture_pipe_update(old_rp); |
| 1728 | break; |
| 1729 | case RTWS_COND_GET_FULL: |
| 1730 | rcu_torture_writer_state = RTWS_COND_GET_FULL; |
| 1731 | cur_ops->get_gp_state_full(&gp_snap_full); |
| 1732 | torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi,
| 1733 | 1000, &rand);
| 1734 | rcu_torture_writer_state = RTWS_COND_SYNC_FULL; |
| 1735 | cur_ops->cond_sync_full(&gp_snap_full); |
| 1736 | rcu_torture_pipe_update(old_rp); |
| 1737 | break; |
| 1738 | case RTWS_COND_GET_EXP_FULL: |
| 1739 | rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL; |
| 1740 | cur_ops->get_gp_state_full(&gp_snap_full); |
| 1741 | torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi_exp,
| 1742 | 1000, &rand);
| 1743 | rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL; |
| 1744 | cur_ops->cond_sync_exp_full(&gp_snap_full); |
| 1745 | rcu_torture_pipe_update(old_rp); |
| 1746 | break; |
| 1747 | case RTWS_POLL_GET: |
| 1748 | rcu_torture_writer_state = RTWS_POLL_GET; |
| 1749 | for (i = 0; i < ulo_size; i++) |
| 1750 | ulo[i] = cur_ops->get_comp_state(); |
| 1751 | gp_snap = cur_ops->start_gp_poll(); |
| 1752 | rcu_torture_writer_state = RTWS_POLL_WAIT; |
| 1753 | if (cur_ops->exp_current && !(torture_random(&rand) % 0xff))
| 1754 | cur_ops->exp_current(); |
| 1755 | while (!cur_ops->poll_gp_state(gp_snap)) { |
| 1756 | gp_snap1 = cur_ops->get_gp_state(); |
| 1757 | for (i = 0; i < ulo_size; i++) |
| 1758 | if (cur_ops->poll_gp_state(ulo[i]) || |
| 1759 | cur_ops->same_gp_state(ulo[i], gp_snap1)) { |
| 1760 | ulo[i] = gp_snap1; |
| 1761 | break; |
| 1762 | } |
| 1763 | WARN_ON_ONCE(ulo_size > 0 && i >= ulo_size); |
| 1764 | torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi,
| 1765 | 1000, &rand);
| 1766 | } |
| 1767 | rcu_torture_pipe_update(old_rp); |
| 1768 | break; |
| 1769 | case RTWS_POLL_GET_FULL: |
| 1770 | rcu_torture_writer_state = RTWS_POLL_GET_FULL; |
| 1771 | for (i = 0; i < rgo_size; i++) |
| 1772 | cur_ops->get_comp_state_full(&rgo[i]); |
| 1773 | cur_ops->start_gp_poll_full(&gp_snap_full); |
| 1774 | rcu_torture_writer_state = RTWS_POLL_WAIT_FULL; |
| 1775 | if (cur_ops->exp_current && !(torture_random(&rand) % 0xff))
| 1776 | cur_ops->exp_current(); |
| 1777 | while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { |
| 1778 | cur_ops->get_gp_state_full(&gp_snap1_full); |
| 1779 | for (i = 0; i < rgo_size; i++) |
| 1780 | if (cur_ops->poll_gp_state_full(&rgo[i]) || |
| 1781 | cur_ops->same_gp_state_full(&rgo[i], |
| 1782 | &gp_snap1_full)) { |
| 1783 | rgo[i] = gp_snap1_full; |
| 1784 | break; |
| 1785 | } |
| 1786 | WARN_ON_ONCE(rgo_size > 0 && i >= rgo_size); |
| 1787 | torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi,
| 1788 | 1000, &rand);
| 1789 | } |
| 1790 | rcu_torture_pipe_update(old_rp); |
| 1791 | break; |
| 1792 | case RTWS_POLL_GET_EXP: |
| 1793 | rcu_torture_writer_state = RTWS_POLL_GET_EXP; |
| 1794 | gp_snap = cur_ops->start_gp_poll_exp(); |
| 1795 | rcu_torture_writer_state = RTWS_POLL_WAIT_EXP; |
| 1796 | while (!cur_ops->poll_gp_state_exp(gp_snap)) |
| 1797 | torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi_exp,
| 1798 | 1000, &rand);
| 1799 | rcu_torture_pipe_update(old_rp); |
| 1800 | break; |
| 1801 | case RTWS_POLL_GET_EXP_FULL: |
| 1802 | rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL; |
| 1803 | cur_ops->start_gp_poll_exp_full(&gp_snap_full); |
| 1804 | rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL; |
| 1805 | while (!cur_ops->poll_gp_state_full(&gp_snap_full)) |
| 1806 | torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi_exp,
| 1807 | 1000, &rand);
| 1808 | rcu_torture_pipe_update(old_rp); |
| 1809 | break; |
| 1810 | case RTWS_SYNC: |
| 1811 | rcu_torture_writer_state = RTWS_SYNC; |
| 1812 | do_rtws_sync(&rand, cur_ops->sync);
| 1813 | rcu_torture_pipe_update(old_rp); |
| 1814 | break; |
| 1815 | default: |
| 1816 | WARN_ON_ONCE(1); |
| 1817 | break; |
| 1818 | } |
| 1819 | } |
| 1820 | WRITE_ONCE(rcu_torture_current_version, |
| 1821 | rcu_torture_current_version + 1); |
| 1822 | /* Cycle through nesting levels of rcu_expedite_gp() calls. */ |
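| | // When expediting == 0, (!!expediting - 1) is all ones, so the "if"
| | // below fires only when the low eight random bits are all zero (about
| | // once per 256 passes).  Once a cycle starts, the mask term is zero and
| | // every pass advances it: four rcu_expedite_gp() calls, after which
| | // expediting flips to -4 and four matching rcu_unexpedite_gp() calls
| | // bring the nesting back to zero.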
| 1823 | if (can_expedite && |
| 1824 | !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
| 1825 | WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited()); |
| 1826 | if (expediting >= 0) |
| 1827 | rcu_expedite_gp(); |
| 1828 | else |
| 1829 | rcu_unexpedite_gp(); |
| 1830 | if (++expediting > 3) |
| 1831 | expediting = -expediting; |
| 1832 | } else if (!can_expedite) { /* Disabled during boot, recheck. */ |
| 1833 | can_expedite = !rcu_gp_is_expedited() && |
| 1834 | !rcu_gp_is_normal(); |
| 1835 | } |
| 1836 | rcu_torture_writer_state = RTWS_STUTTER; |
| 1837 | stutter_waited = stutter_wait("rcu_torture_writer" );
| 1838 | if (stutter_waited &&
| 1839 | !atomic_read(&rcu_fwd_cb_nodelay) &&
| 1840 | !cur_ops->slow_gps && |
| 1841 | !torture_must_stop() && |
| 1842 | time_after(jiffies, stallsdone)) |
| 1843 | for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) |
| 1844 | if (list_empty(&rcu_tortures[i].rtort_free) &&
| 1845 | rcu_access_pointer(rcu_torture_current) != &rcu_tortures[i]) { |
| 1846 | tracing_off(); |
| 1847 | if (cur_ops->gp_kthread_dbg) |
| 1848 | cur_ops->gp_kthread_dbg(); |
| 1849 | WARN(1, "%s: rtort_pipe_count: %d\n" , __func__, rcu_tortures[i].rtort_pipe_count); |
| 1850 | rcu_ftrace_dump(DUMP_ALL); |
| 1851 | break; |
| 1852 | } |
| 1853 | if (stutter_waited) |
| 1854 | sched_set_normal(current, oldnice);
| 1855 | } while (!torture_must_stop()); |
| 1856 | rcu_torture_current = NULL; // Let stats task know that we are done. |
| 1857 | /* Reset expediting back to unexpedited. */ |
| 1858 | if (expediting > 0) |
| 1859 | expediting = -expediting; |
| 1860 | while (can_expedite && expediting++ < 0) |
| 1861 | rcu_unexpedite_gp(); |
| 1862 | WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited()); |
| 1863 | if (!can_expedite) |
| 1864 | pr_alert("%s" TORTURE_FLAG |
| 1865 | " Dynamic grace-period expediting was disabled.\n" , |
| 1866 | torture_type); |
| 1867 | kfree(ulo);
| 1868 | kfree(rgo);
| 1869 | rcu_torture_writer_state = RTWS_STOPPING; |
| 1870 | torture_kthread_stopping("rcu_torture_writer" );
| 1871 | return 0; |
| 1872 | } |
| 1873 | |
| 1874 | /* |
| 1875 | * RCU torture fake writer kthread. Repeatedly calls sync, with a random |
| 1876 | * delay between calls. |
| 1877 | */ |
| 1878 | static int |
| 1879 | rcu_torture_fakewriter(void *arg) |
| 1880 | { |
| 1881 | unsigned long gp_snap; |
| 1882 | struct rcu_gp_oldstate gp_snap_full; |
| 1883 | DEFINE_TORTURE_RANDOM(rand); |
| 1884 | |
| 1885 | VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started" ); |
| 1886 | set_user_nice(current, MAX_NICE); |
| 1887 | |
| 1888 | if (WARN_ONCE(nsynctypes == 0, |
| 1889 | "%s: No update-side primitives.\n" , __func__)) { |
| 1890 | /* |
| 1891 | * No update-side primitives, so don't try updating.
| 1892 | * The resulting test won't be testing much, hence the
| 1893 | * above WARN_ONCE().
| 1894 | */
| 1895 | torture_kthread_stopping("rcu_torture_fakewriter" );
| 1896 | return 0; |
| 1897 | } |
| 1898 | |
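| | /*
| |  * Each pass below invokes ->cb_barrier() with probability
| |  * 1 / (nrealfakewriters * 8), so the expected system-wide barrier rate
| |  * stays near one call per eight loop intervals no matter how many fake
| |  * writers are running.
| | */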
| 1899 | do { |
| 1900 | torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
| 1901 | if (cur_ops->cb_barrier != NULL &&
| 1902 | torture_random(&rand) % (nrealfakewriters * 8) == 0) {
| 1903 | cur_ops->cb_barrier(); |
| 1904 | } else { |
| 1905 | switch (synctype[torture_random(&rand) % nsynctypes]) {
| 1906 | case RTWS_DEF_FREE: |
| 1907 | break; |
| 1908 | case RTWS_EXP_SYNC: |
| 1909 | cur_ops->exp_sync(); |
| 1910 | break; |
| 1911 | case RTWS_COND_GET: |
| 1912 | gp_snap = cur_ops->get_gp_state(); |
| 1913 | torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
| 1914 | cur_ops->cond_sync(gp_snap); |
| 1915 | break; |
| 1916 | case RTWS_COND_GET_EXP: |
| 1917 | gp_snap = cur_ops->get_gp_state_exp(); |
| 1918 | torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
| 1919 | cur_ops->cond_sync_exp(gp_snap); |
| 1920 | break; |
| 1921 | case RTWS_COND_GET_FULL: |
| 1922 | cur_ops->get_gp_state_full(&gp_snap_full); |
| 1923 | torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
| 1924 | cur_ops->cond_sync_full(&gp_snap_full); |
| 1925 | break; |
| 1926 | case RTWS_COND_GET_EXP_FULL: |
| 1927 | cur_ops->get_gp_state_full(&gp_snap_full); |
| 1928 | torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
| 1929 | cur_ops->cond_sync_exp_full(&gp_snap_full); |
| 1930 | break; |
| 1931 | case RTWS_POLL_GET: |
| 1932 | if (cur_ops->start_poll_irqsoff) |
| 1933 | local_irq_disable(); |
| 1934 | gp_snap = cur_ops->start_gp_poll(); |
| 1935 | if (cur_ops->start_poll_irqsoff) |
| 1936 | local_irq_enable(); |
| 1937 | while (!cur_ops->poll_gp_state(gp_snap)) { |
| 1938 | torture_hrtimeout_jiffies(torture_random(&rand) % 16,
| 1939 | &rand);
| 1940 | } |
| 1941 | break; |
| 1942 | case RTWS_POLL_GET_FULL: |
| 1943 | if (cur_ops->start_poll_irqsoff) |
| 1944 | local_irq_disable(); |
| 1945 | cur_ops->start_gp_poll_full(&gp_snap_full); |
| 1946 | if (cur_ops->start_poll_irqsoff) |
| 1947 | local_irq_enable(); |
| 1948 | while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { |
| 1949 | torture_hrtimeout_jiffies(torture_random(&rand) % 16,
| 1950 | &rand);
| 1951 | } |
| 1952 | break; |
| 1953 | case RTWS_POLL_GET_EXP: |
| 1954 | gp_snap = cur_ops->start_gp_poll_exp(); |
| 1955 | while (!cur_ops->poll_gp_state_exp(gp_snap)) { |
| 1956 | torture_hrtimeout_jiffies(torture_random(&rand) % 16,
| 1957 | &rand);
| 1958 | } |
| 1959 | break; |
| 1960 | case RTWS_POLL_GET_EXP_FULL: |
| 1961 | cur_ops->start_gp_poll_exp_full(&gp_snap_full); |
| 1962 | while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { |
| 1963 | torture_hrtimeout_jiffies(torture_random(&rand) % 16,
| 1964 | &rand);
| 1965 | } |
| 1966 | break; |
| 1967 | case RTWS_SYNC: |
| 1968 | cur_ops->sync(); |
| 1969 | break; |
| 1970 | default: |
| 1971 | WARN_ON_ONCE(1); |
| 1972 | break; |
| 1973 | } |
| 1974 | } |
| 1975 | stutter_wait("rcu_torture_fakewriter" );
| 1976 | } while (!torture_must_stop()); |
| 1977 | |
| 1978 | torture_kthread_stopping("rcu_torture_fakewriter" );
| 1979 | return 0; |
| 1980 | } |
| 1981 | |
| 1982 | static void rcu_torture_timer_cb(struct rcu_head *rhp) |
| 1983 | { |
| 1984 | kfree(rhp);
| 1985 | } |
| 1986 | |
| 1987 | // Set up and carry out testing of RCU's global memory ordering |
| 1988 | static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp, |
| 1989 | struct torture_random_state *trsp) |
| 1990 | { |
| 1991 | unsigned long loops; |
| 1992 | int noc = torture_num_online_cpus(); |
| 1993 | int rdrchked; |
| 1994 | int rdrchker; |
| 1995 | struct rcu_torture_reader_check *rtrcp; // Me. |
| 1996 | struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking. |
| 1997 | struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked. |
| 1998 | struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me. |
| 1999 | |
| 2000 | if (myid < 0) |
| 2001 | return; // Don't try this from timer handlers. |
| 2002 | |
| 2003 | // Increment my counter. |
| 2004 | rtrcp = &rcu_torture_reader_mbchk[myid]; |
| 2005 | WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1); |
| 2006 | |
| 2007 | // Attempt to assign someone else some checking work. |
| 2008 | rdrchked = torture_random(trsp) % nrealreaders; |
| 2009 | rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; |
| 2010 | rdrchker = torture_random(trsp) % nrealreaders; |
| 2011 | rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker]; |
| 2012 | if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker && |
| 2013 | smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below. |
| 2014 | !READ_ONCE(rtp->rtort_chkp) && |
| 2015 | !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below. |
| 2016 | rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops); |
| 2017 | WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0); |
| 2018 | rtrcp->rtc_chkrdr = rdrchked; |
| 2019 | WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends. |
| 2020 | if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) || |
| 2021 | cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp)) |
| 2022 | (void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out. |
| 2023 | } |
| 2024 | |
| 2025 | // If assigned some completed work, do it! |
| 2026 | rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner); |
| 2027 | if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready)) |
| 2028 | return; // No work or work not yet ready. |
| 2029 | rdrchked = rtrcp_assigner->rtc_chkrdr; |
| 2030 | if (WARN_ON_ONCE(rdrchked < 0)) |
| 2031 | return; |
| 2032 | rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; |
| 2033 | loops = READ_ONCE(rtrcp_chked->rtc_myloops); |
| 2034 | atomic_inc(&n_rcu_torture_mbchk_tries);
| 2035 | if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
| 2036 | atomic_inc(&n_rcu_torture_mbchk_fail);
| 2037 | rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2; |
| 2038 | rtrcp_assigner->rtc_ready = 0; |
| 2039 | smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work. |
| 2040 | smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign. |
| 2041 | } |
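| | // Handshake summary: reader A (rtrcp above) snapshots reader B's
| | // (rdrchked) loop count and publishes itself both to reader C's
| | // (rdrchker) ->rtc_assigner slot and to the current element's
| | // ->rtort_chkp.  Once the grace period ends and ->rtc_ready is set,
| | // C re-reads B's counter on the far side of that grace period and
| | // flags a memory-ordering failure if it went backward relative to
| | // A's pre-GP snapshot.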
| 2042 | |
| 2043 | // Verify the specified RCUTORTURE_RDR* state. |
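| | // ROEC_ARGS packs the format string and arguments shared by every
| | // WARN_ONCE() below, so each check uniformly reports the current state,
| | // the bits being added and removed, and preempt_count().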
| 2044 | #define ROEC_ARGS "%s %s: Current %#x To add %#x To remove %#x preempt_count() %#x\n", __func__, s, curstate, new, old, preempt_count() |
| 2045 | static void rcutorture_one_extend_check(char *s, int curstate, int new, int old) |
| 2046 | { |
| 2047 | int mask; |
| 2048 | |
| 2049 | if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST_CHK_RDR_STATE) || in_nmi()) |
| 2050 | return; |
| 2051 | |
| 2052 | WARN_ONCE(!(curstate & RCUTORTURE_RDR_IRQ) && irqs_disabled() && !in_hardirq(), ROEC_ARGS); |
| 2053 | WARN_ONCE((curstate & RCUTORTURE_RDR_IRQ) && !irqs_disabled(), ROEC_ARGS); |
| 2054 | |
| 2055 | // If CONFIG_PREEMPT_COUNT=n, further checks are unreliable. |
| 2056 | if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) |
| 2057 | return; |
| 2058 | |
| 2059 | WARN_ONCE((curstate & (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH)) && |
| 2060 | !softirq_count(), ROEC_ARGS); |
| 2061 | WARN_ONCE((curstate & (RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED)) && |
| 2062 | !(preempt_count() & PREEMPT_MASK), ROEC_ARGS); |
| 2063 | WARN_ONCE(cur_ops->readlock_nesting && |
| 2064 | (curstate & (RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2)) && |
| 2065 | cur_ops->readlock_nesting() == 0, ROEC_ARGS); |
| 2066 | |
| 2067 | // Interrupt handlers have all sorts of stuff disabled, so ignore |
| 2068 | // unintended disabling. |
| 2069 | if (in_serving_softirq() || in_hardirq()) |
| 2070 | return; |
| 2071 | |
| 2072 | WARN_ONCE(cur_ops->extendables && |
| 2073 | !(curstate & (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH)) && |
| 2074 | softirq_count(), ROEC_ARGS); |
| 2075 | |
| 2076 | /* |
| 2077 | * non-preemptible RCU in a preemptible kernel uses preempt_disable() |
| 2078 | * as rcu_read_lock(). |
| 2079 | */ |
| 2080 | mask = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED; |
| 2081 | if (!IS_ENABLED(CONFIG_PREEMPT_RCU)) |
| 2082 | mask |= RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2; |
| 2083 | |
| 2084 | WARN_ONCE(cur_ops->extendables && !(curstate & mask) && |
| 2085 | (preempt_count() & PREEMPT_MASK), ROEC_ARGS); |
| 2086 | |
| 2087 | /* |
| 2088 | * non-preemptible RCU in a preemptible kernel uses "preempt_count() & |
| 2089 | * PREEMPT_MASK" as ->readlock_nesting(). |
| 2090 | */ |
| 2091 | mask = RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2; |
| 2092 | if (!IS_ENABLED(CONFIG_PREEMPT_RCU)) |
| 2093 | mask |= RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED; |
| 2094 | |
| 2095 | if (IS_ENABLED(CONFIG_PREEMPT_RT) && softirq_count()) |
| 2096 | mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; |
| 2097 | |
| 2098 | WARN_ONCE(cur_ops->readlock_nesting && !(curstate & mask) && |
| 2099 | cur_ops->readlock_nesting() > 0, ROEC_ARGS); |
| 2100 | } |
| 2101 | |
| 2102 | /* |
| 2103 | * Do one extension of an RCU read-side critical section using the |
| 2104 | * current reader state in readstate (set to zero for initial entry |
| 2105 | * to extended critical section), set the new state as specified by |
| 2106 | * newstate (set to zero for final exit from extended critical section), |
| 2107 | * and random-number-generator state in trsp. If this is neither the |
| 2108 | * and random-number-generator state in trsp. If this is neither the
| 2109 | * beginning nor the end of the critical section and if there was actually a
| 2110 | */ |
| 2111 | static void rcutorture_one_extend(int *readstate, int newstate, struct torture_random_state *trsp, |
| 2112 | struct rt_read_seg *rtrsp) |
| 2113 | { |
| 2114 | bool first; |
| 2115 | unsigned long flags; |
| 2116 | int idxnew1 = -1; |
| 2117 | int idxnew2 = -1; |
| 2118 | int idxold1 = *readstate; |
| 2119 | int idxold2 = idxold1; |
| 2120 | int statesnew = ~*readstate & newstate; |
| 2121 | int statesold = *readstate & ~newstate; |
| 2122 | |
| 2123 | first = idxold1 == 0; |
| 2124 | WARN_ON_ONCE(idxold2 < 0); |
| 2125 | WARN_ON_ONCE(idxold2 & ~(RCUTORTURE_RDR_ALLBITS | RCUTORTURE_RDR_UPDOWN)); |
| 2126 | rcutorture_one_extend_check("before change" , idxold1, statesnew, statesold);
| 2127 | rtrsp->rt_readstate = newstate; |
| 2128 | |
| 2129 | /* First, put new protection in place to avoid critical-section gap. */ |
| 2130 | if (statesnew & RCUTORTURE_RDR_BH) |
| 2131 | local_bh_disable(); |
| 2132 | if (statesnew & RCUTORTURE_RDR_RBH) |
| 2133 | rcu_read_lock_bh(); |
| 2134 | if (statesnew & RCUTORTURE_RDR_IRQ) |
| 2135 | local_irq_disable(); |
| 2136 | if (statesnew & RCUTORTURE_RDR_PREEMPT) |
| 2137 | preempt_disable(); |
| 2138 | if (statesnew & RCUTORTURE_RDR_SCHED) |
| 2139 | rcu_read_lock_sched(); |
| 2140 | if (statesnew & RCUTORTURE_RDR_RCU_1) |
| 2141 | idxnew1 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_1) & RCUTORTURE_RDR_MASK_1; |
| 2142 | if (statesnew & RCUTORTURE_RDR_RCU_2) |
| 2143 | idxnew2 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_2) & RCUTORTURE_RDR_MASK_2; |
| 2144 | |
| 2145 | // Complain unless both the old and the new protection are in place.
| 2146 | rcutorture_one_extend_check("during change" , idxold1 | statesnew, statesnew, statesold);
| 2147 | |
| 2148 | // Sample CPU under both sets of protections to reduce confusion. |
| 2149 | if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)) { |
| 2150 | int cpu = raw_smp_processor_id(); |
| 2151 | rtrsp->rt_cpu = cpu; |
| 2152 | if (!first) { |
| 2153 | rtrsp[-1].rt_end_cpu = cpu; |
| 2154 | if (cur_ops->reader_blocked) |
| 2155 | rtrsp[-1].rt_preempted = cur_ops->reader_blocked(); |
| 2156 | } |
| 2157 | } |
| 2158 | // Sample grace-period sequence number, as good a place as any. |
| 2159 | if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_GP) && cur_ops->gather_gp_seqs) { |
| 2160 | rtrsp->rt_gp_seq = cur_ops->gather_gp_seqs(); |
| 2161 | rtrsp->rt_ts = ktime_get_mono_fast_ns(); |
| 2162 | if (!first) |
| 2163 | rtrsp[-1].rt_gp_seq_end = rtrsp->rt_gp_seq; |
| 2164 | } |
| 2165 | |
| 2166 | /* |
| 2167 | * Next, remove old protection, in decreasing order of strength |
| 2168 | * to avoid unlock paths that aren't safe in the stronger |
| 2169 | * context. Namely: BH cannot be enabled with interrupts disabled.
| 2170 | * Additionally, PREEMPT_RT requires that BH be enabled in preemptible
| 2171 | * context. |
| 2172 | */ |
| 2173 | if (statesold & RCUTORTURE_RDR_IRQ) |
| 2174 | local_irq_enable(); |
| 2175 | if (statesold & RCUTORTURE_RDR_PREEMPT) |
| 2176 | preempt_enable(); |
| 2177 | if (statesold & RCUTORTURE_RDR_SCHED) |
| 2178 | rcu_read_unlock_sched(); |
| 2179 | if (statesold & RCUTORTURE_RDR_BH) |
| 2180 | local_bh_enable(); |
| 2181 | if (statesold & RCUTORTURE_RDR_RBH) |
| 2182 | rcu_read_unlock_bh(); |
| 2183 | if (statesold & RCUTORTURE_RDR_RCU_2) { |
| 2184 | cur_ops->readunlock((idxold2 & RCUTORTURE_RDR_MASK_2) >> RCUTORTURE_RDR_SHIFT_2); |
| 2185 | WARN_ON_ONCE(idxnew2 != -1); |
| 2186 | idxold2 = 0; |
| 2187 | } |
| 2188 | if (statesold & RCUTORTURE_RDR_RCU_1) { |
| 2189 | bool lockit; |
| 2190 | |
| 2191 | lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff); |
| 2192 | if (lockit) |
| 2193 | raw_spin_lock_irqsave(¤t->pi_lock, flags); |
| 2194 | cur_ops->readunlock((idxold1 & RCUTORTURE_RDR_MASK_1) >> RCUTORTURE_RDR_SHIFT_1); |
| 2195 | WARN_ON_ONCE(idxnew1 != -1); |
| 2196 | idxold1 = 0; |
| 2197 | if (lockit) |
| 2198 | raw_spin_unlock_irqrestore(¤t->pi_lock, flags); |
| 2199 | } |
| 2200 | if (statesold & RCUTORTURE_RDR_UPDOWN) { |
| 2201 | cur_ops->up_read((idxold1 & RCUTORTURE_RDR_MASK_1) >> RCUTORTURE_RDR_SHIFT_1); |
| 2202 | WARN_ON_ONCE(idxnew1 != -1); |
| 2203 | idxold1 = 0; |
| 2204 | } |
| 2205 | |
| 2206 | /* Delay if neither beginning nor end and there was a change. */ |
| 2207 | if ((statesnew || statesold) && *readstate && newstate) |
| 2208 | cur_ops->read_delay(trsp, rtrsp); |
| 2209 | |
| 2210 | /* Update the reader state. */ |
| 2211 | if (idxnew1 == -1) |
| 2212 | idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1; |
| 2213 | WARN_ON_ONCE(idxnew1 < 0); |
| 2214 | if (idxnew2 == -1) |
| 2215 | idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2; |
| 2216 | WARN_ON_ONCE(idxnew2 < 0); |
| 2217 | *readstate = idxnew1 | idxnew2 | newstate; |
| 2218 | WARN_ON_ONCE(*readstate < 0); |
| 2219 | if (WARN_ON_ONCE(*readstate & ~RCUTORTURE_RDR_ALLBITS)) |
| 2220 | pr_info("Unexpected readstate value of %#x\n" , *readstate); |
| 2221 | rcutorture_one_extend_check("after change" , *readstate, statesnew, statesold);
| 2222 | } |
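| | /*
| |  * Illustrative sketch (not taken from the code above): extending a
| |  * reader from rcu_read_lock() coverage to rcu_read_lock_bh() coverage
| |  * proceeds hand-over-hand, acquiring the new protection before dropping
| |  * the old one so that the critical section is never left unprotected:
| |  *
| |  *	rcu_read_lock_bh();	// statesnew & RCUTORTURE_RDR_RBH
| |  *	rcu_read_unlock();	// statesold & RCUTORTURE_RDR_RCU_1
| | */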
| 2223 | |
| 2224 | /* Return the biggest extendables mask given current RCU and boot parameters. */ |
| 2225 | static int rcutorture_extend_mask_max(void) |
| 2226 | { |
| 2227 | int mask; |
| 2228 | |
| 2229 | WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND); |
| 2230 | mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables; |
| 2231 | mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2; |
| 2232 | return mask; |
| 2233 | } |
| 2234 | |
| 2235 | /* Return a random protection state mask, but with at least one bit set. */ |
| 2236 | static int |
| 2237 | rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) |
| 2238 | { |
| 2239 | int mask = rcutorture_extend_mask_max(); |
| 2240 | unsigned long randmask1 = torture_random(trsp); |
| 2241 | unsigned long randmask2 = randmask1 >> 3; |
| 2242 | unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED; |
| 2243 | unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ; |
| 2244 | unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; |
| 2245 | |
| 2246 | WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1); // Can't have reader idx bits. |
| 2247 | /* Mostly only one bit (need preemption!), sometimes lots of bits. */ |
| 2248 | if (!(randmask1 & 0x7)) |
| 2249 | mask = mask & randmask2; |
| 2250 | else |
| 2251 | mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS)); |
| 2252 | |
| 2253 | // Can't have nested RCU reader without outer RCU reader. |
| 2254 | if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) { |
| 2255 | if (oldmask & RCUTORTURE_RDR_RCU_1) |
| 2256 | mask &= ~RCUTORTURE_RDR_RCU_2; |
| 2257 | else |
| 2258 | mask |= RCUTORTURE_RDR_RCU_1; |
| 2259 | } |
| 2260 | |
| 2261 | /* |
| 2262 | * Can't enable bh w/irq disabled. |
| 2263 | */ |
| 2264 | if (mask & RCUTORTURE_RDR_IRQ) |
| 2265 | mask |= oldmask & bhs; |
| 2266 | |
| 2267 | /* |
| 2268 | * Ideally these sequences would be detected in debug builds |
| 2269 | * (regardless of RT), but until then don't stop testing |
| 2270 | * them on non-RT. |
| 2271 | */ |
| 2272 | if (IS_ENABLED(CONFIG_PREEMPT_RT)) { |
| 2273 | /* Can't modify BH in atomic context */ |
| 2274 | if (oldmask & preempts_irq) |
| 2275 | mask &= ~bhs; |
| 2276 | if ((oldmask | mask) & preempts_irq) |
| 2277 | mask |= oldmask & bhs; |
| 2278 | } |
| 2279 | |
| 2280 | return mask ?: RCUTORTURE_RDR_RCU_1; |
| 2281 | } |
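| | /*
| |  * Worked example: if the random mask selects RCUTORTURE_RDR_IRQ while
| |  * oldmask holds a BH bit, "mask |= oldmask & bhs" carries that BH bit
| |  * forward, because dropping it would make rcutorture_one_extend() call
| |  * local_bh_enable() with interrupts disabled, which is forbidden.
| | */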
| 2282 | |
| 2283 | /* |
| 2284 | * Do a randomly selected number of extensions of an existing RCU read-side |
| 2285 | * critical section. |
| 2286 | */ |
| 2287 | static struct rt_read_seg * |
| 2288 | rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp, struct rt_read_seg *rtrsp) |
| 2289 | { |
| 2290 | int i; |
| 2291 | int j; |
| 2292 | int mask = rcutorture_extend_mask_max(); |
| 2293 | |
| 2294 | WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */ |
| 2295 | if (!((mask - 1) & mask)) |
| 2296 | return rtrsp; /* Current RCU reader not extendable. */ |
| 2297 | /* Bias towards larger numbers of loops. */ |
| 2298 | i = torture_random(trsp); |
| 2299 | i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1; |
| 2300 | for (j = 0; j < i; j++) { |
| 2301 | mask = rcutorture_extend_mask(*readstate, trsp);
| 2302 | WARN_ON_ONCE(mask & RCUTORTURE_RDR_UPDOWN);
| 2303 | rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
| 2304 | } |
| 2305 | return &rtrsp[j]; |
| 2306 | } |
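| | /*
| |  * Loop-count bias above: ORing i with i >> 3 before masking with
| |  * RCUTORTURE_RDR_MAX_LOOPS (0x7) leaves each result bit clear only when
| |  * two independent random bits are both clear (one chance in four per
| |  * bit), skewing the 1..8 loop count toward the high end.
| | */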
| 2307 | |
| 2308 | struct rcu_torture_one_read_state { |
| 2309 | bool checkpolling; |
| 2310 | unsigned long cookie; |
| 2311 | struct rcu_gp_oldstate cookie_full; |
| 2312 | unsigned long started; |
| 2313 | struct rcu_torture *p; |
| 2314 | int readstate; |
| 2315 | struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS]; |
| 2316 | struct rt_read_seg *rtrsp; |
| 2317 | unsigned long long ts; |
| 2318 | }; |
| 2319 | |
| 2320 | static void init_rcu_torture_one_read_state(struct rcu_torture_one_read_state *rtorsp, |
| 2321 | struct torture_random_state *trsp) |
| 2322 | { |
| 2323 | memset(rtorsp, 0, sizeof(*rtorsp)); |
| 2324 | rtorsp->checkpolling = !(torture_random(trsp) & 0xfff); |
| 2325 | rtorsp->rtrsp = &rtorsp->rtseg[0]; |
| 2326 | } |
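| | // The 0xfff mask above enables cookie checking on roughly one read in
| | // 4096, keeping polled-API overhead off the common reader path.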
| 2327 | |
| 2328 | /* |
| 2329 | * Set up the first segment of a series of overlapping read-side |
| 2330 | * critical sections. The caller must have actually initiated the |
| 2331 | * outermost read-side critical section. |
| 2332 | */ |
| 2333 | static bool rcu_torture_one_read_start(struct rcu_torture_one_read_state *rtorsp, |
| 2334 | struct torture_random_state *trsp, long myid) |
| 2335 | { |
| 2336 | if (rtorsp->checkpolling) { |
| 2337 | if (cur_ops->get_gp_state && cur_ops->poll_gp_state) |
| 2338 | rtorsp->cookie = cur_ops->get_gp_state(); |
| 2339 | if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) |
| 2340 | cur_ops->get_gp_state_full(&rtorsp->cookie_full); |
| 2341 | } |
| 2342 | rtorsp->started = cur_ops->get_gp_seq(); |
| 2343 | rtorsp->ts = rcu_trace_clock_local(); |
| 2344 | rtorsp->p = rcu_dereference_check(rcu_torture_current, |
| 2345 | !cur_ops->readlock_held || cur_ops->readlock_held() || |
| 2346 | (rtorsp->readstate & RCUTORTURE_RDR_UPDOWN)); |
| 2347 | if (rtorsp->p == NULL) { |
| 2348 | /* Wait for rcu_torture_writer to get underway */ |
| 2349 | rcutorture_one_extend(&rtorsp->readstate, 0, trsp, rtorsp->rtrsp);
| 2350 | return false; |
| 2351 | } |
| 2352 | if (rtorsp->p->rtort_mbtest == 0) |
| 2353 | atomic_inc(&n_rcu_torture_mberror);
| 2354 | rcu_torture_reader_do_mbchk(myid, rtorsp->p, trsp);
| 2355 | return true; |
| 2356 | } |
| 2357 | |
| 2358 | /* |
| 2359 | * Complete the last segment of a series of overlapping read-side |
| 2360 | * critical sections and check for errors. |
| 2361 | */ |
| 2362 | static void rcu_torture_one_read_end(struct rcu_torture_one_read_state *rtorsp, |
| 2363 | struct torture_random_state *trsp) |
| 2364 | { |
| 2365 | int i; |
| 2366 | unsigned long completed; |
| 2367 | int pipe_count; |
| 2368 | bool preempted = false; |
| 2369 | struct rt_read_seg *rtrsp1; |
| 2370 | |
| 2371 | preempt_disable(); |
| 2372 | pipe_count = READ_ONCE(rtorsp->p->rtort_pipe_count); |
| 2373 | if (pipe_count > RCU_TORTURE_PIPE_LEN) { |
| 2374 | // Should not happen in a correct RCU implementation, |
| 2375 | // happens quite often for torture_type=busted. |
| 2376 | pipe_count = RCU_TORTURE_PIPE_LEN; |
| 2377 | } |
| 2378 | completed = cur_ops->get_gp_seq(); |
| 2379 | if (pipe_count > 1) { |
| 2380 | do_trace_rcu_torture_read(cur_ops->name, &rtorsp->p->rtort_rcu,
| 2381 | rtorsp->ts, rtorsp->started, completed);
| 2382 | rcu_ftrace_dump(DUMP_ALL); |
| 2383 | } |
| 2384 | __this_cpu_inc(rcu_torture_count[pipe_count]); |
| 2385 | completed = rcutorture_seq_diff(completed, rtorsp->started);
| 2386 | if (completed > RCU_TORTURE_PIPE_LEN) { |
| 2387 | /* Should not happen, but... */ |
| 2388 | completed = RCU_TORTURE_PIPE_LEN; |
| 2389 | } |
| 2390 | __this_cpu_inc(rcu_torture_batch[completed]); |
| 2391 | preempt_enable(); |
| 2392 | if (rtorsp->checkpolling) { |
| 2393 | if (cur_ops->get_gp_state && cur_ops->poll_gp_state) |
| 2394 | WARN_ONCE(cur_ops->poll_gp_state(rtorsp->cookie), |
| 2395 | "%s: Cookie check 2 failed %s(%d) %lu->%lu\n" , |
| 2396 | __func__, |
| 2397 | rcu_torture_writer_state_getname(), |
| 2398 | rcu_torture_writer_state, |
| 2399 | rtorsp->cookie, cur_ops->get_gp_state()); |
| 2400 | if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) |
| 2401 | WARN_ONCE(cur_ops->poll_gp_state_full(&rtorsp->cookie_full), |
| 2402 | "%s: Cookie check 6 failed %s(%d) online %*pbl\n" , |
| 2403 | __func__, |
| 2404 | rcu_torture_writer_state_getname(), |
| 2405 | rcu_torture_writer_state, |
| 2406 | cpumask_pr_args(cpu_online_mask)); |
| 2407 | } |
| 2408 | if (cur_ops->reader_blocked) |
| 2409 | preempted = cur_ops->reader_blocked(); |
| 2410 | rcutorture_one_extend(&rtorsp->readstate, 0, trsp, rtorsp->rtrsp);
| 2411 | WARN_ON_ONCE(rtorsp->readstate); |
| 2412 | // This next splat is expected behavior if leakpointer, especially |
| 2413 | // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels. |
| 2414 | WARN_ON_ONCE(leakpointer && READ_ONCE(rtorsp->p->rtort_pipe_count) > 1); |
| 2415 | |
| 2416 | /* If error or close call, record the sequence of reader protections. */ |
| 2417 | if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) { |
| 2418 | i = 0; |
| 2419 | for (rtrsp1 = &rtorsp->rtseg[0]; rtrsp1 < rtorsp->rtrsp; rtrsp1++) |
| 2420 | err_segs[i++] = *rtrsp1; |
| 2421 | rt_read_nsegs = i; |
| 2422 | rt_read_preempted = preempted; |
| 2423 | } |
| 2424 | } |
| 2425 | |
| 2426 | /* |
| 2427 | * Do one read-side critical section, returning false if there was |
| 2428 | * no data to read. Can be invoked both from process context and |
| 2429 | * from a timer handler. |
| 2430 | */ |
| 2431 | static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid) |
| 2432 | { |
| 2433 | int newstate; |
| 2434 | struct rcu_torture_one_read_state rtors; |
| 2435 | |
| 2436 | WARN_ON_ONCE(!rcu_is_watching()); |
| 2437 | init_rcu_torture_one_read_state(&rtors, trsp);
| 2438 | newstate = rcutorture_extend_mask(rtors.readstate, trsp);
| 2439 | WARN_ON_ONCE(newstate & RCUTORTURE_RDR_UPDOWN);
| 2440 | rcutorture_one_extend(&rtors.readstate, newstate, trsp, rtors.rtrsp++);
| 2441 | if (!rcu_torture_one_read_start(&rtors, trsp, myid))
| 2442 | return false;
| 2443 | rtors.rtrsp = rcutorture_loop_extend(&rtors.readstate, trsp, rtors.rtrsp);
| 2444 | rcu_torture_one_read_end(&rtors, trsp);
| 2445 | return true; |
| 2446 | } |
| 2447 | |
| 2448 | static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand); |
| 2449 | |
| 2450 | /* |
| 2451 | * RCU torture reader from timer handler. Dereferences rcu_torture_current, |
| 2452 | * incrementing the corresponding element of the pipeline array. The |
| 2453 | * counter in the element should never be greater than 1; otherwise, the
| 2454 | * RCU implementation is broken. |
| 2455 | */ |
| 2456 | static void rcu_torture_timer(struct timer_list *unused) |
| 2457 | { |
| 2458 | atomic_long_inc(&n_rcu_torture_timers);
| 2459 | (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1);
| 2460 | |
| 2461 | /* Test call_rcu() invocation from interrupt handler. */ |
| 2462 | if (cur_ops->call) { |
| 2463 | struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT); |
| 2464 | |
| 2465 | if (rhp) |
| 2466 | cur_ops->call(rhp, rcu_torture_timer_cb); |
| 2467 | } |
| 2468 | } |
| 2469 | |
| 2470 | /* |
| 2471 | * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current, |
| 2472 | * incrementing the corresponding element of the pipeline array. The |
| 2473 | * counter in the element should never be greater than 1; otherwise, the
| 2474 | * RCU implementation is broken. |
| 2475 | */ |
| 2476 | static int |
| 2477 | rcu_torture_reader(void *arg) |
| 2478 | { |
| 2479 | unsigned long lastsleep = jiffies; |
| 2480 | long myid = (long)arg; |
| 2481 | int mynumonline = myid; |
| 2482 | DEFINE_TORTURE_RANDOM(rand); |
| 2483 | struct timer_list t; |
| 2484 | |
| 2485 | VERBOSE_TOROUT_STRING("rcu_torture_reader task started" ); |
| 2486 | set_user_nice(current, MAX_NICE); |
| 2487 | if (irqreader && cur_ops->irq_capable) |
| 2488 | timer_setup_on_stack(&t, rcu_torture_timer, 0); |
| 2489 | tick_dep_set_task(current, TICK_DEP_BIT_RCU); // CPU bound, so need tick.
| 2490 | do { |
| 2491 | if (irqreader && cur_ops->irq_capable) { |
| 2492 | if (!timer_pending(&t))
| 2493 | mod_timer(&t, jiffies + 1);
| 2494 | } |
| 2495 | if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop())
| 2496 | schedule_timeout_interruptible(HZ); |
| 2497 | if (time_after(jiffies, lastsleep) && !torture_must_stop()) { |
| 2498 | torture_hrtimeout_us(500, 1000, &rand);
| 2499 | lastsleep = jiffies + 10; |
| 2500 | } |
| 2501 | while (!torture_must_stop() && |
| 2502 | (torture_num_online_cpus() < mynumonline || !rcu_inkernel_boot_has_ended())) |
| 2503 | schedule_timeout_interruptible(HZ / 5); |
| 2504 | stutter_wait("rcu_torture_reader" );
| 2505 | } while (!torture_must_stop()); |
| 2506 | if (irqreader && cur_ops->irq_capable) { |
| 2507 | timer_delete_sync(&t);
| 2508 | timer_destroy_on_stack(&t);
| 2509 | } |
| 2510 | tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
| 2511 | torture_kthread_stopping("rcu_torture_reader" );
| 2512 | return 0; |
| 2513 | } |
| 2514 | |
| 2515 | struct rcu_torture_one_read_state_updown { |
| 2516 | struct hrtimer rtorsu_hrt; |
| 2517 | bool rtorsu_inuse; |
| 2518 | ktime_t rtorsu_kt; |
| 2519 | int rtorsu_cpu; |
| 2520 | unsigned long rtorsu_j; |
| 2521 | unsigned long rtorsu_ndowns; |
| 2522 | unsigned long rtorsu_nups; |
| 2523 | unsigned long rtorsu_nmigrates; |
| 2524 | struct torture_random_state rtorsu_trs; |
| 2525 | struct rcu_torture_one_read_state rtorsu_rtors; |
| 2526 | }; |
| 2527 | |
| 2528 | static struct rcu_torture_one_read_state_updown *updownreaders; |
| 2529 | static DEFINE_TORTURE_RANDOM(rcu_torture_updown_rand); |
| 2530 | static int rcu_torture_updown(void *arg); |
| 2531 | |
| 2532 | static enum hrtimer_restart rcu_torture_updown_hrt(struct hrtimer *hrtp) |
| 2533 | { |
| 2534 | int cpu = raw_smp_processor_id(); |
| 2535 | struct rcu_torture_one_read_state_updown *rtorsup; |
| 2536 | |
| 2537 | rtorsup = container_of(hrtp, struct rcu_torture_one_read_state_updown, rtorsu_hrt); |
| 2538 | rcu_torture_one_read_end(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs);
| 2539 | WARN_ONCE(rtorsup->rtorsu_nups >= rtorsup->rtorsu_ndowns, "%s: Up without matching down #%zu.\n" , __func__, rtorsup - updownreaders); |
| 2540 | WRITE_ONCE(rtorsup->rtorsu_nups, rtorsup->rtorsu_nups + 1); |
| 2541 | WRITE_ONCE(rtorsup->rtorsu_nmigrates, |
| 2542 | rtorsup->rtorsu_nmigrates + (cpu != rtorsup->rtorsu_cpu)); |
| 2543 | smp_store_release(&rtorsup->rtorsu_inuse, false); |
| 2544 | return HRTIMER_NORESTART; |
| 2545 | } |
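| | // The smp_store_release() of ->rtorsu_inuse above pairs with the
| | // smp_load_acquire() calls in rcu_torture_updown() and
| | // rcu_torture_updown_cleanup(), so a false ->rtorsu_inuse is observed
| | // only after this handler's rcu_torture_one_read_end() and counter
| | // updates are visible, making the slot safe to reuse.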
| 2546 | |
| 2547 | static int rcu_torture_updown_init(void) |
| 2548 | { |
| 2549 | int i; |
| 2550 | struct torture_random_state *rand = &rcu_torture_updown_rand; |
| 2551 | int ret; |
| 2552 | |
| 2553 | if (n_up_down < 0) |
| 2554 | return 0; |
| 2555 | if (!srcu_torture_have_up_down()) { |
| 2556 | VERBOSE_TOROUT_STRING("rcu_torture_updown_init: Disabling up/down reader tests due to lack of primitives" ); |
| 2557 | return 0; |
| 2558 | } |
| 2559 | updownreaders = kcalloc(n_up_down, sizeof(*updownreaders), GFP_KERNEL); |
| 2560 | if (!updownreaders) { |
| 2561 | VERBOSE_TOROUT_STRING("rcu_torture_updown_init: Out of memory, disabling up/down reader tests" ); |
| 2562 | return -ENOMEM; |
| 2563 | } |
| 2564 | for (i = 0; i < n_up_down; i++) { |
| 2565 | init_rcu_torture_one_read_state(&updownreaders[i].rtorsu_rtors, rand);
| 2566 | hrtimer_setup(&updownreaders[i].rtorsu_hrt, rcu_torture_updown_hrt, CLOCK_MONOTONIC,
| 2567 | HRTIMER_MODE_REL | HRTIMER_MODE_HARD);
| 2568 | torture_random_init(&updownreaders[i].rtorsu_trs);
| 2569 | init_rcu_torture_one_read_state(&updownreaders[i].rtorsu_rtors,
| 2570 | &updownreaders[i].rtorsu_trs);
| 2571 | } |
| 2572 | ret = torture_create_kthread(rcu_torture_updown, rand, updown_task); |
| 2573 | if (ret) { |
| 2574 | kfree(updownreaders);
| 2575 | updownreaders = NULL; |
| 2576 | } |
| 2577 | return ret; |
| 2578 | } |
| 2579 | |
| 2580 | static void rcu_torture_updown_cleanup(void) |
| 2581 | { |
| 2582 | struct rcu_torture_one_read_state_updown *rtorsup; |
| 2583 | |
| 2584 | for (rtorsup = updownreaders; rtorsup < &updownreaders[n_up_down]; rtorsup++) { |
| 2585 | if (!smp_load_acquire(&rtorsup->rtorsu_inuse)) |
| 2586 | continue; |
| 2587 | if (hrtimer_cancel(&rtorsup->rtorsu_hrt) || WARN_ON_ONCE(rtorsup->rtorsu_inuse)) {
| 2588 | rcu_torture_one_read_end(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs);
| 2589 | WARN_ONCE(rtorsup->rtorsu_nups >= rtorsup->rtorsu_ndowns, "%s: Up without matching down #%zu.\n" , __func__, rtorsup - updownreaders); |
| 2590 | WRITE_ONCE(rtorsup->rtorsu_nups, rtorsup->rtorsu_nups + 1); |
| 2591 | smp_store_release(&rtorsup->rtorsu_inuse, false); |
| 2592 | } |
| 2593 | |
| 2594 | } |
| 2595 | kfree(updownreaders);
| 2596 | updownreaders = NULL; |
| 2597 | } |
| 2598 | |
| 2599 | // Do one reader for rcu_torture_updown(). |
| 2600 | static void rcu_torture_updown_one(struct rcu_torture_one_read_state_updown *rtorsup) |
| 2601 | { |
| 2602 | int idx; |
| 2603 | int rawidx; |
| 2604 | ktime_t t; |
| 2605 | |
| 2606 | init_rcu_torture_one_read_state(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs);
| 2607 | rawidx = cur_ops->down_read(); |
| 2608 | WRITE_ONCE(rtorsup->rtorsu_ndowns, rtorsup->rtorsu_ndowns + 1); |
| 2609 | idx = (rawidx << RCUTORTURE_RDR_SHIFT_1) & RCUTORTURE_RDR_MASK_1; |
| 2610 | rtorsup->rtorsu_rtors.readstate = idx | RCUTORTURE_RDR_UPDOWN; |
| 2611 | rtorsup->rtorsu_rtors.rtrsp++; |
| 2612 | rtorsup->rtorsu_cpu = raw_smp_processor_id(); |
| 2613 | if (!rcu_torture_one_read_start(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs, -1)) {
| 2614 | WARN_ONCE(rtorsup->rtorsu_nups >= rtorsup->rtorsu_ndowns, "%s: Up without matching down #%zu.\n", __func__, rtorsup - updownreaders);
| 2615 | WRITE_ONCE(rtorsup->rtorsu_nups, rtorsup->rtorsu_nups + 1); |
| 2616 | schedule_timeout_idle(HZ); |
| 2617 | return; |
| 2618 | } |
| 2619 | smp_store_release(&rtorsup->rtorsu_inuse, true); |
| 2620 | t = torture_random(&rtorsup->rtorsu_trs) & 0xfffff; // One per million.
| 2621 | if (t < 10 * 1000)
| 2622 | t = 200 * 1000 * 1000;
| 2623 | hrtimer_start(&rtorsup->rtorsu_hrt, t, HRTIMER_MODE_REL | HRTIMER_MODE_HARD);
| 2624 | smp_mb(); // Sample jiffies after posting hrtimer. |
| 2625 | rtorsup->rtorsu_j = jiffies; // Not used by hrtimer handler. |
| 2626 | rtorsup->rtorsu_kt = t; |
| 2627 | } |
| 2628 | |
| 2629 | /* |
| 2630 | * RCU torture up/down reader kthread, starting RCU readers in kthread |
| 2631 | * context and ending them in hrtimer handlers. Otherwise similar to |
| 2632 | * rcu_torture_reader(). |
| 2633 | */ |
| 2634 | static int |
| 2635 | rcu_torture_updown(void *arg) |
| 2636 | { |
| 2637 | unsigned long j; |
| 2638 | struct rcu_torture_one_read_state_updown *rtorsup; |
| 2639 | |
| 2640 | VERBOSE_TOROUT_STRING("rcu_torture_updown task started" ); |
| 2641 | do { |
| 2642 | for (rtorsup = updownreaders; rtorsup < &updownreaders[n_up_down]; rtorsup++) { |
| 2643 | if (torture_must_stop()) |
| 2644 | break; |
| 2645 | j = smp_load_acquire(&jiffies); // Time before ->rtorsu_inuse. |
| 2646 | if (smp_load_acquire(&rtorsup->rtorsu_inuse)) { |
| 2647 | WARN_ONCE(time_after(j, rtorsup->rtorsu_j + 1 + HZ * 10), |
| 2648 | "hrtimer queued at jiffies %lu for %lld ns took %lu jiffies\n" , rtorsup->rtorsu_j, rtorsup->rtorsu_kt, j - rtorsup->rtorsu_j); |
| 2649 | continue; |
| 2650 | } |
| 2651 | rcu_torture_updown_one(rtorsup); |
| 2652 | } |
| 2653 | torture_hrtimeout_ms(1, 1000, &rcu_torture_updown_rand);
| 2654 | stutter_wait("rcu_torture_updown");
| 2655 | } while (!torture_must_stop()); |
| 2656 | rcu_torture_updown_cleanup(); |
| 2657 | torture_kthread_stopping(title: "rcu_torture_updown" ); |
| 2658 | return 0; |
| 2659 | } |
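/*
 * A minimal standalone sketch of the start-in-task/end-in-hrtimer
 * pattern used above. The my_down_read()/my_up_read() reader hooks
 * are hypothetical; the hrtimer calls are the real API.
 */
#if 0	/* Illustrative sketch only. */
static struct hrtimer ex_hrt;
static int ex_idx;

static enum hrtimer_restart ex_end_reader(struct hrtimer *hrtp)
{
	my_up_read(ex_idx);		/* End the reader from hard-irq context. */
	return HRTIMER_NORESTART;
}

static void ex_begin_reader(void)
{
	ex_idx = my_down_read();	/* Begin the reader in task context. */
	hrtimer_setup(&ex_hrt, ex_end_reader, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL | HRTIMER_MODE_HARD);
	hrtimer_start(&ex_hrt, ns_to_ktime(100 * NSEC_PER_USEC),
		      HRTIMER_MODE_REL | HRTIMER_MODE_HARD);
}
#endif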
| 2660 | |
| 2661 | /* |
| 2662 | * Randomly toggle CPUs' callback-offload state. This uses hrtimers to
| 2663 | * increase race probabilities and fuzzes the interval between toggling. |
| 2664 | */ |
| 2665 | static int rcu_nocb_toggle(void *arg) |
| 2666 | { |
| 2667 | int cpu; |
| 2668 | int maxcpu = -1; |
| 2669 | int oldnice = task_nice(current); |
| 2670 | long r; |
| 2671 | DEFINE_TORTURE_RANDOM(rand); |
| 2672 | ktime_t toggle_delay; |
| 2673 | unsigned long toggle_fuzz; |
| 2674 | ktime_t toggle_interval = ms_to_ktime(nocbs_toggle);
| 2675 | |
| 2676 | VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started" ); |
| 2677 | while (!rcu_inkernel_boot_has_ended()) |
| 2678 | schedule_timeout_interruptible(HZ / 10); |
| 2679 | for_each_possible_cpu(cpu) |
| 2680 | maxcpu = cpu; |
| 2681 | WARN_ON(maxcpu < 0); |
| 2682 | if (toggle_interval > ULONG_MAX) |
| 2683 | toggle_fuzz = ULONG_MAX >> 3; |
| 2684 | else |
| 2685 | toggle_fuzz = toggle_interval >> 3; |
| 2686 | if (toggle_fuzz <= 0) |
| 2687 | toggle_fuzz = NSEC_PER_USEC; |
| 2688 | do {
| 2689 | r = torture_random(&rand);
| 2690 | cpu = (r >> 1) % (maxcpu + 1);
| 2691 | if (r & 0x1) {
| 2692 | rcu_nocb_cpu_offload(cpu);
| 2693 | atomic_long_inc(&n_nocb_offload);
| 2694 | } else {
| 2695 | rcu_nocb_cpu_deoffload(cpu);
| 2696 | atomic_long_inc(&n_nocb_deoffload);
| 2697 | }
| 2698 | toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
| 2699 | set_current_state(TASK_INTERRUPTIBLE);
| 2700 | schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
| 2701 | if (stutter_wait("rcu_nocb_toggle"))
| 2702 | sched_set_normal(current, oldnice);
| 2703 | } while (!torture_must_stop());
| 2704 | torture_kthread_stopping("rcu_nocb_toggle");
| 2705 | return 0; |
| 2706 | } |
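/*
 * Worked example of the fuzzed delay computed above, assuming
 * nocbs_toggle=1000 (a one-second base interval):
 *
 *	toggle_interval = ms_to_ktime(1000)     => 1000000000 ns
 *	toggle_fuzz     = toggle_interval >> 3  =>  125000000 ns
 *	toggle_delay    = random % toggle_fuzz + toggle_interval
 *
 * so successive toggles are spaced uniformly over [1.0s, 1.125s),
 * which keeps offload/deoffload events from beating against other
 * periodic activity.
 */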
| 2707 | |
| 2708 | /* |
| 2709 | * Print torture statistics. Caller must ensure that there is only |
| 2710 | * one call to this function at a given time!!! This is normally |
| 2711 | * accomplished by relying on the module system to only have one copy |
| 2712 | * of the module loaded, and then by giving the rcu_torture_stats |
| 2713 | * kthread full control (or the init/cleanup functions when rcu_torture_stats |
| 2714 | * thread is not running). |
| 2715 | */ |
| 2716 | static void |
| 2717 | rcu_torture_stats_print(void) |
| 2718 | { |
| 2719 | int cpu; |
| 2720 | int i; |
| 2721 | long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; |
| 2722 | long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; |
| 2723 | long n_gpwraps = 0; |
| 2724 | unsigned long ndowns = 0; |
| 2725 | unsigned long nunexpired = 0; |
| 2726 | unsigned long nmigrates = 0; |
| 2727 | unsigned long nups = 0; |
| 2728 | struct rcu_torture *rtcp; |
| 2729 | static unsigned long rtcv_snap = ULONG_MAX; |
| 2730 | static bool splatted; |
| 2731 | struct task_struct *wtp; |
| 2732 | |
| 2733 | for_each_possible_cpu(cpu) { |
| 2734 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { |
| 2735 | pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]); |
| 2736 | batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]); |
| 2737 | } |
| 2738 | if (cur_ops->get_gpwrap_count) |
| 2739 | n_gpwraps += cur_ops->get_gpwrap_count(cpu); |
| 2740 | } |
| 2741 | if (updownreaders) { |
| 2742 | for (i = 0; i < n_up_down; i++) { |
| 2743 | ndowns += READ_ONCE(updownreaders[i].rtorsu_ndowns); |
| 2744 | nups += READ_ONCE(updownreaders[i].rtorsu_nups); |
| 2745 | nunexpired += READ_ONCE(updownreaders[i].rtorsu_inuse); |
| 2746 | nmigrates += READ_ONCE(updownreaders[i].rtorsu_nmigrates); |
| 2747 | } |
| 2748 | } |
| 2749 | for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) { |
| 2750 | if (pipesummary[i] != 0) |
| 2751 | break; |
| 2752 | } // The value of variable "i" is used later, so don't clobber it! |
| 2753 | |
| 2754 | pr_alert("%s%s " , torture_type, TORTURE_FLAG); |
| 2755 | rtcp = rcu_access_pointer(rcu_torture_current); |
| 2756 | pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d " , |
| 2757 | rtcp, |
| 2758 | rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER" , |
| 2759 | rcu_torture_current_version, |
| 2760 | list_empty(&rcu_torture_freelist), |
| 2761 | atomic_read(&n_rcu_torture_alloc), |
| 2762 | atomic_read(&n_rcu_torture_alloc_fail), |
| 2763 | atomic_read(&n_rcu_torture_free)); |
| 2764 | pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld " , |
| 2765 | atomic_read(&n_rcu_torture_mberror), |
| 2766 | atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries), |
| 2767 | n_rcu_torture_barrier_error, |
| 2768 | n_rcu_torture_boost_ktrerror); |
| 2769 | pr_cont("rtbf: %ld rtb: %ld nt: %ld " , |
| 2770 | n_rcu_torture_boost_failure, |
| 2771 | n_rcu_torture_boosts, |
| 2772 | atomic_long_read(&n_rcu_torture_timers)); |
| 2773 | if (updownreaders) |
| 2774 | pr_cont("ndowns: %lu nups: %lu nhrt: %lu nmigrates: %lu " , ndowns, nups, nunexpired, nmigrates); |
| 2775 | torture_onoff_stats(); |
| 2776 | pr_cont("barrier: %ld/%ld:%ld " , |
| 2777 | data_race(n_barrier_successes), |
| 2778 | data_race(n_barrier_attempts), |
| 2779 | data_race(n_rcu_torture_barrier_error)); |
| 2780 | pr_cont("read-exits: %ld " , data_race(n_read_exits)); // Statistic. |
| 2781 | pr_cont("nocb-toggles: %ld:%ld " , |
| 2782 | atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload)); |
| 2783 | pr_cont("gpwraps: %ld\n" , n_gpwraps); |
| 2784 | |
| 2785 | pr_alert("%s%s " , torture_type, TORTURE_FLAG); |
| 2786 | if (atomic_read(&n_rcu_torture_mberror) ||
| 2787 | atomic_read(&n_rcu_torture_mbchk_fail) ||
| 2788 | n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
| 2789 | n_rcu_torture_boost_failure || i > 1) {
| 2790 | pr_cont("%s", "!!! ");
| 2791 | atomic_inc(&n_rcu_torture_error);
| 2792 | WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror)); |
| 2793 | WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail)); |
| 2794 | WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier() |
| 2795 | WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread |
| 2796 | WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?) |
| 2797 | WARN_ON_ONCE(i > 1); // Too-short grace period |
| 2798 | } |
| 2799 | pr_cont("Reader Pipe: " ); |
| 2800 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) |
| 2801 | pr_cont(" %ld" , pipesummary[i]); |
| 2802 | pr_cont("\n" ); |
| 2803 | |
| 2804 | pr_alert("%s%s " , torture_type, TORTURE_FLAG); |
| 2805 | pr_cont("Reader Batch: " ); |
| 2806 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) |
| 2807 | pr_cont(" %ld" , batchsummary[i]); |
| 2808 | pr_cont("\n" ); |
| 2809 | |
| 2810 | pr_alert("%s%s " , torture_type, TORTURE_FLAG); |
| 2811 | pr_cont("Free-Block Circulation: " ); |
| 2812 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { |
| 2813 | pr_cont(" %d" , atomic_read(&rcu_torture_wcount[i])); |
| 2814 | } |
| 2815 | pr_cont("\n" ); |
| 2816 | |
| 2817 | if (cur_ops->stats) |
| 2818 | cur_ops->stats(); |
| 2819 | if (rtcv_snap == rcu_torture_current_version && |
| 2820 | rcu_access_pointer(rcu_torture_current) && |
| 2821 | !rcu_stall_is_suppressed() && |
| 2822 | rcu_inkernel_boot_has_ended()) { |
| 2823 | int __maybe_unused flags = 0; |
| 2824 | unsigned long __maybe_unused gp_seq = 0; |
| 2825 | |
| 2826 | if (cur_ops->get_gp_data) |
| 2827 | cur_ops->get_gp_data(&flags, &gp_seq); |
| 2828 | wtp = READ_ONCE(writer_task); |
| 2829 | pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n" , |
| 2830 | rcu_torture_writer_state_getname(), |
| 2831 | rcu_torture_writer_state, gp_seq, flags, |
| 2832 | wtp == NULL ? ~0U : wtp->__state, |
| 2833 | wtp == NULL ? -1 : (int)task_cpu(wtp)); |
| 2834 | if (!splatted && wtp) { |
| 2835 | sched_show_task(wtp);
| 2836 | splatted = true; |
| 2837 | } |
| 2838 | if (cur_ops->gp_kthread_dbg) |
| 2839 | cur_ops->gp_kthread_dbg(); |
| 2840 | rcu_ftrace_dump(DUMP_ALL); |
| 2841 | } |
| 2842 | rtcv_snap = rcu_torture_current_version; |
| 2843 | } |
| 2844 | |
| 2845 | /* |
| 2846 | * Periodically prints torture statistics, if periodic statistics printing |
| 2847 | * was specified via the stat_interval module parameter. |
| 2848 | */ |
| 2849 | static int |
| 2850 | rcu_torture_stats(void *arg) |
| 2851 | { |
| 2852 | VERBOSE_TOROUT_STRING("rcu_torture_stats task started" ); |
| 2853 | do { |
| 2854 | schedule_timeout_interruptible(stat_interval * HZ);
| 2855 | rcu_torture_stats_print();
| 2856 | torture_shutdown_absorb("rcu_torture_stats");
| 2857 | } while (!torture_must_stop());
| 2858 | torture_kthread_stopping("rcu_torture_stats");
| 2859 | return 0; |
| 2860 | } |
| 2861 | |
| 2862 | /* Test mem_dump_obj() and friends. */ |
| 2863 | static void rcu_torture_mem_dump_obj(void) |
| 2864 | { |
| 2865 | struct rcu_head *rhp; |
| 2866 | struct kmem_cache *kcp; |
| 2867 | static int z; |
| 2868 | |
| 2869 | kcp = kmem_cache_create("rcuscale" , 136, 8, SLAB_STORE_USER, NULL); |
| 2870 | if (WARN_ON_ONCE(!kcp)) |
| 2871 | return; |
| 2872 | rhp = kmem_cache_alloc(kcp, GFP_KERNEL); |
| 2873 | if (WARN_ON_ONCE(!rhp)) { |
| 2874 | kmem_cache_destroy(kcp);
| 2875 | return; |
| 2876 | } |
| 2877 | pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n" , stats_task, &rhp, rhp, &z); |
| 2878 | pr_alert("mem_dump_obj(ZERO_SIZE_PTR):" ); |
| 2879 | mem_dump_obj(ZERO_SIZE_PTR); |
| 2880 | pr_alert("mem_dump_obj(NULL):" ); |
| 2881 | mem_dump_obj(NULL); |
| 2882 | pr_alert("mem_dump_obj(%px):" , &rhp); |
| 2883 | mem_dump_obj(object: &rhp); |
| 2884 | pr_alert("mem_dump_obj(%px):" , rhp); |
| 2885 | mem_dump_obj(object: rhp); |
| 2886 | pr_alert("mem_dump_obj(%px):" , &rhp->func); |
| 2887 | mem_dump_obj(object: &rhp->func); |
| 2888 | pr_alert("mem_dump_obj(%px):" , &z); |
| 2889 | mem_dump_obj(object: &z); |
| 2890 | kmem_cache_free(s: kcp, objp: rhp); |
| 2891 | kmem_cache_destroy(s: kcp); |
| 2892 | rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); |
| 2893 | if (WARN_ON_ONCE(!rhp)) |
| 2894 | return; |
| 2895 | pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n" , stats_task, &rhp, rhp); |
| 2896 | pr_alert("mem_dump_obj(kmalloc %px):" , rhp); |
| 2897 | mem_dump_obj(object: rhp); |
| 2898 | pr_alert("mem_dump_obj(kmalloc %px):" , &rhp->func); |
| 2899 | mem_dump_obj(object: &rhp->func); |
| 2900 | kfree(objp: rhp); |
| 2901 | rhp = vmalloc(4096); |
| 2902 | if (WARN_ON_ONCE(!rhp)) |
| 2903 | return; |
| 2904 | pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n" , stats_task, &rhp, rhp); |
| 2905 | pr_alert("mem_dump_obj(vmalloc %px):" , rhp); |
| 2906 | mem_dump_obj(object: rhp); |
| 2907 | pr_alert("mem_dump_obj(vmalloc %px):" , &rhp->func); |
| 2908 | mem_dump_obj(object: &rhp->func); |
| 2909 | vfree(addr: rhp); |
| 2910 | } |
| 2911 | |
| 2912 | static void |
| 2913 | rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) |
| 2914 | { |
| 2915 | pr_alert("%s" TORTURE_FLAG |
| 2916 | "--- %s: nreaders=%d nfakewriters=%d " |
| 2917 | "stat_interval=%d verbose=%d test_no_idle_hz=%d " |
| 2918 | "shuffle_interval=%d stutter=%d irqreader=%d " |
| 2919 | "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " |
| 2920 | "test_boost=%d/%d test_boost_interval=%d " |
| 2921 | "test_boost_duration=%d test_boost_holdoff=%d shutdown_secs=%d " |
| 2922 | "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d " |
| 2923 | "stall_cpu_block=%d stall_cpu_repeat=%d " |
| 2924 | "n_barrier_cbs=%d " |
| 2925 | "onoff_interval=%d onoff_holdoff=%d " |
| 2926 | "read_exit_delay=%d read_exit_burst=%d " |
| 2927 | "reader_flavor=%x " |
| 2928 | "nocbs_nthreads=%d nocbs_toggle=%d " |
| 2929 | "test_nmis=%d " |
| 2930 | "preempt_duration=%d preempt_interval=%d n_up_down=%d\n" , |
| 2931 | torture_type, tag, nrealreaders, nrealfakewriters, |
| 2932 | stat_interval, verbose, test_no_idle_hz, shuffle_interval, |
| 2933 | stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, |
| 2934 | test_boost, cur_ops->can_boost, |
| 2935 | test_boost_interval, test_boost_duration, test_boost_holdoff, shutdown_secs, |
| 2936 | stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff, |
| 2937 | stall_cpu_block, stall_cpu_repeat, |
| 2938 | n_barrier_cbs, |
| 2939 | onoff_interval, onoff_holdoff, |
| 2940 | read_exit_delay, read_exit_burst, |
| 2941 | reader_flavor, |
| 2942 | nocbs_nthreads, nocbs_toggle, |
| 2943 | test_nmis, |
| 2944 | preempt_duration, preempt_interval, n_up_down); |
| 2945 | } |
| 2946 | |
| 2947 | static int rcutorture_booster_cleanup(unsigned int cpu) |
| 2948 | { |
| 2949 | struct task_struct *t; |
| 2950 | |
| 2951 | if (boost_tasks[cpu] == NULL) |
| 2952 | return 0; |
| 2953 | mutex_lock(&boost_mutex); |
| 2954 | t = boost_tasks[cpu]; |
| 2955 | boost_tasks[cpu] = NULL; |
| 2956 | rcu_torture_enable_rt_throttle(); |
| 2957 | mutex_unlock(&boost_mutex);
| 2958 | |
| 2959 | /* This must be outside of the mutex, otherwise deadlock! */ |
| 2960 | torture_stop_kthread(rcu_torture_boost, t); |
| 2961 | return 0; |
| 2962 | } |
| 2963 | |
| 2964 | static int rcutorture_booster_init(unsigned int cpu) |
| 2965 | { |
| 2966 | int retval; |
| 2967 | |
| 2968 | if (boost_tasks[cpu] != NULL) |
| 2969 | return 0; /* Already created, nothing more to do. */ |
| 2970 | |
| 2971 | // Testing RCU priority boosting requires rcutorture do |
| 2972 | // some serious abuse. Counter this by running ksoftirqd |
| 2973 | // at higher priority. |
| 2974 | if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) { |
| 2975 | struct sched_param sp; |
| 2976 | struct task_struct *t; |
| 2977 | |
| 2978 | t = per_cpu(ksoftirqd, cpu); |
| 2979 | WARN_ON_ONCE(!t); |
| 2980 | sp.sched_priority = 2; |
| 2981 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); |
| 2982 | #ifdef CONFIG_IRQ_FORCED_THREADING |
| 2983 | if (force_irqthreads()) { |
| 2984 | t = per_cpu(ktimerd, cpu); |
| 2985 | WARN_ON_ONCE(!t); |
| 2986 | sp.sched_priority = 2; |
| 2987 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); |
| 2988 | } |
| 2989 | #endif |
| 2990 | } |
| 2991 | |
| 2992 | /* Don't allow time recalculation while creating a new task. */ |
| 2993 | mutex_lock(&boost_mutex); |
| 2994 | rcu_torture_disable_rt_throttle(); |
| 2995 | VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task" ); |
| 2996 | boost_tasks[cpu] = kthread_run_on_cpu(threadfn: rcu_torture_boost, NULL, |
| 2997 | cpu, namefmt: "rcu_torture_boost_%u" ); |
| 2998 | if (IS_ERR(ptr: boost_tasks[cpu])) { |
| 2999 | retval = PTR_ERR(ptr: boost_tasks[cpu]); |
| 3000 | VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed" ); |
| 3001 | n_rcu_torture_boost_ktrerror++; |
| 3002 | boost_tasks[cpu] = NULL; |
| 3003 | mutex_unlock(lock: &boost_mutex); |
| 3004 | return retval; |
| 3005 | } |
| 3006 | mutex_unlock(lock: &boost_mutex); |
| 3007 | return 0; |
| 3008 | } |
| 3009 | |
| 3010 | static int rcu_torture_stall_nf(struct notifier_block *nb, unsigned long v, void *ptr) |
| 3011 | { |
| 3012 | pr_info("%s: v=%lu, duration=%lu.\n" , __func__, v, (unsigned long)ptr); |
| 3013 | return NOTIFY_OK; |
| 3014 | } |
| 3015 | |
| 3016 | static struct notifier_block rcu_torture_stall_block = { |
| 3017 | .notifier_call = rcu_torture_stall_nf, |
| 3018 | }; |
| 3019 | |
| 3020 | /* |
| 3021 | * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then |
| 3022 | * induces a CPU stall for the time specified by stall_cpu. If a new |
| 3023 | * stall test is added, stallsdone in rcu_torture_writer() must be adjusted. |
| 3024 | */ |
| 3025 | static void rcu_torture_stall_one(int rep, int irqsoff) |
| 3026 | { |
| 3027 | int idx; |
| 3028 | unsigned long stop_at; |
| 3029 | |
| 3030 | if (stall_cpu_holdoff > 0) { |
| 3031 | VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff" ); |
| 3032 | schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
| 3033 | VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff" ); |
| 3034 | } |
| 3035 | if (!kthread_should_stop() && stall_gp_kthread > 0) { |
| 3036 | VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall" ); |
| 3037 | rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
| 3038 | for (idx = 0; idx < stall_gp_kthread + 2; idx++) { |
| 3039 | if (kthread_should_stop()) |
| 3040 | break; |
| 3041 | schedule_timeout_uninterruptible(HZ); |
| 3042 | } |
| 3043 | } |
| 3044 | if (!kthread_should_stop() && stall_cpu > 0) { |
| 3045 | VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall" ); |
| 3046 | stop_at = ktime_get_seconds() + stall_cpu; |
| 3047 | /* RCU CPU stall is expected behavior in following code. */ |
| 3048 | idx = cur_ops->readlock(); |
| 3049 | if (irqsoff) |
| 3050 | local_irq_disable(); |
| 3051 | else if (!stall_cpu_block) |
| 3052 | preempt_disable(); |
| 3053 | pr_alert("%s start stall episode %d on CPU %d.\n" , |
| 3054 | __func__, rep + 1, raw_smp_processor_id()); |
| 3055 | while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), stop_at) && |
| 3056 | !kthread_should_stop()) |
| 3057 | if (stall_cpu_block) { |
| 3058 | #ifdef CONFIG_PREEMPTION |
| 3059 | preempt_schedule(); |
| 3060 | #else |
| 3061 | schedule_timeout_uninterruptible(HZ); |
| 3062 | #endif |
| 3063 | } else if (stall_no_softlockup) { |
| 3064 | touch_softlockup_watchdog(); |
| 3065 | } |
| 3066 | if (irqsoff) |
| 3067 | local_irq_enable(); |
| 3068 | else if (!stall_cpu_block) |
| 3069 | preempt_enable(); |
| 3070 | cur_ops->readunlock(idx); |
| 3071 | } |
| 3072 | } |
| 3073 | |
| 3074 | /* |
| 3075 | * CPU-stall kthread. Invokes rcu_torture_stall_one() once, and then as many |
| 3076 | * additional times as specified by the stall_cpu_repeat module parameter. |
| 3077 | * Note that stall_cpu_irqsoff is ignored on the second and subsequent
| 3078 | * stalls.
| 3079 | */ |
| 3080 | static int rcu_torture_stall(void *args) |
| 3081 | { |
| 3082 | int i; |
| 3083 | int repeat = stall_cpu_repeat; |
| 3084 | int ret; |
| 3085 | |
| 3086 | VERBOSE_TOROUT_STRING("rcu_torture_stall task started" ); |
| 3087 | if (repeat < 0) { |
| 3088 | repeat = 0; |
| 3089 | WARN_ON_ONCE(IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)); |
| 3090 | } |
| 3091 | if (rcu_cpu_stall_notifiers) { |
| 3092 | ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block);
| 3093 | if (ret) |
| 3094 | pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n" , |
| 3095 | __func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : "" ); |
| 3096 | } |
| 3097 | for (i = 0; i <= repeat; i++) { |
| 3098 | if (kthread_should_stop()) |
| 3099 | break; |
| 3100 | rcu_torture_stall_one(i, i == 0 ? stall_cpu_irqsoff : 0);
| 3101 | } |
| 3102 | pr_alert("%s end.\n" , __func__); |
| 3103 | if (rcu_cpu_stall_notifiers && !ret) { |
| 3104 | ret = rcu_stall_chain_notifier_unregister(&rcu_torture_stall_block);
| 3105 | if (ret) |
| 3106 | pr_info("%s: rcu_stall_chain_notifier_unregister() returned %d.\n" , __func__, ret); |
| 3107 | } |
| 3108 | torture_shutdown_absorb("rcu_torture_stall");
| 3109 | while (!kthread_should_stop())
| 3110 | schedule_timeout_interruptible(10 * HZ);
| 3111 | return 0; |
| 3112 | } |
| 3113 | |
| 3114 | /* Spawn CPU-stall kthread, if stall_cpu specified. */ |
| 3115 | static int __init rcu_torture_stall_init(void) |
| 3116 | { |
| 3117 | if (stall_cpu <= 0 && stall_gp_kthread <= 0) |
| 3118 | return 0; |
| 3119 | return torture_create_kthread(rcu_torture_stall, NULL, stall_task); |
| 3120 | } |
| 3121 | |
| 3122 | /* State structure for forward-progress self-propagating RCU callback. */ |
| 3123 | struct fwd_cb_state { |
| 3124 | struct rcu_head rh; |
| 3125 | int stop; |
| 3126 | }; |
| 3127 | |
| 3128 | /* |
| 3129 | * Forward-progress self-propagating RCU callback function. Because |
| 3130 | * callbacks run from softirq, this function is an implicit RCU read-side |
| 3131 | * critical section. |
| 3132 | */ |
| 3133 | static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) |
| 3134 | { |
| 3135 | struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh); |
| 3136 | |
| 3137 | if (READ_ONCE(fcsp->stop)) { |
| 3138 | WRITE_ONCE(fcsp->stop, 2); |
| 3139 | return; |
| 3140 | } |
| 3141 | cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); |
| 3142 | } |
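/*
 * A minimal sketch of starting and stopping such a self-propagating
 * callback, assuming plain call_rcu()/rcu_barrier() rather than the
 * cur_ops indirection used by the test proper.
 */
#if 0	/* Illustrative sketch only. */
static struct fwd_cb_state ex_fcs;

static void ex_start_selfprop(void)
{
	init_rcu_head(&ex_fcs.rh);
	WRITE_ONCE(ex_fcs.stop, 0);
	call_rcu(&ex_fcs.rh, rcu_torture_fwd_prog_cb);	/* Re-posts itself. */
}

static void ex_stop_selfprop(void)
{
	WRITE_ONCE(ex_fcs.stop, 1);		/* Ask it to stand down. */
	rcu_barrier();				/* Wait out the final post. */
	WARN_ON(READ_ONCE(ex_fcs.stop) != 2);	/* Callback acknowledged. */
}
#endif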
| 3143 | |
| 3144 | /* State for continuous-flood RCU callbacks. */ |
| 3145 | struct rcu_fwd_cb { |
| 3146 | struct rcu_head rh; |
| 3147 | struct rcu_fwd_cb *rfc_next; |
| 3148 | struct rcu_fwd *rfc_rfp; |
| 3149 | int rfc_gps; |
| 3150 | }; |
| 3151 | |
| 3152 | #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */ |
| 3153 | #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ |
| 3154 | #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ |
| 3155 | #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */ |
| 3156 | #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)) |
| 3157 | |
| 3158 | struct rcu_launder_hist { |
| 3159 | long n_launders; |
| 3160 | unsigned long launder_gp_seq; |
| 3161 | }; |
| 3162 | |
| 3163 | struct rcu_fwd { |
| 3164 | spinlock_t rcu_fwd_lock; |
| 3165 | struct rcu_fwd_cb *rcu_fwd_cb_head; |
| 3166 | struct rcu_fwd_cb **rcu_fwd_cb_tail; |
| 3167 | long n_launders_cb; |
| 3168 | unsigned long rcu_fwd_startat; |
| 3169 | struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; |
| 3170 | unsigned long rcu_launder_gp_seq_start; |
| 3171 | int rcu_fwd_id; |
| 3172 | }; |
| 3173 | |
| 3174 | static DEFINE_MUTEX(rcu_fwd_mutex); |
| 3175 | static struct rcu_fwd *rcu_fwds; |
| 3176 | static unsigned long rcu_fwd_seq; |
| 3177 | static atomic_long_t rcu_fwd_max_cbs; |
| 3178 | static bool rcu_fwd_emergency_stop; |
| 3179 | |
| 3180 | static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) |
| 3181 | { |
| 3182 | unsigned long gps; |
| 3183 | unsigned long gps_old; |
| 3184 | int i; |
| 3185 | int j; |
| 3186 | |
| 3187 | for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) |
| 3188 | if (rfp->n_launders_hist[i].n_launders > 0) |
| 3189 | break; |
| 3190 | pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):" , |
| 3191 | __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat); |
| 3192 | gps_old = rfp->rcu_launder_gp_seq_start; |
| 3193 | for (j = 0; j <= i; j++) { |
| 3194 | gps = rfp->n_launders_hist[j].launder_gp_seq; |
| 3195 | pr_cont(" %ds/%d: %ld:%ld" , |
| 3196 | j + 1, FWD_CBS_HIST_DIV, |
| 3197 | rfp->n_launders_hist[j].n_launders, |
| 3198 | rcutorture_seq_diff(gps, gps_old)); |
| 3199 | gps_old = gps; |
| 3200 | } |
| 3201 | pr_cont("\n" ); |
| 3202 | } |
| 3203 | |
| 3204 | /* Callback function for continuous-flood RCU callbacks. */ |
| 3205 | static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) |
| 3206 | { |
| 3207 | unsigned long flags; |
| 3208 | int i; |
| 3209 | struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh); |
| 3210 | struct rcu_fwd_cb **rfcpp; |
| 3211 | struct rcu_fwd *rfp = rfcp->rfc_rfp; |
| 3212 | |
| 3213 | rfcp->rfc_next = NULL; |
| 3214 | rfcp->rfc_gps++; |
| 3215 | spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); |
| 3216 | rfcpp = rfp->rcu_fwd_cb_tail; |
| 3217 | rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; |
| 3218 | smp_store_release(rfcpp, rfcp); |
| 3219 | WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); |
| 3220 | i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); |
| 3221 | if (i >= ARRAY_SIZE(rfp->n_launders_hist)) |
| 3222 | i = ARRAY_SIZE(rfp->n_launders_hist) - 1; |
| 3223 | rfp->n_launders_hist[i].n_launders++; |
| 3224 | rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); |
| 3225 | spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
| 3226 | } |
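/*
 * The enqueue above appends at *->rcu_fwd_cb_tail and then advances the
 * tail pointer; the smp_store_release() publishes the fully-initialized
 * node to the lockless ->rcu_fwd_cb_head/->rfc_next readers in
 * rcu_torture_fwd_prog_cr(). A minimal sketch of this tail-pointer
 * queue (hypothetical names) follows.
 */
#if 0	/* Illustrative sketch only; caller holds the queue lock. */
struct ex_node {
	struct ex_node *next;
};
static struct ex_node *ex_head;
static struct ex_node **ex_tail = &ex_head;

static void ex_enqueue(struct ex_node *np)
{
	struct ex_node **oldtail = ex_tail;

	np->next = NULL;
	ex_tail = &np->next;		/* Advance the tail ... */
	smp_store_release(oldtail, np);	/* ... then publish the node. */
}
#endif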
| 3227 | |
| 3228 | // Give the scheduler a chance, even on nohz_full CPUs. |
| 3229 | static void rcu_torture_fwd_prog_cond_resched(unsigned long iter) |
| 3230 | { |
| 3231 | if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) { |
| 3232 | // Real call_rcu() floods hit userspace, so emulate that. |
| 3233 | if (need_resched() || (iter & 0xfff)) |
| 3234 | schedule(); |
| 3235 | return; |
| 3236 | } |
| 3237 | // No userspace emulation: CB invocation throttles call_rcu() |
| 3238 | cond_resched(); |
| 3239 | } |
| 3240 | |
| 3241 | /* |
| 3242 | * Free all callbacks on the rcu_fwd_cb_head list, either because the |
| 3243 | * test is over or because we hit an OOM event. |
| 3244 | */ |
| 3245 | static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp) |
| 3246 | { |
| 3247 | unsigned long flags; |
| 3248 | unsigned long freed = 0; |
| 3249 | struct rcu_fwd_cb *rfcp; |
| 3250 | |
| 3251 | for (;;) { |
| 3252 | spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); |
| 3253 | rfcp = rfp->rcu_fwd_cb_head; |
| 3254 | if (!rfcp) { |
| 3255 | spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
| 3256 | break; |
| 3257 | } |
| 3258 | rfp->rcu_fwd_cb_head = rfcp->rfc_next; |
| 3259 | if (!rfp->rcu_fwd_cb_head) |
| 3260 | rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; |
| 3261 | spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
| 3262 | kfree(rfcp);
| 3263 | freed++;
| 3264 | rcu_torture_fwd_prog_cond_resched(freed);
| 3265 | if (tick_nohz_full_enabled()) { |
| 3266 | local_irq_save(flags); |
| 3267 | rcu_momentary_eqs(); |
| 3268 | local_irq_restore(flags); |
| 3269 | } |
| 3270 | } |
| 3271 | return freed; |
| 3272 | } |
| 3273 | |
| 3274 | /* Carry out need_resched()/cond_resched() forward-progress testing. */ |
| 3275 | static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp, |
| 3276 | int *tested, int *tested_tries) |
| 3277 | { |
| 3278 | unsigned long cver; |
| 3279 | unsigned long dur; |
| 3280 | struct fwd_cb_state fcs; |
| 3281 | unsigned long gps; |
| 3282 | int idx; |
| 3283 | int sd; |
| 3284 | int sd4; |
| 3285 | bool selfpropcb = false; |
| 3286 | unsigned long stopat; |
| 3287 | static DEFINE_TORTURE_RANDOM(trs); |
| 3288 | |
| 3289 | pr_alert("%s: Starting forward-progress test %d\n" , __func__, rfp->rcu_fwd_id); |
| 3290 | if (!cur_ops->sync) |
| 3291 | return; // Cannot do need_resched() forward progress testing without ->sync. |
| 3292 | if (cur_ops->call && cur_ops->cb_barrier) { |
| 3293 | init_rcu_head_on_stack(&fcs.rh);
| 3294 | selfpropcb = true; |
| 3295 | } |
| 3296 | |
| 3297 | /* Tight loop containing cond_resched(). */ |
| 3298 | atomic_inc(&rcu_fwd_cb_nodelay);
| 3299 | cur_ops->sync(); /* Later readers see above write. */ |
| 3300 | if (selfpropcb) { |
| 3301 | WRITE_ONCE(fcs.stop, 0); |
| 3302 | cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); |
| 3303 | } |
| 3304 | cver = READ_ONCE(rcu_torture_current_version); |
| 3305 | gps = cur_ops->get_gp_seq(); |
| 3306 | sd = cur_ops->stall_dur() + 1; |
| 3307 | sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; |
| 3308 | dur = sd4 + torture_random(&trs) % (sd - sd4);
| 3309 | WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); |
| 3310 | stopat = rfp->rcu_fwd_startat + dur; |
| 3311 | while (time_before(jiffies, stopat) && |
| 3312 | !shutdown_time_arrived() && |
| 3313 | !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { |
| 3314 | idx = cur_ops->readlock();
| 3315 | udelay(10);
| 3316 | cur_ops->readunlock(idx);
| 3317 | if (!fwd_progress_need_resched || need_resched()) |
| 3318 | cond_resched(); |
| 3319 | } |
| 3320 | (*tested_tries)++; |
| 3321 | if (!time_before(jiffies, stopat) && |
| 3322 | !shutdown_time_arrived() && |
| 3323 | !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { |
| 3324 | (*tested)++; |
| 3325 | cver = READ_ONCE(rcu_torture_current_version) - cver; |
| 3326 | gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
| 3327 | WARN_ON(!cver && gps < 2); |
| 3328 | pr_alert("%s: %d Duration %ld cver %ld gps %ld\n" , __func__, |
| 3329 | rfp->rcu_fwd_id, dur, cver, gps); |
| 3330 | } |
| 3331 | if (selfpropcb) { |
| 3332 | WRITE_ONCE(fcs.stop, 1); |
| 3333 | cur_ops->sync(); /* Wait for running CB to complete. */ |
| 3334 | pr_alert("%s: Waiting for CBs: %pS() %d\n" , __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); |
| 3335 | cur_ops->cb_barrier(); /* Wait for queued callbacks. */ |
| 3336 | } |
| 3337 | |
| 3338 | if (selfpropcb) { |
| 3339 | WARN_ON(READ_ONCE(fcs.stop) != 2); |
| 3340 | destroy_rcu_head_on_stack(&fcs.rh);
| 3341 | } |
| 3342 | schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ |
| 3343 | atomic_dec(&rcu_fwd_cb_nodelay);
| 3344 | } |
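/*
 * Worked example of the test-duration computation above, assuming
 * cur_ops->stall_dur() returns 21 * HZ and fwd_progress_div is 4:
 *
 *	sd  = 21 * HZ + 1
 *	sd4 = (sd + 3) / 4			(about sd / 4)
 *	dur = sd4 + torture_random() % (sd - sd4)
 *
 * so dur is uniform over roughly [sd/4, sd) jiffies: long enough to
 * exercise forward progress, short enough to avoid triggering a stall
 * warning itself.
 */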
| 3345 | |
| 3346 | /* Carry out call_rcu() forward-progress testing. */ |
| 3347 | static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) |
| 3348 | { |
| 3349 | unsigned long cver; |
| 3350 | unsigned long flags; |
| 3351 | unsigned long gps; |
| 3352 | int i; |
| 3353 | long n_launders; |
| 3354 | long n_launders_cb_snap; |
| 3355 | long n_launders_sa; |
| 3356 | long n_max_cbs; |
| 3357 | long n_max_gps; |
| 3358 | struct rcu_fwd_cb *rfcp; |
| 3359 | struct rcu_fwd_cb *rfcpn; |
| 3360 | unsigned long stopat; |
| 3361 | unsigned long stoppedat; |
| 3362 | |
| 3363 | pr_alert("%s: Starting forward-progress test %d\n" , __func__, rfp->rcu_fwd_id); |
| 3364 | if (READ_ONCE(rcu_fwd_emergency_stop)) |
| 3365 | return; /* Get out of the way quickly, no GP wait! */ |
| 3366 | if (!cur_ops->call) |
| 3367 | return; /* Can't do call_rcu() fwd prog without ->call. */ |
| 3368 | |
| 3369 | /* Loop continuously posting RCU callbacks. */ |
| 3370 | atomic_inc(&rcu_fwd_cb_nodelay);
| 3371 | cur_ops->sync(); /* Later readers see above write. */ |
| 3372 | WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); |
| 3373 | stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES; |
| 3374 | n_launders = 0; |
| 3375 | rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread |
| 3376 | n_launders_sa = 0; |
| 3377 | n_max_cbs = 0; |
| 3378 | n_max_gps = 0; |
| 3379 | for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++) |
| 3380 | rfp->n_launders_hist[i].n_launders = 0; |
| 3381 | cver = READ_ONCE(rcu_torture_current_version); |
| 3382 | gps = cur_ops->get_gp_seq(); |
| 3383 | rfp->rcu_launder_gp_seq_start = gps; |
| 3384 | tick_dep_set_task(current, TICK_DEP_BIT_RCU); // CPU bound, so need tick.
| 3385 | while (time_before(jiffies, stopat) && |
| 3386 | !shutdown_time_arrived() && |
| 3387 | !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { |
| 3388 | rfcp = READ_ONCE(rfp->rcu_fwd_cb_head); |
| 3389 | rfcpn = NULL; |
| 3390 | if (rfcp) |
| 3391 | rfcpn = READ_ONCE(rfcp->rfc_next); |
| 3392 | if (rfcpn) { |
| 3393 | if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && |
| 3394 | ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) |
| 3395 | break; |
| 3396 | rfp->rcu_fwd_cb_head = rfcpn; |
| 3397 | n_launders++; |
| 3398 | n_launders_sa++; |
| 3399 | } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) { |
| 3400 | rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); |
| 3401 | if (WARN_ON_ONCE(!rfcp)) { |
| 3402 | schedule_timeout_interruptible(1);
| 3403 | continue; |
| 3404 | } |
| 3405 | n_max_cbs++; |
| 3406 | n_launders_sa = 0; |
| 3407 | rfcp->rfc_gps = 0; |
| 3408 | rfcp->rfc_rfp = rfp; |
| 3409 | } else { |
| 3410 | rfcp = NULL; |
| 3411 | } |
| 3412 | if (rfcp) |
| 3413 | cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); |
| 3414 | rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
| 3415 | if (tick_nohz_full_enabled()) { |
| 3416 | local_irq_save(flags); |
| 3417 | rcu_momentary_eqs(); |
| 3418 | local_irq_restore(flags); |
| 3419 | } |
| 3420 | } |
| 3421 | stoppedat = jiffies; |
| 3422 | n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb); |
| 3423 | cver = READ_ONCE(rcu_torture_current_version) - cver; |
| 3424 | gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
| 3425 | pr_alert("%s: Waiting for CBs: %pS() %d\n" , __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); |
| 3426 | cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */ |
| 3427 | (void)rcu_torture_fwd_prog_cbfree(rfp); |
| 3428 | |
| 3429 | if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) && |
| 3430 | !shutdown_time_arrived()) { |
| 3431 | if (WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED) && cur_ops->gp_kthread_dbg) |
| 3432 | cur_ops->gp_kthread_dbg(); |
| 3433 | pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld #online %u\n" , |
| 3434 | __func__, |
| 3435 | stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat, |
| 3436 | n_launders + n_max_cbs - n_launders_cb_snap, |
| 3437 | n_launders, n_launders_sa, |
| 3438 | n_max_gps, n_max_cbs, cver, gps, num_online_cpus()); |
| 3439 | atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs);
| 3440 | mutex_lock(&rcu_fwd_mutex); // Serialize histograms.
| 3441 | rcu_torture_fwd_cb_hist(rfp);
| 3442 | mutex_unlock(&rcu_fwd_mutex);
| 3443 | } |
| 3444 | schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
| 3445 | tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
| 3446 | atomic_dec(&rcu_fwd_cb_nodelay);
| 3447 | } |
| 3448 | |
| 3449 | |
| 3450 | /* |
| 3451 | * OOM notifier, but this only prints diagnostic information for the |
| 3452 | * current forward-progress test. |
| 3453 | */ |
| 3454 | static int rcutorture_oom_notify(struct notifier_block *self, |
| 3455 | unsigned long notused, void *nfreed) |
| 3456 | { |
| 3457 | int i; |
| 3458 | long ncbs; |
| 3459 | struct rcu_fwd *rfp; |
| 3460 | |
| 3461 | mutex_lock(&rcu_fwd_mutex); |
| 3462 | rfp = rcu_fwds; |
| 3463 | if (!rfp) { |
| 3464 | mutex_unlock(&rcu_fwd_mutex);
| 3465 | return NOTIFY_OK; |
| 3466 | } |
| 3467 | WARN(1, "%s invoked upon OOM during forward-progress testing.\n" , |
| 3468 | __func__); |
| 3469 | for (i = 0; i < fwd_progress; i++) { |
| 3470 | rcu_torture_fwd_cb_hist(&rfp[i]);
| 3471 | rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2);
| 3472 | } |
| 3473 | WRITE_ONCE(rcu_fwd_emergency_stop, true); |
| 3474 | smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ |
| 3475 | ncbs = 0; |
| 3476 | for (i = 0; i < fwd_progress; i++)
| 3477 | ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
| 3478 | pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
| 3479 | cur_ops->cb_barrier();
| 3480 | ncbs = 0;
| 3481 | for (i = 0; i < fwd_progress; i++)
| 3482 | ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
| 3483 | pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
| 3484 | cur_ops->cb_barrier();
| 3485 | ncbs = 0;
| 3486 | for (i = 0; i < fwd_progress; i++)
| 3487 | ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
| 3488 | pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
| 3489 | smp_mb(); /* Frees before return to avoid redoing OOM. */
| 3490 | (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
| 3491 | pr_info("%s returning after OOM processing.\n", __func__);
| 3492 | mutex_unlock(&rcu_fwd_mutex);
| 3493 | return NOTIFY_OK; |
| 3494 | } |
| 3495 | |
| 3496 | static struct notifier_block rcutorture_oom_nb = { |
| 3497 | .notifier_call = rcutorture_oom_notify |
| 3498 | }; |
| 3499 | |
| 3500 | /* Carry out grace-period forward-progress testing. */ |
| 3501 | static int rcu_torture_fwd_prog(void *args) |
| 3502 | { |
| 3503 | bool firsttime = true; |
| 3504 | long max_cbs; |
| 3505 | int oldnice = task_nice(current); |
| 3506 | unsigned long oldseq = READ_ONCE(rcu_fwd_seq); |
| 3507 | struct rcu_fwd *rfp = args; |
| 3508 | int tested = 0; |
| 3509 | int tested_tries = 0; |
| 3510 | |
| 3511 | VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started" ); |
| 3512 | while (!rcu_inkernel_boot_has_ended()) |
| 3513 | schedule_timeout_interruptible(HZ / 10); |
| 3514 | rcu_bind_current_to_nocb(); |
| 3515 | if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) |
| 3516 | set_user_nice(current, MAX_NICE); |
| 3517 | do { |
| 3518 | if (!rfp->rcu_fwd_id) { |
| 3519 | schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
| 3520 | WRITE_ONCE(rcu_fwd_emergency_stop, false); |
| 3521 | if (!firsttime) { |
| 3522 | max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0);
| 3523 | pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs);
| 3524 | } |
| 3525 | firsttime = false; |
| 3526 | WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1); |
| 3527 | } else { |
| 3528 | while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop()) |
| 3529 | schedule_timeout_interruptible(HZ / 20); |
| 3530 | oldseq = READ_ONCE(rcu_fwd_seq); |
| 3531 | } |
| 3532 | pr_alert("%s: Starting forward-progress test %d\n" , __func__, rfp->rcu_fwd_id); |
| 3533 | if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id) |
| 3534 | rcu_torture_fwd_prog_cr(rfp); |
| 3535 | if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) && |
| 3536 | (!IS_ENABLED(CONFIG_TINY_RCU) || |
| 3537 | (rcu_inkernel_boot_has_ended() && |
| 3538 | torture_num_online_cpus() > rfp->rcu_fwd_id))) |
| 3539 | rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
| 3540 | 
| 3541 | /* Avoid slow periods; better to test when busy. */
| 3542 | if (stutter_wait("rcu_torture_fwd_prog"))
| 3543 | sched_set_normal(current, oldnice);
| 3544 | } while (!torture_must_stop()); |
| 3545 | /* Short runs might not contain a valid forward-progress attempt. */ |
| 3546 | if (!rfp->rcu_fwd_id) { |
| 3547 | WARN_ON(!tested && tested_tries >= 5); |
| 3548 | pr_alert("%s: tested %d tested_tries %d\n" , __func__, tested, tested_tries); |
| 3549 | } |
| 3550 | torture_kthread_stopping(title: "rcu_torture_fwd_prog" ); |
| 3551 | return 0; |
| 3552 | } |
| 3553 | |
| 3554 | /* If forward-progress checking is requested and feasible, spawn the thread. */ |
| 3555 | static int __init rcu_torture_fwd_prog_init(void) |
| 3556 | { |
| 3557 | int i; |
| 3558 | int ret = 0; |
| 3559 | struct rcu_fwd *rfp; |
| 3560 | |
| 3561 | if (!fwd_progress) |
| 3562 | return 0; /* Not requested, so don't do it. */ |
| 3563 | if (fwd_progress >= nr_cpu_ids) { |
| 3564 | VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.\n" ); |
| 3565 | fwd_progress = nr_cpu_ids; |
| 3566 | } else if (fwd_progress < 0) { |
| 3567 | fwd_progress = nr_cpu_ids; |
| 3568 | } |
| 3569 | if ((!cur_ops->sync && !cur_ops->call) || |
| 3570 | (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) || |
| 3571 | cur_ops == &rcu_busted_ops) { |
| 3572 | VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test" ); |
| 3573 | fwd_progress = 0; |
| 3574 | return 0; |
| 3575 | } |
| 3576 | if (stall_cpu > 0 || (preempt_duration > 0 && IS_ENABLED(CONFIG_RCU_NOCB_CPU))) { |
| 3577 | VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall and/or preemption testing" ); |
| 3578 | fwd_progress = 0; |
| 3579 | if (IS_MODULE(CONFIG_RCU_TORTURE_TEST)) |
| 3580 | return -EINVAL; /* In module, can fail back to user. */ |
| 3581 | WARN_ON(1); /* Make sure rcutorture scripting notices conflict. */ |
| 3582 | return 0; |
| 3583 | } |
| 3584 | if (fwd_progress_holdoff <= 0) |
| 3585 | fwd_progress_holdoff = 1; |
| 3586 | if (fwd_progress_div <= 0) |
| 3587 | fwd_progress_div = 4; |
| 3588 | rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL); |
| 3589 | fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL); |
| 3590 | if (!rfp || !fwd_prog_tasks) { |
| 3591 | kfree(rfp);
| 3592 | kfree(fwd_prog_tasks);
| 3593 | fwd_prog_tasks = NULL; |
| 3594 | fwd_progress = 0; |
| 3595 | return -ENOMEM; |
| 3596 | } |
| 3597 | for (i = 0; i < fwd_progress; i++) { |
| 3598 | spin_lock_init(&rfp[i].rcu_fwd_lock); |
| 3599 | rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head; |
| 3600 | rfp[i].rcu_fwd_id = i; |
| 3601 | } |
| 3602 | mutex_lock(&rcu_fwd_mutex); |
| 3603 | rcu_fwds = rfp; |
| 3604 | mutex_unlock(&rcu_fwd_mutex);
| 3605 | register_oom_notifier(&rcutorture_oom_nb);
| 3606 | for (i = 0; i < fwd_progress; i++) { |
| 3607 | ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]); |
| 3608 | if (ret) { |
| 3609 | fwd_progress = i; |
| 3610 | return ret; |
| 3611 | } |
| 3612 | } |
| 3613 | return 0; |
| 3614 | } |
| 3615 | |
| 3616 | static void rcu_torture_fwd_prog_cleanup(void) |
| 3617 | { |
| 3618 | int i; |
| 3619 | struct rcu_fwd *rfp; |
| 3620 | |
| 3621 | if (!rcu_fwds || !fwd_prog_tasks) |
| 3622 | return; |
| 3623 | for (i = 0; i < fwd_progress; i++) |
| 3624 | torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]); |
| 3625 | unregister_oom_notifier(&rcutorture_oom_nb);
| 3626 | mutex_lock(&rcu_fwd_mutex); |
| 3627 | rfp = rcu_fwds; |
| 3628 | rcu_fwds = NULL; |
| 3629 | mutex_unlock(&rcu_fwd_mutex);
| 3630 | kfree(rfp);
| 3631 | kfree(fwd_prog_tasks);
| 3632 | fwd_prog_tasks = NULL; |
| 3633 | } |
| 3634 | |
| 3635 | /* Callback function for RCU barrier testing. */ |
| 3636 | static void rcu_torture_barrier_cbf(struct rcu_head *rcu) |
| 3637 | { |
| 3638 | atomic_inc(&barrier_cbs_invoked);
| 3639 | } |
| 3640 | |
| 3641 | /* IPI handler to get callback posted on desired CPU, if online. */ |
| 3642 | static int rcu_torture_barrier1cb(void *rcu_void) |
| 3643 | { |
| 3644 | struct rcu_head *rhp = rcu_void; |
| 3645 | |
| 3646 | cur_ops->call(rhp, rcu_torture_barrier_cbf); |
| 3647 | return 0; |
| 3648 | } |
| 3649 | |
| 3650 | /* kthread function to register callbacks used to test RCU barriers. */ |
| 3651 | static int rcu_torture_barrier_cbs(void *arg) |
| 3652 | { |
| 3653 | long myid = (long)arg; |
| 3654 | bool lastphase = false; |
| 3655 | bool newphase; |
| 3656 | struct rcu_head rcu; |
| 3657 | |
| 3658 | init_rcu_head_on_stack(&rcu);
| 3659 | VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
| 3660 | set_user_nice(current, MAX_NICE); |
| 3661 | do { |
| 3662 | wait_event(barrier_cbs_wq[myid], |
| 3663 | (newphase = |
| 3664 | smp_load_acquire(&barrier_phase)) != lastphase || |
| 3665 | torture_must_stop()); |
| 3666 | lastphase = newphase; |
| 3667 | if (torture_must_stop()) |
| 3668 | break; |
| 3669 | /* |
| 3670 | * The above smp_load_acquire() ensures barrier_phase load |
| 3671 | * is ordered before the following ->call(). |
| 3672 | */ |
| 3673 | if (smp_call_on_cpu(myid, rcu_torture_barrier1cb, &rcu, 1))
| 3674 | cur_ops->call(&rcu, rcu_torture_barrier_cbf);
| 3675 | 
| 3676 | if (atomic_dec_and_test(&barrier_cbs_count))
| 3677 | wake_up(&barrier_wq);
| 3678 | } while (!torture_must_stop()); |
| 3679 | if (cur_ops->cb_barrier != NULL) |
| 3680 | cur_ops->cb_barrier(); |
| 3681 | destroy_rcu_head_on_stack(&rcu);
| 3682 | torture_kthread_stopping("rcu_torture_barrier_cbs");
| 3683 | return 0; |
| 3684 | } |
| 3685 | |
| 3686 | /* kthread function to drive and coordinate RCU barrier testing. */ |
| 3687 | static int rcu_torture_barrier(void *arg) |
| 3688 | { |
| 3689 | int i; |
| 3690 | |
| 3691 | VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting" ); |
| 3692 | do { |
| 3693 | atomic_set(&barrier_cbs_invoked, 0);
| 3694 | atomic_set(&barrier_cbs_count, n_barrier_cbs);
| 3695 | /* Ensure barrier_phase ordered after prior assignments. */ |
| 3696 | smp_store_release(&barrier_phase, !barrier_phase); |
| 3697 | for (i = 0; i < n_barrier_cbs; i++) |
| 3698 | wake_up(&barrier_cbs_wq[i]); |
| 3699 | wait_event(barrier_wq, |
| 3700 | atomic_read(&barrier_cbs_count) == 0 || |
| 3701 | torture_must_stop()); |
| 3702 | if (torture_must_stop()) |
| 3703 | break; |
| 3704 | n_barrier_attempts++; |
| 3705 | cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ |
| 3706 | if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
| 3707 | n_rcu_torture_barrier_error++; |
| 3708 | pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n" , |
| 3709 | atomic_read(&barrier_cbs_invoked), |
| 3710 | n_barrier_cbs); |
| 3711 | WARN_ON(1); |
| 3712 | // Wait manually for the remaining callbacks |
| 3713 | i = 0;
| 3714 | do {
| 3715 | if (WARN_ON(i++ > HZ))
| 3716 | i = INT_MIN;
| 3717 | schedule_timeout_interruptible(1);
| 3718 | cur_ops->cb_barrier();
| 3719 | } while (atomic_read(&barrier_cbs_invoked) !=
| 3720 | n_barrier_cbs &&
| 3721 | !torture_must_stop());
| 3722 | smp_mb(); // Can't trust ordering if broken. |
| 3723 | if (!torture_must_stop()) |
| 3724 | pr_err("Recovered: barrier_cbs_invoked = %d\n" , |
| 3725 | atomic_read(&barrier_cbs_invoked)); |
| 3726 | } else { |
| 3727 | n_barrier_successes++; |
| 3728 | } |
| 3729 | schedule_timeout_interruptible(HZ / 10); |
| 3730 | } while (!torture_must_stop()); |
| 3731 | torture_kthread_stopping(title: "rcu_torture_barrier" ); |
| 3732 | return 0; |
| 3733 | } |
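/*
 * The smp_store_release() of barrier_phase in rcu_torture_barrier()
 * pairs with the smp_load_acquire() in rcu_torture_barrier_cbs(), so
 * each CBs kthread's ->call() is ordered after the driver's counter
 * reset. A minimal sketch of this release/acquire phase flip
 * (hypothetical names) follows.
 */
#if 0	/* Illustrative sketch only. */
static bool ex_phase;
static atomic_t ex_count;

static void ex_driver_kick(int nworkers)
{
	atomic_set(&ex_count, nworkers);		/* Reset first ... */
	smp_store_release(&ex_phase, !ex_phase);	/* ... then flip. */
}

static void ex_worker_once(bool *lastphase)
{
	bool newphase = smp_load_acquire(&ex_phase);	/* Orders later work. */

	if (newphase != *lastphase) {
		*lastphase = newphase;
		atomic_dec(&ex_count);	/* Guaranteed to see the reset. */
	}
}
#endif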
| 3734 | |
| 3735 | /* Initialize RCU barrier testing. */ |
| 3736 | static int rcu_torture_barrier_init(void) |
| 3737 | { |
| 3738 | int i; |
| 3739 | int ret; |
| 3740 | |
| 3741 | if (n_barrier_cbs <= 0) |
| 3742 | return 0; |
| 3743 | if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { |
| 3744 | pr_alert("%s" TORTURE_FLAG |
| 3745 | " Call or barrier ops missing for %s,\n" , |
| 3746 | torture_type, cur_ops->name); |
| 3747 | pr_alert("%s" TORTURE_FLAG |
| 3748 | " RCU barrier testing omitted from run.\n" , |
| 3749 | torture_type); |
| 3750 | return 0; |
| 3751 | } |
| 3752 | atomic_set(&barrier_cbs_count, 0);
| 3753 | atomic_set(&barrier_cbs_invoked, 0);
| 3754 | barrier_cbs_tasks = |
| 3755 | kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), |
| 3756 | GFP_KERNEL); |
| 3757 | barrier_cbs_wq = |
| 3758 | kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); |
| 3759 | if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) |
| 3760 | return -ENOMEM; |
| 3761 | for (i = 0; i < n_barrier_cbs; i++) { |
| 3762 | init_waitqueue_head(&barrier_cbs_wq[i]); |
| 3763 | ret = torture_create_kthread(rcu_torture_barrier_cbs, |
| 3764 | (void *)(long)i, |
| 3765 | barrier_cbs_tasks[i]); |
| 3766 | if (ret) |
| 3767 | return ret; |
| 3768 | } |
| 3769 | return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); |
| 3770 | } |
| 3771 | |
| 3772 | /* Clean up after RCU barrier testing. */ |
| 3773 | static void rcu_torture_barrier_cleanup(void) |
| 3774 | { |
| 3775 | int i; |
| 3776 | |
| 3777 | torture_stop_kthread(rcu_torture_barrier, barrier_task); |
| 3778 | if (barrier_cbs_tasks != NULL) { |
| 3779 | for (i = 0; i < n_barrier_cbs; i++) |
| 3780 | torture_stop_kthread(rcu_torture_barrier_cbs, |
| 3781 | barrier_cbs_tasks[i]); |
| 3782 | kfree(barrier_cbs_tasks);
| 3783 | barrier_cbs_tasks = NULL; |
| 3784 | } |
| 3785 | if (barrier_cbs_wq != NULL) { |
| 3786 | kfree(barrier_cbs_wq);
| 3787 | barrier_cbs_wq = NULL; |
| 3788 | } |
| 3789 | } |
| 3790 | |
| 3791 | static bool rcu_torture_can_boost(void) |
| 3792 | { |
| 3793 | static int boost_warn_once; |
| 3794 | int prio; |
| 3795 | |
| 3796 | if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) |
| 3797 | return false; |
| 3798 | if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state) |
| 3799 | return false; |
| 3800 | |
| 3801 | prio = rcu_get_gp_kthreads_prio(); |
| 3802 | if (!prio) |
| 3803 | return false; |
| 3804 | |
| 3805 | if (prio < 2) { |
| 3806 | if (boost_warn_once == 1) |
| 3807 | return false; |
| 3808 | |
| 3809 | pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n" , KBUILD_MODNAME); |
| 3810 | boost_warn_once = 1; |
| 3811 | return false; |
| 3812 | } |
| 3813 | |
| 3814 | return true; |
| 3815 | } |
| 3816 | |
| 3817 | static bool read_exit_child_stop; |
| 3818 | static bool read_exit_child_stopped; |
| 3819 | static wait_queue_head_t read_exit_wq; |
| 3820 | |
| 3821 | // Child kthread which just does an rcutorture reader and exits. |
| 3822 | static int rcu_torture_read_exit_child(void *trsp_in) |
| 3823 | { |
| 3824 | struct torture_random_state *trsp = trsp_in; |
| 3825 | |
| 3826 | set_user_nice(current, MAX_NICE); |
| 3827 | // Minimize time between reading and exiting. |
| 3828 | while (!kthread_should_stop()) |
| 3829 | schedule_timeout_uninterruptible(HZ / 20); |
| 3830 | (void)rcu_torture_one_read(trsp, -1);
| 3831 | return 0; |
| 3832 | } |
| 3833 | |
| 3834 | // Parent kthread which creates and destroys read-exit child kthreads. |
| 3835 | static int rcu_torture_read_exit(void *unused) |
| 3836 | { |
| 3837 | bool errexit = false; |
| 3838 | int i; |
| 3839 | struct task_struct *tsp; |
| 3840 | DEFINE_TORTURE_RANDOM(trs); |
| 3841 | |
| 3842 | // Allocate and initialize. |
| 3843 | set_user_nice(current, MAX_NICE); |
| 3844 | VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test" ); |
| 3845 | |
| 3846 | // Each pass through this loop does one read-exit episode. |
| 3847 | do { |
| 3848 | VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode" ); |
| 3849 | for (i = 0; i < read_exit_burst; i++) { |
| 3850 | if (READ_ONCE(read_exit_child_stop)) |
| 3851 | break; |
| 3852 | stutter_wait(title: "rcu_torture_read_exit" ); |
| 3853 | // Spawn child. |
| 3854 | tsp = kthread_run(rcu_torture_read_exit_child, |
| 3855 | &trs, "%s" , "rcu_torture_read_exit_child" ); |
| 3856 | if (IS_ERR(tsp)) {
| 3857 | TOROUT_ERRSTRING("out of memory");
| 3858 | errexit = true; |
| 3859 | break; |
| 3860 | } |
| 3861 | cond_resched(); |
| 3862 | kthread_stop(tsp);
| 3863 | n_read_exits++; |
| 3864 | } |
| 3865 | VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode" ); |
| 3866 | rcu_barrier(); // Wait for task_struct free, avoid OOM. |
| 3867 | i = 0; |
| 3868 | for (; !errexit && !READ_ONCE(read_exit_child_stop) && i < read_exit_delay; i++) |
| 3869 | schedule_timeout_uninterruptible(HZ); |
| 3870 | } while (!errexit && !READ_ONCE(read_exit_child_stop)); |
| 3871 | |
| 3872 | // Clean up and exit. |
| 3873 | smp_store_release(&read_exit_child_stopped, true); // After reaping. |
| 3874 | smp_mb(); // Store before wakeup. |
| 3875 | wake_up(&read_exit_wq); |
| 3876 | while (!torture_must_stop()) |
| 3877 | schedule_timeout_uninterruptible(HZ / 20); |
| 3878 | torture_kthread_stopping(title: "rcu_torture_read_exit" ); |
| 3879 | return 0; |
| 3880 | } |
| 3881 | |
| 3882 | static int rcu_torture_read_exit_init(void) |
| 3883 | { |
| 3884 | if (read_exit_burst <= 0) |
| 3885 | return 0; |
| 3886 | init_waitqueue_head(&read_exit_wq); |
| 3887 | read_exit_child_stop = false; |
| 3888 | read_exit_child_stopped = false; |
| 3889 | return torture_create_kthread(rcu_torture_read_exit, NULL, |
| 3890 | read_exit_task); |
| 3891 | } |
| 3892 | |
| 3893 | static void rcu_torture_read_exit_cleanup(void) |
| 3894 | { |
| 3895 | if (!read_exit_task) |
| 3896 | return; |
| 3897 | WRITE_ONCE(read_exit_child_stop, true); |
| 3898 | smp_mb(); // Above write before wait. |
| 3899 | wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped)); |
| 3900 | torture_stop_kthread(rcutorture_read_exit, read_exit_task); |
| 3901 | } |
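/*
 * A minimal sketch of the two-flag shutdown handshake used above: the
 * stopper publishes a stop request, then waits for the kthread's
 * release-store acknowledgment before proceeding to kthread_stop().
 * Names are hypothetical.
 */
#if 0	/* Illustrative sketch only. */
static bool ex_stop_req;
static bool ex_stopped;
static DECLARE_WAIT_QUEUE_HEAD(ex_wq);

static void ex_request_stop(void)
{
	WRITE_ONCE(ex_stop_req, true);
	smp_mb();	/* Request visible before checking for the ack. */
	wait_event(ex_wq, smp_load_acquire(&ex_stopped));
}

static void ex_kthread_exit_path(void)	/* Run by the stopping kthread. */
{
	smp_store_release(&ex_stopped, true);	/* After all cleanup. */
	smp_mb();	/* Store before wakeup. */
	wake_up(&ex_wq);
}
#endif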
| 3902 | |
| 3903 | static void rcutorture_test_nmis(int n) |
| 3904 | { |
| 3905 | #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) |
| 3906 | int cpu; |
| 3907 | int dumpcpu; |
| 3908 | int i; |
| 3909 | |
| 3910 | for (i = 0; i < n; i++) { |
| 3911 | preempt_disable(); |
| 3912 | cpu = smp_processor_id(); |
| 3913 | dumpcpu = cpu + 1; |
| 3914 | if (dumpcpu >= nr_cpu_ids) |
| 3915 | dumpcpu = 0; |
| 3916 | pr_alert("%s: CPU %d invoking dump_cpu_task(%d)\n" , __func__, cpu, dumpcpu); |
| 3917 | dump_cpu_task(dumpcpu);
| 3918 | preempt_enable();
| 3919 | schedule_timeout_uninterruptible(15 * HZ);
| 3920 | } |
| 3921 | #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) |
| 3922 | WARN_ONCE(n, "Non-zero rcutorture.test_nmis=%d permitted only when rcutorture is built in.\n" , test_nmis); |
| 3923 | #endif // #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) |
| 3924 | } |
| 3925 | |
| 3926 | // Randomly preempt online CPUs. |
| 3927 | static int rcu_torture_preempt(void *unused) |
| 3928 | { |
| 3929 | int cpu = -1; |
| 3930 | DEFINE_TORTURE_RANDOM(rand); |
| 3931 | |
| 3932 | schedule_timeout_idle(stall_cpu_holdoff);
| 3933 | do {
| 3934 | // Wait for preempt_interval ms with up to 100us fuzz.
| 3935 | torture_hrtimeout_ms(preempt_interval, 100, &rand);
| 3936 | // Select online CPU.
| 3937 | cpu = cpumask_next(cpu, cpu_online_mask);
| 3938 | if (cpu >= nr_cpu_ids)
| 3939 | cpu = cpumask_next(-1, cpu_online_mask);
| 3940 | WARN_ON_ONCE(cpu >= nr_cpu_ids);
| 3941 | // Move to that CPU; if that fails, retry later.
| 3942 | if (torture_sched_setaffinity(current->pid, cpumask_of(cpu), false))
| 3943 | continue;
| 3944 | // Preempt at high-ish priority, then reset to normal.
| 3945 | sched_set_fifo(current);
| 3946 | torture_sched_setaffinity(current->pid, cpu_present_mask, true);
| 3947 | mdelay(preempt_duration);
| 3948 | sched_set_normal(current, 0);
| 3949 | stutter_wait("rcu_torture_preempt");
| 3950 | } while (!torture_must_stop());
| 3951 | torture_kthread_stopping("rcu_torture_preempt");
| 3952 | return 0; |
| 3953 | } |
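/*
 * The boost/restore pair above is the core of the preemption test: pin
 * to the victim CPU, run at SCHED_FIFO long enough to preempt whatever
 * was there, then unpin and drop back to SCHED_NORMAL. A minimal
 * sketch (hypothetical wrapper, real sched API) follows.
 */
#if 0	/* Illustrative sketch only. */
static void ex_preempt_here_for(unsigned long ms)
{
	sched_set_fifo(current);	/* Preempt this CPU's current task ... */
	mdelay(ms);			/* ... by busy-waiting at RT priority. */
	sched_set_normal(current, 0);	/* Then drop back to SCHED_NORMAL. */
}
#endif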
| 3954 | |
| 3955 | static enum cpuhp_state rcutor_hp; |
| 3956 | |
| 3957 | static struct hrtimer gpwrap_lag_timer; |
| 3958 | static bool gpwrap_lag_active; |
| 3959 | |
| 3960 | /* Timer handler for toggling RCU grace-period sequence overflow test lag value */ |
| 3961 | static enum hrtimer_restart rcu_gpwrap_lag_timer(struct hrtimer *timer) |
| 3962 | { |
| 3963 | ktime_t next_delay; |
| 3964 | |
| 3965 | if (gpwrap_lag_active) { |
| 3966 | pr_alert("rcu-torture: Disabling gpwrap lag (value=0)\n" ); |
| 3967 | cur_ops->set_gpwrap_lag(0); |
| 3968 | gpwrap_lag_active = false; |
| 3969 | next_delay = ktime_set((gpwrap_lag_cycle_mins - gpwrap_lag_active_mins) * 60, 0);
| 3970 | } else { |
| 3971 | pr_alert("rcu-torture: Enabling gpwrap lag (value=%d)\n" , gpwrap_lag_gps); |
| 3972 | cur_ops->set_gpwrap_lag(gpwrap_lag_gps); |
| 3973 | gpwrap_lag_active = true; |
| 3974 | next_delay = ktime_set(secs: gpwrap_lag_active_mins * 60, nsecs: 0); |
| 3975 | } |
| 3976 | |
| 3977 | if (torture_must_stop_irq()) |
| 3978 | return HRTIMER_NORESTART; |
| 3979 | |
| 3980 | hrtimer_forward_now(timer, next_delay);
| 3981 | return HRTIMER_RESTART; |
| 3982 | } |
| 3983 | |
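| | // For example (hypothetical values), gpwrap_lag_cycle_mins = 30 with
| | // gpwrap_lag_active_mins = 5 applies the lag for five minutes out of
| | // every thirty, starting with an inactive period of 25 minutes.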
| 3984 | static int rcu_gpwrap_lag_init(void) |
| 3985 | { |
| 3986 | if (!gpwrap_lag) |
| 3987 | return 0; |
| 3988 | |
| 3989 | if (gpwrap_lag_cycle_mins <= 0 || gpwrap_lag_active_mins <= 0) { |
| 3990 | pr_alert("rcu-torture: lag timing parameters must be positive\n" ); |
| 3991 | return -EINVAL; |
| 3992 | } |
| 3993 | |
| 3994 | hrtimer_setup(&gpwrap_lag_timer, rcu_gpwrap_lag_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
| 3995 | gpwrap_lag_active = false;
| 3996 | hrtimer_start(&gpwrap_lag_timer,
| 3997 | ktime_set((gpwrap_lag_cycle_mins - gpwrap_lag_active_mins) * 60, 0), HRTIMER_MODE_REL);
| 3998 | |
| 3999 | return 0; |
| 4000 | } |
| 4001 | |
| 4002 | static void rcu_gpwrap_lag_cleanup(void) |
| 4003 | { |
| 4004 | hrtimer_cancel(&gpwrap_lag_timer);
| 4005 | cur_ops->set_gpwrap_lag(0); |
| 4006 | gpwrap_lag_active = false; |
| 4007 | }
| |
| 4008 | static void
| 4009 | rcu_torture_cleanup(void) |
| 4010 | { |
| 4011 | int firsttime; |
| 4012 | int flags = 0; |
| 4013 | unsigned long gp_seq = 0; |
| 4014 | int i; |
| 4015 | int j; |
| 4016 | |
| 4017 | if (torture_cleanup_begin()) { |
| 4018 | if (cur_ops->cb_barrier != NULL) { |
| 4019 | pr_info("%s: Invoking %pS().\n" , __func__, cur_ops->cb_barrier); |
| 4020 | cur_ops->cb_barrier(); |
| 4021 | } |
| 4022 | if (cur_ops->gp_slow_unregister) |
| 4023 | cur_ops->gp_slow_unregister(NULL); |
| 4024 | return; |
| 4025 | } |
| 4026 | if (!cur_ops) { |
| 4027 | torture_cleanup_end(); |
| 4028 | return; |
| 4029 | } |
| 4030 | |
| 4031 | rcutorture_test_nmis(test_nmis);
| 4032 | |
| 4033 | if (cur_ops->gp_kthread_dbg) |
| 4034 | cur_ops->gp_kthread_dbg(); |
| 4035 | torture_stop_kthread(rcu_torture_preempt, preempt_task); |
| 4036 | rcu_torture_read_exit_cleanup(); |
| 4037 | rcu_torture_barrier_cleanup(); |
| 4038 | rcu_torture_fwd_prog_cleanup(); |
| 4039 | torture_stop_kthread(rcu_torture_stall, stall_task); |
| 4040 | torture_stop_kthread(rcu_torture_writer, writer_task); |
| 4041 | |
| 4042 | if (nocb_tasks) { |
| 4043 | for (i = 0; i < nrealnocbers; i++) |
| 4044 | torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]); |
| 4045 | kfree(nocb_tasks);
| 4046 | nocb_tasks = NULL; |
| 4047 | } |
| 4048 | |
| 4049 | if (updown_task) { |
| 4050 | torture_stop_kthread(rcu_torture_updown, updown_task); |
| 4051 | updown_task = NULL; |
| 4052 | } |
| 4053 | if (reader_tasks) { |
| 4054 | for (i = 0; i < nrealreaders; i++) |
| 4055 | torture_stop_kthread(rcu_torture_reader, |
| 4056 | reader_tasks[i]); |
| 4057 | kfree(reader_tasks);
| 4058 | reader_tasks = NULL; |
| 4059 | } |
| 4060 | kfree(rcu_torture_reader_mbchk);
| 4061 | rcu_torture_reader_mbchk = NULL; |
| 4062 | |
| 4063 | if (fakewriter_tasks) { |
| 4064 | for (i = 0; i < nrealfakewriters; i++) |
| 4065 | torture_stop_kthread(rcu_torture_fakewriter, |
| 4066 | fakewriter_tasks[i]); |
| 4067 | kfree(fakewriter_tasks);
| 4068 | fakewriter_tasks = NULL; |
| 4069 | } |
| 4070 | |
| 4071 | if (cur_ops->get_gp_data) |
| 4072 | cur_ops->get_gp_data(&flags, &gp_seq); |
| 4073 | pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n" , |
| 4074 | cur_ops->name, (long)gp_seq, flags, |
| 4075 | rcutorture_seq_diff(gp_seq, start_gp_seq)); |
| 4076 | torture_stop_kthread(rcu_torture_stats, stats_task); |
| 4077 | torture_stop_kthread(rcu_torture_fqs, fqs_task); |
| 4078 | if (rcu_torture_can_boost() && rcutor_hp >= 0) |
| 4079 | cpuhp_remove_state(rcutor_hp);
| 4080 | |
| 4081 | /* |
| 4082 | * Wait for all RCU callbacks to fire, then do torture-type-specific |
| 4083 | * cleanup operations. |
| 4084 | */ |
| 4085 | if (cur_ops->cb_barrier != NULL) { |
| 4086 | pr_info("%s: Invoking %pS().\n" , __func__, cur_ops->cb_barrier); |
| 4087 | cur_ops->cb_barrier(); |
| 4088 | } |
| 4089 | if (cur_ops->cleanup != NULL) |
| 4090 | cur_ops->cleanup(); |
| 4091 | |
| 4092 | rcu_torture_mem_dump_obj(); |
| 4093 | |
| 4094 | rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ |
| 4095 | |
| 4096 | if (err_segs_recorded) { |
| 4097 | pr_alert("Failure/close-call rcutorture reader segments:\n" ); |
| 4098 | if (rt_read_nsegs == 0) |
| 4099 | pr_alert("\t: No segments recorded!!!\n" ); |
| 4100 | firsttime = 1; |
| 4101 | for (i = 0; i < rt_read_nsegs; i++) { |
| 4102 | if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_GP)) |
| 4103 | pr_alert("\t%lluus " , div64_u64(err_segs[i].rt_ts, 1000ULL)); |
| 4104 | else |
| 4105 | pr_alert("\t" ); |
| 4106 | pr_cont("%d: %#4x" , i, err_segs[i].rt_readstate); |
| 4107 | if (err_segs[i].rt_delay_jiffies != 0) { |
| 4108 | pr_cont("%s%ldjiffies" , firsttime ? "" : "+" , |
| 4109 | err_segs[i].rt_delay_jiffies); |
| 4110 | firsttime = 0; |
| 4111 | } |
| 4112 | if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)) { |
| 4113 | pr_cont(" CPU %2d" , err_segs[i].rt_cpu); |
| 4114 | if (err_segs[i].rt_cpu != err_segs[i].rt_end_cpu) |
| 4115 | pr_cont("->%-2d" , err_segs[i].rt_end_cpu); |
| 4116 | else |
| 4117 | pr_cont(" ..." ); |
| 4118 | } |
| 4119 | if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_GP) && |
| 4120 | cur_ops->gather_gp_seqs && cur_ops->format_gp_seqs) { |
| 4121 | char buf1[20+1]; |
| 4122 | char buf2[20+1]; |
| 4123 | char sepchar = '-'; |
| 4124 | |
| 4125 | cur_ops->format_gp_seqs(err_segs[i].rt_gp_seq, |
| 4126 | buf1, ARRAY_SIZE(buf1)); |
| 4127 | cur_ops->format_gp_seqs(err_segs[i].rt_gp_seq_end, |
| 4128 | buf2, ARRAY_SIZE(buf2)); |
| 4129 | if (err_segs[i].rt_gp_seq == err_segs[i].rt_gp_seq_end) { |
| 4130 | if (buf2[0]) { |
| 4131 | for (j = 0; buf2[j]; j++) |
| 4132 | buf2[j] = '.'; |
| 4133 | if (j) |
| 4134 | buf2[j - 1] = ' '; |
| 4135 | } |
| 4136 | sepchar = ' '; |
| 4137 | } |
| 4138 | pr_cont(" %s%c%s" , buf1, sepchar, buf2); |
| 4139 | } |
| 4140 | if (err_segs[i].rt_delay_ms != 0) {
| 4141 | pr_cont(" %s%ldms", firsttime ? "" : "+",
| 4142 | err_segs[i].rt_delay_ms);
| 4143 | firsttime = 0;
| 4144 | }
| 4145 | if (err_segs[i].rt_delay_us != 0) {
| 4146 | pr_cont(" %s%ldus", firsttime ? "" : "+",
| 4147 | err_segs[i].rt_delay_us);
| 4148 | firsttime = 0;
| 4149 | }
| 4150 | pr_cont("%s", err_segs[i].rt_preempted ? " preempted" : "");
| 4151 | if (err_segs[i].rt_readstate & RCUTORTURE_RDR_BH)
| 4152 | pr_cont(" BH");
| 4153 | if (err_segs[i].rt_readstate & RCUTORTURE_RDR_IRQ)
| 4154 | pr_cont(" IRQ");
| 4155 | if (err_segs[i].rt_readstate & RCUTORTURE_RDR_PREEMPT)
| 4156 | pr_cont(" PREEMPT");
| 4157 | if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RBH)
| 4158 | pr_cont(" RBH");
| 4159 | if (err_segs[i].rt_readstate & RCUTORTURE_RDR_SCHED)
| 4160 | pr_cont(" SCHED");
| 4161 | if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RCU_1)
| 4162 | pr_cont(" RCU_1");
| 4163 | if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RCU_2)
| 4164 | pr_cont(" RCU_2");
| 4165 | pr_cont("\n");
| 4167 | }
| 4168 | if (rt_read_preempted)
| 4169 | pr_alert("\tReader was preempted.\n");
| 4170 | } |
| 4171 | if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
| 4172 | rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
| 4173 | else if (torture_onoff_failures())
| 4174 | rcu_torture_print_module_parms(cur_ops,
| 4175 | "End of test: RCU_HOTPLUG");
| 4176 | else
| 4177 | rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
| 4178 | torture_cleanup_end(); |
| 4179 | if (cur_ops->gp_slow_unregister) |
| 4180 | cur_ops->gp_slow_unregister(NULL); |
| 4181 | |
| 4182 | if (gpwrap_lag && cur_ops->set_gpwrap_lag) |
| 4183 | rcu_gpwrap_lag_cleanup(); |
| 4184 | } |
| 4185 | |
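| | // Intentionally empty: this callback exists only to give the
| | // duplicate-callback test below something harmless to post.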
| 4186 | static void rcu_torture_leak_cb(struct rcu_head *rhp) |
| 4187 | { |
| 4188 | } |
| 4189 | |
| 4190 | static void rcu_torture_err_cb(struct rcu_head *rhp) |
| 4191 | { |
| 4192 | /*
| 4193 | * This -might- happen due to race conditions, but is unlikely.
| 4194 | * The scenario that leads to this happening is that the
| 4195 | * first of the pair of duplicate callbacks is queued,
| 4196 | * someone else starts a grace period that includes that
| 4197 | * callback, and then the second of the pair must wait for the
| 4198 | * next grace period.  If that does happen, the debug-objects
| 4199 | * subsystem won't have splatted.
| 4200 | */
| 4201 | pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
| 4202 | } |
| 4203 | |
| 4204 | /* |
| 4205 | * Verify that double-free causes debug-objects to complain, but only |
| 4206 | * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test |
| 4207 | * cannot be carried out. |
| 4208 | */ |
| 4209 | static void rcu_test_debug_objects(void) |
| 4210 | { |
| 4211 | struct rcu_head rh1; |
| 4212 | struct rcu_head rh2; |
| 4213 | int idx; |
| 4214 | |
| 4215 | if (!IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD)) { |
| 4216 | pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_%s()\n" , |
| 4217 | KBUILD_MODNAME, cur_ops->name); |
| 4218 | return; |
| 4219 | } |
| 4220 | |
| 4221 | if (WARN_ON_ONCE(cur_ops->debug_objects && |
| 4222 | (!cur_ops->call || !cur_ops->cb_barrier))) |
| 4223 | return; |
| 4224 | |
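| | // Also exercise a dynamically allocated rcu_head, which debug-objects
| | // tracks without on-stack initialization.  If the allocation fails,
| | // that portion of the test is simply skipped.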
| 4225 | struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); |
| 4226 | |
| 4227 | init_rcu_head_on_stack(&rh1);
| 4228 | init_rcu_head_on_stack(&rh2);
| 4229 | pr_alert("%s: WARN: Duplicate call_%s() test starting.\n", KBUILD_MODNAME, cur_ops->name);
| 4230 | |
| 4231 | /* Try to queue the rh2 pair of callbacks for the same grace period. */ |
| 4232 | idx = cur_ops->readlock(); /* Make it impossible to finish a grace period. */ |
| 4233 | cur_ops->call(&rh1, rcu_torture_leak_cb); /* Start grace period. */ |
| 4234 | cur_ops->call(&rh2, rcu_torture_leak_cb); |
| 4235 | cur_ops->call(&rh2, rcu_torture_err_cb); /* Duplicate callback. */ |
| 4236 | if (rhp) { |
| 4237 | cur_ops->call(rhp, rcu_torture_leak_cb); |
| 4238 | cur_ops->call(rhp, rcu_torture_err_cb); /* Another duplicate callback. */ |
| 4239 | } |
| 4240 | cur_ops->readunlock(idx); |
| 4241 | |
| 4242 | /* Wait for them all to get done so we can safely return. */ |
| 4243 | cur_ops->cb_barrier(); |
| 4244 | pr_alert("%s: WARN: Duplicate call_%s() test complete.\n" , KBUILD_MODNAME, cur_ops->name); |
| 4245 | destroy_rcu_head_on_stack(head: &rh1); |
| 4246 | destroy_rcu_head_on_stack(head: &rh2); |
| 4247 | kfree(objp: rhp); |
| 4248 | } |
| 4249 | |
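| | // Forward-progress helper passed to torture_onoff_init() below: force
| | // a synchronous grace period on every 4096th invocation (0xfff mask).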
| 4250 | static void rcutorture_sync(void) |
| 4251 | { |
| 4252 | static unsigned long n; |
| 4253 | |
| 4254 | if (cur_ops->sync && !(++n & 0xfff)) |
| 4255 | cur_ops->sync(); |
| 4256 | } |
| 4257 | |
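| | // The locks used by the SRCU-lockdep tests are defined one by one
| | // rather than as arrays so that each static definition gets its own
| | // lockdep class, keeping the resulting deadlock-cycle reports distinct.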
| 4258 | static DEFINE_MUTEX(mut0); |
| 4259 | static DEFINE_MUTEX(mut1); |
| 4260 | static DEFINE_MUTEX(mut2); |
| 4261 | static DEFINE_MUTEX(mut3); |
| 4262 | static DEFINE_MUTEX(mut4); |
| 4263 | static DEFINE_MUTEX(mut5); |
| 4264 | static DEFINE_MUTEX(mut6); |
| 4265 | static DEFINE_MUTEX(mut7); |
| 4266 | static DEFINE_MUTEX(mut8); |
| 4267 | static DEFINE_MUTEX(mut9); |
| 4268 | |
| 4269 | static DECLARE_RWSEM(rwsem0); |
| 4270 | static DECLARE_RWSEM(rwsem1); |
| 4271 | static DECLARE_RWSEM(rwsem2); |
| 4272 | static DECLARE_RWSEM(rwsem3); |
| 4273 | static DECLARE_RWSEM(rwsem4); |
| 4274 | static DECLARE_RWSEM(rwsem5); |
| 4275 | static DECLARE_RWSEM(rwsem6); |
| 4276 | static DECLARE_RWSEM(rwsem7); |
| 4277 | static DECLARE_RWSEM(rwsem8); |
| 4278 | static DECLARE_RWSEM(rwsem9); |
| 4279 | |
| 4280 | DEFINE_STATIC_SRCU(srcu0); |
| 4281 | DEFINE_STATIC_SRCU(srcu1); |
| 4282 | DEFINE_STATIC_SRCU(srcu2); |
| 4283 | DEFINE_STATIC_SRCU(srcu3); |
| 4284 | DEFINE_STATIC_SRCU(srcu4); |
| 4285 | DEFINE_STATIC_SRCU(srcu5); |
| 4286 | DEFINE_STATIC_SRCU(srcu6); |
| 4287 | DEFINE_STATIC_SRCU(srcu7); |
| 4288 | DEFINE_STATIC_SRCU(srcu8); |
| 4289 | DEFINE_STATIC_SRCU(srcu9); |
| 4290 | |
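| | // Log one step of a lock cycle and return the index of the next
| | // element to wait on, or -1 if this step should not wait (the
| | // non-deadlock case at the end of the cycle).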
| 4291 | static int srcu_lockdep_next(const char *f, const char *fl, const char *fs, const char *fu, int i, |
| 4292 | int cyclelen, int deadlock) |
| 4293 | { |
| 4294 | int j = i + 1; |
| 4295 | |
| 4296 | if (j >= cyclelen) |
| 4297 | j = deadlock ? 0 : -1; |
| 4298 | if (j >= 0) |
| 4299 | pr_info("%s: %s(%d), %s(%d), %s(%d)\n" , f, fl, i, fs, j, fu, i); |
| 4300 | else |
| 4301 | pr_info("%s: %s(%d), %s(%d)\n" , f, fl, i, fu, i); |
| 4302 | return j; |
| 4303 | } |
| 4304 | |
| 4305 | // Test lockdep on SRCU-based deadlock scenarios. |
| 4306 | static void rcu_torture_init_srcu_lockdep(void) |
| 4307 | { |
| 4308 | int cyclelen; |
| 4309 | int deadlock; |
| 4310 | bool err = false; |
| 4311 | int i; |
| 4312 | int j; |
| 4313 | int idx; |
| 4314 | struct mutex *muts[] = { &mut0, &mut1, &mut2, &mut3, &mut4, |
| 4315 | &mut5, &mut6, &mut7, &mut8, &mut9 }; |
| 4316 | struct rw_semaphore *rwsems[] = { &rwsem0, &rwsem1, &rwsem2, &rwsem3, &rwsem4, |
| 4317 | &rwsem5, &rwsem6, &rwsem7, &rwsem8, &rwsem9 }; |
| 4318 | struct srcu_struct *srcus[] = { &srcu0, &srcu1, &srcu2, &srcu3, &srcu4, |
| 4319 | &srcu5, &srcu6, &srcu7, &srcu8, &srcu9 }; |
| 4320 | int testtype; |
| 4321 | |
| 4322 | if (!test_srcu_lockdep) |
| 4323 | return; |
| 4324 | |
| 4325 | deadlock = test_srcu_lockdep / 1000; |
| 4326 | testtype = (test_srcu_lockdep / 10) % 100; |
| 4327 | cyclelen = test_srcu_lockdep % 10; |
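| | // The decoding above follows the DNNL format documented at err_out.
| | // For example, test_srcu_lockdep = 1013 requests a deadlocking (D=1)
| | // SRCU/mutex (NN=01) cycle of length three (L=3).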
| 4328 | WARN_ON_ONCE(ARRAY_SIZE(muts) != ARRAY_SIZE(srcus)); |
| 4329 | if (WARN_ONCE(deadlock != !!deadlock,
| 4330 | "%s: test_srcu_lockdep=%d and deadlock digit %d must be zero or one.\n",
| 4331 | __func__, test_srcu_lockdep, deadlock))
| 4332 | err = true;
| 4333 | if (WARN_ONCE(cyclelen <= 0,
| 4334 | "%s: test_srcu_lockdep=%d and cycle-length digit %d must be greater than zero.\n",
| 4335 | __func__, test_srcu_lockdep, cyclelen))
| 4336 | err = true;
| 4337 | if (err) |
| 4338 | goto err_out; |
| 4339 | |
| 4340 | if (testtype == 0) { |
| 4341 | pr_info("%s: test_srcu_lockdep = %05d: SRCU %d-way %sdeadlock.\n" , |
| 4342 | __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-" ); |
| 4343 | if (deadlock && cyclelen == 1) |
| 4344 | pr_info("%s: Expect hang.\n" , __func__); |
| 4345 | for (i = 0; i < cyclelen; i++) { |
| 4346 | j = srcu_lockdep_next(f: __func__, fl: "srcu_read_lock" , fs: "synchronize_srcu" , |
| 4347 | fu: "srcu_read_unlock" , i, cyclelen, deadlock); |
| 4348 | idx = srcu_read_lock(ssp: srcus[i]); |
| 4349 | if (j >= 0) |
| 4350 | synchronize_srcu(ssp: srcus[j]); |
| 4351 | srcu_read_unlock(ssp: srcus[i], idx); |
| 4352 | } |
| 4353 | return; |
| 4354 | } |
| 4355 | |
| 4356 | if (testtype == 1) { |
| 4357 | pr_info("%s: test_srcu_lockdep = %05d: SRCU/mutex %d-way %sdeadlock.\n" , |
| 4358 | __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-" ); |
| 4359 | for (i = 0; i < cyclelen; i++) { |
| 4360 | pr_info("%s: srcu_read_lock(%d), mutex_lock(%d), mutex_unlock(%d), srcu_read_unlock(%d)\n" , |
| 4361 | __func__, i, i, i, i); |
| 4362 | idx = srcu_read_lock(ssp: srcus[i]); |
| 4363 | mutex_lock(muts[i]); |
| 4364 | mutex_unlock(lock: muts[i]); |
| 4365 | srcu_read_unlock(ssp: srcus[i], idx); |
| 4366 | |
| 4367 | j = srcu_lockdep_next(f: __func__, fl: "mutex_lock" , fs: "synchronize_srcu" , |
| 4368 | fu: "mutex_unlock" , i, cyclelen, deadlock); |
| 4369 | mutex_lock(muts[i]); |
| 4370 | if (j >= 0) |
| 4371 | synchronize_srcu(ssp: srcus[j]); |
| 4372 | mutex_unlock(lock: muts[i]); |
| 4373 | } |
| 4374 | return; |
| 4375 | } |
| 4376 | |
| 4377 | if (testtype == 2) { |
| 4378 | pr_info("%s: test_srcu_lockdep = %05d: SRCU/rwsem %d-way %sdeadlock.\n" , |
| 4379 | __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-" ); |
| 4380 | for (i = 0; i < cyclelen; i++) { |
| 4381 | pr_info("%s: srcu_read_lock(%d), down_read(%d), up_read(%d), srcu_read_unlock(%d)\n" , |
| 4382 | __func__, i, i, i, i); |
| 4383 | idx = srcu_read_lock(ssp: srcus[i]); |
| 4384 | down_read(sem: rwsems[i]); |
| 4385 | up_read(sem: rwsems[i]); |
| 4386 | srcu_read_unlock(ssp: srcus[i], idx); |
| 4387 | |
| 4388 | j = srcu_lockdep_next(f: __func__, fl: "down_write" , fs: "synchronize_srcu" , |
| 4389 | fu: "up_write" , i, cyclelen, deadlock); |
| 4390 | down_write(sem: rwsems[i]); |
| 4391 | if (j >= 0) |
| 4392 | synchronize_srcu(ssp: srcus[j]); |
| 4393 | up_write(sem: rwsems[i]); |
| 4394 | } |
| 4395 | return; |
| 4396 | } |
| 4397 | |
| 4398 | #ifdef CONFIG_TASKS_TRACE_RCU |
| 4399 | if (testtype == 3) { |
| 4400 | pr_info("%s: test_srcu_lockdep = %05d: SRCU and Tasks Trace RCU %d-way %sdeadlock.\n" , |
| 4401 | __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-" ); |
| 4402 | if (deadlock && cyclelen == 1) |
| 4403 | pr_info("%s: Expect hang.\n" , __func__); |
| 4404 | for (i = 0; i < cyclelen; i++) { |
| 4405 | char *fl = i == 0 ? "rcu_read_lock_trace" : "srcu_read_lock" ; |
| 4406 | char *fs = i == cyclelen - 1 ? "synchronize_rcu_tasks_trace" |
| 4407 | : "synchronize_srcu" ; |
| 4408 | char *fu = i == 0 ? "rcu_read_unlock_trace" : "srcu_read_unlock" ; |
| 4409 | |
| 4410 | j = srcu_lockdep_next(f: __func__, fl, fs, fu, i, cyclelen, deadlock); |
| 4411 | if (i == 0) |
| 4412 | rcu_read_lock_trace(); |
| 4413 | else |
| 4414 | idx = srcu_read_lock(ssp: srcus[i]); |
| 4415 | if (j >= 0) { |
| 4416 | if (i == cyclelen - 1) |
| 4417 | synchronize_rcu_tasks_trace(); |
| 4418 | else |
| 4419 | synchronize_srcu(ssp: srcus[j]); |
| 4420 | } |
| 4421 | if (i == 0) |
| 4422 | rcu_read_unlock_trace(); |
| 4423 | else |
| 4424 | srcu_read_unlock(ssp: srcus[i], idx); |
| 4425 | } |
| 4426 | return; |
| 4427 | } |
| 4428 | #endif // #ifdef CONFIG_TASKS_TRACE_RCU |
| 4429 | |
| 4430 | err_out: |
| 4431 | pr_info("%s: test_srcu_lockdep = %05d does nothing.\n" , __func__, test_srcu_lockdep); |
| 4432 | pr_info("%s: test_srcu_lockdep = DNNL.\n" , __func__); |
| 4433 | pr_info("%s: D: Deadlock if nonzero.\n" , __func__); |
| 4434 | pr_info("%s: NN: Test number, 0=SRCU, 1=SRCU/mutex, 2=SRCU/rwsem, 3=SRCU/Tasks Trace RCU.\n" , __func__); |
| 4435 | pr_info("%s: L: Cycle length.\n" , __func__); |
| 4436 | if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU)) |
| 4437 | pr_info("%s: NN=3 disallowed because kernel is built with CONFIG_TASKS_TRACE_RCU=n\n" , __func__); |
| 4438 | } |
| 4439 | |
| 4440 | static int __init |
| 4441 | rcu_torture_init(void) |
| 4442 | { |
| 4443 | long i; |
| 4444 | int cpu; |
| 4445 | int firsterr = 0; |
| 4446 | int flags = 0; |
| 4447 | unsigned long gp_seq = 0; |
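| | // Note that TASKS_OPS, TASKS_RUDE_OPS, and TASKS_TRACING_OPS expand
| | // to ops-table entries only when the corresponding Tasks-RCU flavor
| | // is configured, and otherwise expand to nothing.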
| 4448 | static struct rcu_torture_ops *torture_ops[] = { |
| 4449 | &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops, |
| 4450 | TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS |
| 4451 | &trivial_ops, |
| 4452 | }; |
| 4453 | |
| 4454 | if (!torture_init_begin(torture_type, verbose))
| 4455 | return -EBUSY; |
| 4456 | |
| 4457 | /* Process args and tell the world that the torturer is on the job. */ |
| 4458 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { |
| 4459 | cur_ops = torture_ops[i]; |
| 4460 | if (strcmp(torture_type, cur_ops->name) == 0) |
| 4461 | break; |
| 4462 | } |
| 4463 | if (i == ARRAY_SIZE(torture_ops)) { |
| 4464 | pr_alert("rcu-torture: invalid torture type: \"%s\"\n" , |
| 4465 | torture_type); |
| 4466 | pr_alert("rcu-torture types:" ); |
| 4467 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) |
| 4468 | pr_cont(" %s" , torture_ops[i]->name); |
| 4469 | pr_cont("\n" ); |
| 4470 | firsterr = -EINVAL; |
| 4471 | cur_ops = NULL; |
| 4472 | goto unwind; |
| 4473 | } |
| 4474 | if (cur_ops->fqs == NULL && fqs_duration != 0) { |
| 4475 | pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n" ); |
| 4476 | fqs_duration = 0; |
| 4477 | } |
| 4478 | if (nocbs_nthreads != 0 && (cur_ops != &rcu_ops || |
| 4479 | !IS_ENABLED(CONFIG_RCU_NOCB_CPU))) { |
| 4480 | pr_alert("rcu-torture types: %s and CONFIG_RCU_NOCB_CPU=%d, nocb toggle disabled.\n" , |
| 4481 | cur_ops->name, IS_ENABLED(CONFIG_RCU_NOCB_CPU)); |
| 4482 | nocbs_nthreads = 0; |
| 4483 | } |
| 4484 | if (cur_ops->init) |
| 4485 | cur_ops->init(); |
| 4486 | |
| 4487 | rcu_torture_init_srcu_lockdep(); |
| 4488 | |
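| | // For both nfakewriters and nreaders, a negative value v requests
| | // num_online_cpus() - 2 - v tasks (so -1 yields one fewer task than
| | // there are online CPUs), clamped below at one.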
| 4489 | if (nfakewriters >= 0) { |
| 4490 | nrealfakewriters = nfakewriters; |
| 4491 | } else { |
| 4492 | nrealfakewriters = num_online_cpus() - 2 - nfakewriters; |
| 4493 | if (nrealfakewriters <= 0) |
| 4494 | nrealfakewriters = 1; |
| 4495 | } |
| 4496 | |
| 4497 | if (nreaders >= 0) { |
| 4498 | nrealreaders = nreaders; |
| 4499 | } else { |
| 4500 | nrealreaders = num_online_cpus() - 2 - nreaders; |
| 4501 | if (nrealreaders <= 0) |
| 4502 | nrealreaders = 1; |
| 4503 | } |
| 4504 | rcu_torture_print_module_parms(cur_ops, "Start of test");
| 4505 | if (cur_ops->get_gp_data)
| 4506 | cur_ops->get_gp_data(&flags, &gp_seq);
| 4507 | start_gp_seq = gp_seq;
| 4508 | pr_alert("%s: Start-test grace-period state: g%ld f%#x\n",
| 4509 | cur_ops->name, (long)gp_seq, flags); |
| 4510 | |
| 4511 | /* Set up the freelist. */ |
| 4512 | |
| 4513 | INIT_LIST_HEAD(&rcu_torture_freelist);
| 4514 | for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
| 4515 | rcu_tortures[i].rtort_mbtest = 0;
| 4516 | list_add_tail(&rcu_tortures[i].rtort_free,
| 4517 | &rcu_torture_freelist);
| 4518 | } |
| 4519 | |
| 4520 | /* Initialize the statistics so that each run gets its own numbers. */ |
| 4521 | |
| 4522 | rcu_torture_current = NULL; |
| 4523 | rcu_torture_current_version = 0; |
| 4524 | atomic_set(v: &n_rcu_torture_alloc, i: 0); |
| 4525 | atomic_set(v: &n_rcu_torture_alloc_fail, i: 0); |
| 4526 | atomic_set(v: &n_rcu_torture_free, i: 0); |
| 4527 | atomic_set(v: &n_rcu_torture_mberror, i: 0); |
| 4528 | atomic_set(v: &n_rcu_torture_mbchk_fail, i: 0); |
| 4529 | atomic_set(v: &n_rcu_torture_mbchk_tries, i: 0); |
| 4530 | atomic_set(v: &n_rcu_torture_error, i: 0); |
| 4531 | n_rcu_torture_barrier_error = 0; |
| 4532 | n_rcu_torture_boost_ktrerror = 0; |
| 4533 | n_rcu_torture_boost_failure = 0; |
| 4534 | n_rcu_torture_boosts = 0; |
| 4535 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) |
| 4536 | atomic_set(v: &rcu_torture_wcount[i], i: 0); |
| 4537 | for_each_possible_cpu(cpu) { |
| 4538 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { |
| 4539 | per_cpu(rcu_torture_count, cpu)[i] = 0; |
| 4540 | per_cpu(rcu_torture_batch, cpu)[i] = 0; |
| 4541 | } |
| 4542 | } |
| 4543 | err_segs_recorded = 0; |
| 4544 | rt_read_nsegs = 0; |
| 4545 | |
| 4546 | /* Start up the kthreads. */ |
| 4547 | |
| 4548 | rcu_torture_write_types(); |
| 4549 | if (nrealfakewriters > 0) { |
| 4550 | fakewriter_tasks = kcalloc(nrealfakewriters, |
| 4551 | sizeof(fakewriter_tasks[0]), |
| 4552 | GFP_KERNEL); |
| 4553 | if (fakewriter_tasks == NULL) { |
| 4554 | TOROUT_ERRSTRING("out of memory");
| 4555 | firsterr = -ENOMEM; |
| 4556 | goto unwind; |
| 4557 | } |
| 4558 | } |
| 4559 | for (i = 0; i < nrealfakewriters; i++) { |
| 4560 | firsterr = torture_create_kthread(rcu_torture_fakewriter, |
| 4561 | NULL, fakewriter_tasks[i]); |
| 4562 | if (torture_init_error(firsterr)) |
| 4563 | goto unwind; |
| 4564 | } |
| 4565 | reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), |
| 4566 | GFP_KERNEL); |
| 4567 | rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk), |
| 4568 | GFP_KERNEL); |
| 4569 | if (!reader_tasks || !rcu_torture_reader_mbchk) { |
| 4570 | TOROUT_ERRSTRING("out of memory");
| 4571 | firsterr = -ENOMEM; |
| 4572 | goto unwind; |
| 4573 | } |
| 4574 | for (i = 0; i < nrealreaders; i++) { |
| 4575 | rcu_torture_reader_mbchk[i].rtc_chkrdr = -1; |
| 4576 | firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, |
| 4577 | reader_tasks[i]); |
| 4578 | if (torture_init_error(firsterr)) |
| 4579 | goto unwind; |
| 4580 | } |
| 4581 | |
| 4582 | firsterr = torture_create_kthread(rcu_torture_writer, NULL, |
| 4583 | writer_task); |
| 4584 | if (torture_init_error(firsterr)) |
| 4585 | goto unwind; |
| 4586 | |
| 4587 | firsterr = rcu_torture_updown_init(); |
| 4588 | if (torture_init_error(firsterr)) |
| 4589 | goto unwind; |
| 4590 | nrealnocbers = nocbs_nthreads; |
| 4591 | if (WARN_ON(nrealnocbers < 0)) |
| 4592 | nrealnocbers = 1; |
| 4593 | if (WARN_ON(nocbs_toggle < 0)) |
| 4594 | nocbs_toggle = HZ; |
| 4595 | if (nrealnocbers > 0) { |
| 4596 | nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL); |
| 4597 | if (nocb_tasks == NULL) { |
| 4598 | TOROUT_ERRSTRING("out of memory");
| 4599 | firsterr = -ENOMEM; |
| 4600 | goto unwind; |
| 4601 | } |
| 4602 | } else { |
| 4603 | nocb_tasks = NULL; |
| 4604 | } |
| 4605 | for (i = 0; i < nrealnocbers; i++) { |
| 4606 | firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]); |
| 4607 | if (torture_init_error(firsterr)) |
| 4608 | goto unwind; |
| 4609 | } |
| 4610 | if (stat_interval > 0) { |
| 4611 | firsterr = torture_create_kthread(rcu_torture_stats, NULL, |
| 4612 | stats_task); |
| 4613 | if (torture_init_error(firsterr)) |
| 4614 | goto unwind; |
| 4615 | } |
| 4616 | if (test_no_idle_hz && shuffle_interval > 0) { |
| 4617 | firsterr = torture_shuffle_init(shuffle_interval * HZ);
| 4618 | if (torture_init_error(firsterr)) |
| 4619 | goto unwind; |
| 4620 | } |
| 4621 | if (stutter < 0) |
| 4622 | stutter = 0; |
| 4623 | if (stutter) { |
| 4624 | int t; |
| 4625 | |
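| | // Use the flavor-specific stall duration, when provided, as the
| | // stutter gap; otherwise fall back to the stutter period itself.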
| 4626 | t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ; |
| 4627 | firsterr = torture_stutter_init(stutter * HZ, t);
| 4628 | if (torture_init_error(firsterr)) |
| 4629 | goto unwind; |
| 4630 | } |
| 4631 | if (fqs_duration < 0) |
| 4632 | fqs_duration = 0; |
| 4633 | if (fqs_holdoff < 0) |
| 4634 | fqs_holdoff = 0; |
| 4635 | if (fqs_duration && fqs_holdoff) { |
| 4636 | /* Create the fqs thread */ |
| 4637 | firsterr = torture_create_kthread(rcu_torture_fqs, NULL, |
| 4638 | fqs_task); |
| 4639 | if (torture_init_error(firsterr)) |
| 4640 | goto unwind; |
| 4641 | } |
| 4642 | if (test_boost_interval < 1) |
| 4643 | test_boost_interval = 1; |
| 4644 | if (test_boost_duration < 2) |
| 4645 | test_boost_duration = 2; |
| 4646 | if (rcu_torture_can_boost()) { |
| 4647 | |
| 4648 | boost_starttime = jiffies + test_boost_interval * HZ; |
| 4649 | |
| 4650 | firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
| 4651 | rcutorture_booster_init,
| 4652 | rcutorture_booster_cleanup);
| 4653 | rcutor_hp = firsterr; |
| 4654 | if (torture_init_error(firsterr)) |
| 4655 | goto unwind; |
| 4656 | } |
| 4657 | shutdown_jiffies = jiffies + shutdown_secs * HZ; |
| 4658 | firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
| 4659 | if (torture_init_error(firsterr)) |
| 4660 | goto unwind; |
| 4661 | firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
| 4662 | rcutorture_sync);
| 4663 | if (torture_init_error(firsterr)) |
| 4664 | goto unwind; |
| 4665 | firsterr = rcu_torture_stall_init(); |
| 4666 | if (torture_init_error(firsterr)) |
| 4667 | goto unwind; |
| 4668 | firsterr = rcu_torture_fwd_prog_init(); |
| 4669 | if (torture_init_error(firsterr)) |
| 4670 | goto unwind; |
| 4671 | firsterr = rcu_torture_barrier_init(); |
| 4672 | if (torture_init_error(firsterr)) |
| 4673 | goto unwind; |
| 4674 | firsterr = rcu_torture_read_exit_init(); |
| 4675 | if (torture_init_error(firsterr)) |
| 4676 | goto unwind; |
| 4677 | if (preempt_duration > 0) { |
| 4678 | firsterr = torture_create_kthread(rcu_torture_preempt, NULL, preempt_task); |
| 4679 | if (torture_init_error(firsterr)) |
| 4680 | goto unwind; |
| 4681 | } |
| 4682 | if (object_debug) |
| 4683 | rcu_test_debug_objects(); |
| 4684 | |
| 4685 | if (cur_ops->gp_slow_register && !WARN_ON_ONCE(!cur_ops->gp_slow_unregister)) |
| 4686 | cur_ops->gp_slow_register(&rcu_fwd_cb_nodelay); |
| 4687 | |
| 4688 | if (gpwrap_lag && cur_ops->set_gpwrap_lag) { |
| 4689 | firsterr = rcu_gpwrap_lag_init(); |
| 4690 | if (torture_init_error(firsterr)) |
| 4691 | goto unwind; |
| 4692 | } |
| 4693 | |
| 4694 | torture_init_end(); |
| 4695 | return 0; |
| 4696 | |
| 4697 | unwind: |
| 4698 | torture_init_end(); |
| 4699 | rcu_torture_cleanup(); |
| 4700 | if (shutdown_secs) { |
| 4701 | WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST)); |
| 4702 | kernel_power_off(); |
| 4703 | } |
| 4704 | return firsterr; |
| 4705 | } |
| 4706 | |
| 4707 | module_init(rcu_torture_init); |
| 4708 | module_exit(rcu_torture_cleanup); |
| 4709 | |