1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* |
3 | * Module-based torture test facility for locking |
4 | * |
5 | * Copyright (C) IBM Corporation, 2014 |
6 | * |
7 | * Authors: Paul E. McKenney <paulmck@linux.ibm.com> |
8 | * Davidlohr Bueso <dave@stgolabs.net> |
9 | * Based on kernel/rcu/torture.c. |
10 | */ |
11 | |
12 | #define pr_fmt(fmt) fmt |
13 | |
14 | #include <linux/kernel.h> |
15 | #include <linux/module.h> |
16 | #include <linux/kthread.h> |
17 | #include <linux/sched/rt.h> |
18 | #include <linux/spinlock.h> |
19 | #include <linux/mutex.h> |
20 | #include <linux/rwsem.h> |
21 | #include <linux/smp.h> |
22 | #include <linux/interrupt.h> |
23 | #include <linux/sched.h> |
24 | #include <uapi/linux/sched/types.h> |
25 | #include <linux/rtmutex.h> |
26 | #include <linux/atomic.h> |
27 | #include <linux/moduleparam.h> |
28 | #include <linux/delay.h> |
29 | #include <linux/slab.h> |
30 | #include <linux/torture.h> |
31 | #include <linux/reboot.h> |
32 | |
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

torture_param(int, acq_writer_lim, 0, "Write_acquisition time limit (jiffies).");
torture_param(int, call_rcu_chains, 0, "Self-propagate call_rcu() chains during test (0=disable).");
torture_param(int, long_hold, 100, "Do occasional long hold of lock (ms), 0=disable");
torture_param(int, nested_locks, 0, "Number of nested locks (max = 8)");
torture_param(int, nreaders_stress, -1, "Number of read-locking stress-test threads");
torture_param(int, nwriters_stress, -1, "Number of write-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (s), 0=disable");
torture_param(int, rt_boost, 2,
	      "Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
torture_param(int, rt_boost_factor, 50, "A factor determining how often rt-boost happens.");
torture_param(int, shuffle_interval, 3, "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_fifo, 0, "Run writers at sched_set_fifo() priority");
53 | /* Going much higher trips "BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!" errors */ |
54 | #define MAX_NESTED_LOCKS 8 |
55 | |
static char *torture_type = IS_ENABLED(CONFIG_PREEMPT_RT) ? "raw_spin_lock" : "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
60 | |
61 | static cpumask_var_t bind_readers; // Bind the readers to the specified set of CPUs. |
62 | static cpumask_var_t bind_writers; // Bind the writers to the specified set of CPUs. |
63 | |
// Parse a cpumask kernel parameter. If there are more users later on,
// this might need to go to a more central location.
66 | static int param_set_cpumask(const char *val, const struct kernel_param *kp) |
67 | { |
68 | cpumask_var_t *cm_bind = kp->arg; |
69 | int ret; |
70 | char *s; |
71 | |
	if (!alloc_cpumask_var(cm_bind, GFP_KERNEL)) {
		s = "Out of memory";
		ret = -ENOMEM;
		goto out_err;
	}
	ret = cpulist_parse(val, *cm_bind);
	if (!ret)
		return ret;
	s = "Bad CPU range";
out_err:
	pr_warn("%s: %s, all CPUs set\n", kp->name, s);
	cpumask_setall(*cm_bind);
84 | return ret; |
85 | } |
86 | |
87 | // Output a cpumask kernel parameter. |
88 | static int param_get_cpumask(char *buffer, const struct kernel_param *kp) |
89 | { |
90 | cpumask_var_t *cm_bind = kp->arg; |
91 | |
	return sprintf(buffer, "%*pbl", cpumask_pr_args(*cm_bind));
93 | } |
94 | |
95 | static bool cpumask_nonempty(cpumask_var_t mask) |
96 | { |
	return cpumask_available(mask) && !cpumask_empty(mask);
98 | } |
99 | |
100 | static const struct kernel_param_ops lt_bind_ops = { |
101 | .set = param_set_cpumask, |
102 | .get = param_get_cpumask, |
103 | }; |
104 | |
module_param_cb(bind_readers, &lt_bind_ops, &bind_readers, 0644);
module_param_cb(bind_writers, &lt_bind_ops, &bind_writers, 0644);
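
// For example, "bind_writers=0-3,8" (an illustrative value) binds the writer
// kthreads to CPUs 0 through 3 plus CPU 8, using the usual cpulist_parse()
// range syntax; an unparsable value falls back to binding to all CPUs.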
107 | |
108 | long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask); |
109 | |
110 | static struct task_struct *stats_task; |
111 | static struct task_struct **writer_tasks; |
112 | static struct task_struct **reader_tasks; |
113 | |
114 | static bool lock_is_write_held; |
115 | static atomic_t lock_is_read_held; |
116 | static unsigned long last_lock_release; |
117 | |
118 | struct lock_stress_stats { |
119 | long n_lock_fail; |
120 | long n_lock_acquired; |
121 | }; |
122 | |
123 | struct call_rcu_chain { |
124 | struct rcu_head crc_rh; |
125 | bool crc_stop; |
126 | }; |
static struct call_rcu_chain *call_rcu_chain_list;
128 | |
129 | /* Forward reference. */ |
130 | static void lock_torture_cleanup(void); |
131 | |
132 | /* |
133 | * Operations vector for selecting different types of tests. |
134 | */ |
135 | struct lock_torture_ops { |
136 | void (*init)(void); |
137 | void (*exit)(void); |
138 | int (*nested_lock)(int tid, u32 lockset); |
139 | int (*writelock)(int tid); |
140 | void (*write_delay)(struct torture_random_state *trsp); |
141 | void (*task_boost)(struct torture_random_state *trsp); |
142 | void (*writeunlock)(int tid); |
143 | void (*nested_unlock)(int tid, u32 lockset); |
144 | int (*readlock)(int tid); |
145 | void (*read_delay)(struct torture_random_state *trsp); |
146 | void (*readunlock)(int tid); |
147 | |
148 | unsigned long flags; /* for irq spinlocks */ |
149 | const char *name; |
150 | }; |
151 | |
152 | struct lock_torture_cxt { |
153 | int nrealwriters_stress; |
154 | int nrealreaders_stress; |
155 | bool debug_lock; |
156 | bool init_called; |
157 | atomic_t n_lock_torture_errors; |
158 | struct lock_torture_ops *cur_ops; |
159 | struct lock_stress_stats *lwsa; /* writer statistics */ |
160 | struct lock_stress_stats *lrsa; /* reader statistics */ |
161 | }; |
162 | static struct lock_torture_cxt cxt = { 0, 0, false, false, |
163 | ATOMIC_INIT(0), |
164 | NULL, NULL}; |
165 | /* |
166 | * Definitions for lock torture testing. |
167 | */ |
168 | |
169 | static int torture_lock_busted_write_lock(int tid __maybe_unused) |
170 | { |
171 | return 0; /* BUGGY, do not use in real life!!! */ |
172 | } |
173 | |
174 | static void torture_lock_busted_write_delay(struct torture_random_state *trsp) |
175 | { |
176 | /* We want a long delay occasionally to force massive contention. */ |
177 | if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold))) |
178 | mdelay(long_hold); |
179 | if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) |
180 | torture_preempt_schedule(); /* Allow test to be preempted. */ |
181 | } |
182 | |
183 | static void torture_lock_busted_write_unlock(int tid __maybe_unused) |
184 | { |
185 | /* BUGGY, do not use in real life!!! */ |
186 | } |
187 | |
188 | static void __torture_rt_boost(struct torture_random_state *trsp) |
189 | { |
190 | const unsigned int factor = rt_boost_factor; |
191 | |
192 | if (!rt_task(current)) { |
		/*
		 * Boost priority once every rt_boost_factor operations. When
		 * the task tries to take the lock, the rtmutex will account
		 * for the new priority, and do any corresponding pi-dance.
		 */
198 | if (trsp && !(torture_random(trsp) % |
199 | (cxt.nrealwriters_stress * factor))) { |
200 | sched_set_fifo(current); |
201 | } else /* common case, do nothing */ |
202 | return; |
203 | } else { |
204 | /* |
205 | * The task will remain boosted for another 10 * rt_boost_factor |
206 | * operations, then restored back to its original prio, and so |
207 | * forth. |
208 | * |
209 | * When @trsp is nil, we want to force-reset the task for |
210 | * stopping the kthread. |
211 | */ |
212 | if (!trsp || !(torture_random(trsp) % |
213 | (cxt.nrealwriters_stress * factor * 2))) { |
			sched_set_normal(current, 0);
215 | } else /* common case, do nothing */ |
216 | return; |
217 | } |
218 | } |
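
/*
 * Back-of-the-envelope arithmetic for the probabilities coded above,
 * treating successive torture_random() draws as independent: with N
 * writers, an unboosted task boosts with probability 1/(N * factor) per
 * call, and a boosted task de-boosts with probability 1/(2 * N * factor),
 * so a boosted stretch lasts roughly 2 * N * factor calls on average.
 */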
219 | |
220 | static void torture_rt_boost(struct torture_random_state *trsp) |
221 | { |
222 | if (rt_boost != 2) |
223 | return; |
224 | |
225 | __torture_rt_boost(trsp); |
226 | } |
227 | |
228 | static struct lock_torture_ops lock_busted_ops = { |
229 | .writelock = torture_lock_busted_write_lock, |
230 | .write_delay = torture_lock_busted_write_delay, |
231 | .task_boost = torture_rt_boost, |
232 | .writeunlock = torture_lock_busted_write_unlock, |
233 | .readlock = NULL, |
234 | .read_delay = NULL, |
235 | .readunlock = NULL, |
236 | .name = "lock_busted" |
237 | }; |
238 | |
239 | static DEFINE_SPINLOCK(torture_spinlock); |
240 | |
241 | static int torture_spin_lock_write_lock(int tid __maybe_unused) |
242 | __acquires(torture_spinlock) |
243 | { |
	spin_lock(&torture_spinlock);
245 | return 0; |
246 | } |
247 | |
248 | static void torture_spin_lock_write_delay(struct torture_random_state *trsp) |
249 | { |
250 | const unsigned long shortdelay_us = 2; |
251 | unsigned long j; |
252 | |
253 | /* We want a short delay mostly to emulate likely code, and |
254 | * we want a long delay occasionally to force massive contention. |
255 | */ |
256 | if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold))) { |
257 | j = jiffies; |
258 | mdelay(long_hold); |
		pr_alert("%s: delay = %lu jiffies.\n", __func__, jiffies - j);
260 | } |
261 | if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 200 * shortdelay_us))) |
262 | udelay(shortdelay_us); |
263 | if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) |
264 | torture_preempt_schedule(); /* Allow test to be preempted. */ |
265 | } |
266 | |
267 | static void torture_spin_lock_write_unlock(int tid __maybe_unused) |
268 | __releases(torture_spinlock) |
269 | { |
	spin_unlock(&torture_spinlock);
271 | } |
272 | |
273 | static struct lock_torture_ops spin_lock_ops = { |
274 | .writelock = torture_spin_lock_write_lock, |
275 | .write_delay = torture_spin_lock_write_delay, |
276 | .task_boost = torture_rt_boost, |
277 | .writeunlock = torture_spin_lock_write_unlock, |
278 | .readlock = NULL, |
279 | .read_delay = NULL, |
280 | .readunlock = NULL, |
281 | .name = "spin_lock" |
282 | }; |
283 | |
284 | static int torture_spin_lock_write_lock_irq(int tid __maybe_unused) |
285 | __acquires(torture_spinlock) |
286 | { |
287 | unsigned long flags; |
288 | |
289 | spin_lock_irqsave(&torture_spinlock, flags); |
290 | cxt.cur_ops->flags = flags; |
291 | return 0; |
292 | } |
293 | |
294 | static void torture_lock_spin_write_unlock_irq(int tid __maybe_unused) |
295 | __releases(torture_spinlock) |
296 | { |
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
298 | } |
299 | |
300 | static struct lock_torture_ops spin_lock_irq_ops = { |
301 | .writelock = torture_spin_lock_write_lock_irq, |
302 | .write_delay = torture_spin_lock_write_delay, |
303 | .task_boost = torture_rt_boost, |
304 | .writeunlock = torture_lock_spin_write_unlock_irq, |
305 | .readlock = NULL, |
306 | .read_delay = NULL, |
307 | .readunlock = NULL, |
308 | .name = "spin_lock_irq" |
309 | }; |
310 | |
311 | static DEFINE_RAW_SPINLOCK(torture_raw_spinlock); |
312 | |
313 | static int torture_raw_spin_lock_write_lock(int tid __maybe_unused) |
314 | __acquires(torture_raw_spinlock) |
315 | { |
316 | raw_spin_lock(&torture_raw_spinlock); |
317 | return 0; |
318 | } |
319 | |
320 | static void torture_raw_spin_lock_write_unlock(int tid __maybe_unused) |
321 | __releases(torture_raw_spinlock) |
322 | { |
323 | raw_spin_unlock(&torture_raw_spinlock); |
324 | } |
325 | |
326 | static struct lock_torture_ops raw_spin_lock_ops = { |
327 | .writelock = torture_raw_spin_lock_write_lock, |
328 | .write_delay = torture_spin_lock_write_delay, |
329 | .task_boost = torture_rt_boost, |
330 | .writeunlock = torture_raw_spin_lock_write_unlock, |
331 | .readlock = NULL, |
332 | .read_delay = NULL, |
333 | .readunlock = NULL, |
334 | .name = "raw_spin_lock" |
335 | }; |
336 | |
337 | static int torture_raw_spin_lock_write_lock_irq(int tid __maybe_unused) |
338 | __acquires(torture_raw_spinlock) |
339 | { |
340 | unsigned long flags; |
341 | |
342 | raw_spin_lock_irqsave(&torture_raw_spinlock, flags); |
343 | cxt.cur_ops->flags = flags; |
344 | return 0; |
345 | } |
346 | |
347 | static void torture_raw_spin_lock_write_unlock_irq(int tid __maybe_unused) |
348 | __releases(torture_raw_spinlock) |
349 | { |
350 | raw_spin_unlock_irqrestore(&torture_raw_spinlock, cxt.cur_ops->flags); |
351 | } |
352 | |
353 | static struct lock_torture_ops raw_spin_lock_irq_ops = { |
354 | .writelock = torture_raw_spin_lock_write_lock_irq, |
355 | .write_delay = torture_spin_lock_write_delay, |
356 | .task_boost = torture_rt_boost, |
357 | .writeunlock = torture_raw_spin_lock_write_unlock_irq, |
358 | .readlock = NULL, |
359 | .read_delay = NULL, |
360 | .readunlock = NULL, |
361 | .name = "raw_spin_lock_irq" |
362 | }; |
363 | |
364 | static DEFINE_RWLOCK(torture_rwlock); |
365 | |
366 | static int torture_rwlock_write_lock(int tid __maybe_unused) |
367 | __acquires(torture_rwlock) |
368 | { |
369 | write_lock(&torture_rwlock); |
370 | return 0; |
371 | } |
372 | |
373 | static void torture_rwlock_write_delay(struct torture_random_state *trsp) |
374 | { |
375 | const unsigned long shortdelay_us = 2; |
376 | |
377 | /* We want a short delay mostly to emulate likely code, and |
378 | * we want a long delay occasionally to force massive contention. |
379 | */ |
380 | if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold))) |
381 | mdelay(long_hold); |
382 | else |
383 | udelay(shortdelay_us); |
384 | } |
385 | |
386 | static void torture_rwlock_write_unlock(int tid __maybe_unused) |
387 | __releases(torture_rwlock) |
388 | { |
389 | write_unlock(&torture_rwlock); |
390 | } |
391 | |
392 | static int torture_rwlock_read_lock(int tid __maybe_unused) |
393 | __acquires(torture_rwlock) |
394 | { |
395 | read_lock(&torture_rwlock); |
396 | return 0; |
397 | } |
398 | |
399 | static void torture_rwlock_read_delay(struct torture_random_state *trsp) |
400 | { |
401 | const unsigned long shortdelay_us = 10; |
402 | |
403 | /* We want a short delay mostly to emulate likely code, and |
404 | * we want a long delay occasionally to force massive contention. |
405 | */ |
406 | if (long_hold && !(torture_random(trsp) % (cxt.nrealreaders_stress * 2000 * long_hold))) |
407 | mdelay(long_hold); |
408 | else |
409 | udelay(shortdelay_us); |
410 | } |
411 | |
412 | static void torture_rwlock_read_unlock(int tid __maybe_unused) |
413 | __releases(torture_rwlock) |
414 | { |
415 | read_unlock(&torture_rwlock); |
416 | } |
417 | |
418 | static struct lock_torture_ops rw_lock_ops = { |
419 | .writelock = torture_rwlock_write_lock, |
420 | .write_delay = torture_rwlock_write_delay, |
421 | .task_boost = torture_rt_boost, |
422 | .writeunlock = torture_rwlock_write_unlock, |
423 | .readlock = torture_rwlock_read_lock, |
424 | .read_delay = torture_rwlock_read_delay, |
425 | .readunlock = torture_rwlock_read_unlock, |
426 | .name = "rw_lock" |
427 | }; |
428 | |
429 | static int torture_rwlock_write_lock_irq(int tid __maybe_unused) |
430 | __acquires(torture_rwlock) |
431 | { |
432 | unsigned long flags; |
433 | |
434 | write_lock_irqsave(&torture_rwlock, flags); |
435 | cxt.cur_ops->flags = flags; |
436 | return 0; |
437 | } |
438 | |
439 | static void torture_rwlock_write_unlock_irq(int tid __maybe_unused) |
440 | __releases(torture_rwlock) |
441 | { |
442 | write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags); |
443 | } |
444 | |
445 | static int torture_rwlock_read_lock_irq(int tid __maybe_unused) |
446 | __acquires(torture_rwlock) |
447 | { |
448 | unsigned long flags; |
449 | |
450 | read_lock_irqsave(&torture_rwlock, flags); |
451 | cxt.cur_ops->flags = flags; |
452 | return 0; |
453 | } |
454 | |
455 | static void torture_rwlock_read_unlock_irq(int tid __maybe_unused) |
456 | __releases(torture_rwlock) |
457 | { |
458 | read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags); |
459 | } |
460 | |
461 | static struct lock_torture_ops rw_lock_irq_ops = { |
462 | .writelock = torture_rwlock_write_lock_irq, |
463 | .write_delay = torture_rwlock_write_delay, |
464 | .task_boost = torture_rt_boost, |
465 | .writeunlock = torture_rwlock_write_unlock_irq, |
466 | .readlock = torture_rwlock_read_lock_irq, |
467 | .read_delay = torture_rwlock_read_delay, |
468 | .readunlock = torture_rwlock_read_unlock_irq, |
469 | .name = "rw_lock_irq" |
470 | }; |
471 | |
472 | static DEFINE_MUTEX(torture_mutex); |
473 | static struct mutex torture_nested_mutexes[MAX_NESTED_LOCKS]; |
474 | static struct lock_class_key nested_mutex_keys[MAX_NESTED_LOCKS]; |
475 | |
476 | static void torture_mutex_init(void) |
477 | { |
478 | int i; |
479 | |
480 | for (i = 0; i < MAX_NESTED_LOCKS; i++) |
		__mutex_init(&torture_nested_mutexes[i], __func__,
			     &nested_mutex_keys[i]);
483 | } |
484 | |
485 | static int torture_mutex_nested_lock(int tid __maybe_unused, |
486 | u32 lockset) |
487 | { |
488 | int i; |
489 | |
490 | for (i = 0; i < nested_locks; i++) |
491 | if (lockset & (1 << i)) |
492 | mutex_lock(&torture_nested_mutexes[i]); |
493 | return 0; |
494 | } |
495 | |
496 | static int torture_mutex_lock(int tid __maybe_unused) |
497 | __acquires(torture_mutex) |
498 | { |
499 | mutex_lock(&torture_mutex); |
500 | return 0; |
501 | } |
502 | |
503 | static void torture_mutex_delay(struct torture_random_state *trsp) |
504 | { |
505 | /* We want a long delay occasionally to force massive contention. */ |
506 | if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold))) |
507 | mdelay(long_hold * 5); |
508 | if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) |
509 | torture_preempt_schedule(); /* Allow test to be preempted. */ |
510 | } |
511 | |
512 | static void torture_mutex_unlock(int tid __maybe_unused) |
513 | __releases(torture_mutex) |
514 | { |
	mutex_unlock(&torture_mutex);
516 | } |
517 | |
518 | static void torture_mutex_nested_unlock(int tid __maybe_unused, |
519 | u32 lockset) |
520 | { |
521 | int i; |
522 | |
523 | for (i = nested_locks - 1; i >= 0; i--) |
524 | if (lockset & (1 << i)) |
			mutex_unlock(&torture_nested_mutexes[i]);
526 | } |
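
/*
 * Worked example (illustrative only): with nested_locks = 3 and a lockset
 * of 0b101, torture_mutex_nested_lock() acquires nested mutexes 0 and 2 in
 * ascending index order, and torture_mutex_nested_unlock() releases them in
 * descending order, keeping acquisitions and releases strictly nested.
 */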
527 | |
528 | static struct lock_torture_ops mutex_lock_ops = { |
529 | .init = torture_mutex_init, |
530 | .nested_lock = torture_mutex_nested_lock, |
531 | .writelock = torture_mutex_lock, |
532 | .write_delay = torture_mutex_delay, |
533 | .task_boost = torture_rt_boost, |
534 | .writeunlock = torture_mutex_unlock, |
535 | .nested_unlock = torture_mutex_nested_unlock, |
536 | .readlock = NULL, |
537 | .read_delay = NULL, |
538 | .readunlock = NULL, |
539 | .name = "mutex_lock" |
540 | }; |
541 | |
542 | #include <linux/ww_mutex.h> |
543 | /* |
544 | * The torture ww_mutexes should belong to the same lock class as |
545 | * torture_ww_class to avoid lockdep problem. The ww_mutex_init() |
546 | * function is called for initialization to ensure that. |
547 | */ |
548 | static DEFINE_WD_CLASS(torture_ww_class); |
549 | static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2; |
550 | static struct ww_acquire_ctx *ww_acquire_ctxs; |
551 | |
552 | static void torture_ww_mutex_init(void) |
553 | { |
	ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class);

	ww_acquire_ctxs = kmalloc_array(cxt.nrealwriters_stress,
					sizeof(*ww_acquire_ctxs),
					GFP_KERNEL);
	if (!ww_acquire_ctxs)
		VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory");
563 | } |
564 | |
565 | static void torture_ww_mutex_exit(void) |
566 | { |
	kfree(ww_acquire_ctxs);
568 | } |
569 | |
570 | static int torture_ww_mutex_lock(int tid) |
571 | __acquires(torture_ww_mutex_0) |
572 | __acquires(torture_ww_mutex_1) |
573 | __acquires(torture_ww_mutex_2) |
574 | { |
575 | LIST_HEAD(list); |
576 | struct reorder_lock { |
577 | struct list_head link; |
578 | struct ww_mutex *lock; |
579 | } locks[3], *ll, *ln; |
580 | struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid]; |
581 | |
	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, ctx);
		if (!err)
			continue;

		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		ww_mutex_lock_slow(ll->lock, ctx);
		list_move(&ll->link, &list);
	}
609 | } |
610 | |
611 | return 0; |
612 | } |
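
/*
 * A note on the loop above: it follows the standard ww_mutex backoff
 * pattern. On -EDEADLK, every mutex acquired so far is dropped in reverse
 * order, the contended mutex is then taken with ww_mutex_lock_slow() (which
 * sleeps but cannot deadlock for the backed-off context), and list_move()
 * puts the now-held mutex at the head of the list, so the loop resumes
 * through the remaining entries and re-acquires the ones it dropped.
 */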
613 | |
614 | static void torture_ww_mutex_unlock(int tid) |
615 | __releases(torture_ww_mutex_0) |
616 | __releases(torture_ww_mutex_1) |
617 | __releases(torture_ww_mutex_2) |
618 | { |
619 | struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid]; |
620 | |
	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
624 | ww_acquire_fini(ctx); |
625 | } |
626 | |
627 | static struct lock_torture_ops ww_mutex_lock_ops = { |
628 | .init = torture_ww_mutex_init, |
629 | .exit = torture_ww_mutex_exit, |
630 | .writelock = torture_ww_mutex_lock, |
631 | .write_delay = torture_mutex_delay, |
632 | .task_boost = torture_rt_boost, |
633 | .writeunlock = torture_ww_mutex_unlock, |
634 | .readlock = NULL, |
635 | .read_delay = NULL, |
636 | .readunlock = NULL, |
637 | .name = "ww_mutex_lock" |
638 | }; |
639 | |
640 | #ifdef CONFIG_RT_MUTEXES |
641 | static DEFINE_RT_MUTEX(torture_rtmutex); |
642 | static struct rt_mutex torture_nested_rtmutexes[MAX_NESTED_LOCKS]; |
643 | static struct lock_class_key nested_rtmutex_keys[MAX_NESTED_LOCKS]; |
644 | |
645 | static void torture_rtmutex_init(void) |
646 | { |
647 | int i; |
648 | |
649 | for (i = 0; i < MAX_NESTED_LOCKS; i++) |
		__rt_mutex_init(&torture_nested_rtmutexes[i], __func__,
				&nested_rtmutex_keys[i]);
652 | } |
653 | |
654 | static int torture_rtmutex_nested_lock(int tid __maybe_unused, |
655 | u32 lockset) |
656 | { |
657 | int i; |
658 | |
659 | for (i = 0; i < nested_locks; i++) |
660 | if (lockset & (1 << i)) |
661 | rt_mutex_lock(&torture_nested_rtmutexes[i]); |
662 | return 0; |
663 | } |
664 | |
665 | static int torture_rtmutex_lock(int tid __maybe_unused) |
666 | __acquires(torture_rtmutex) |
667 | { |
668 | rt_mutex_lock(&torture_rtmutex); |
669 | return 0; |
670 | } |
671 | |
672 | static void torture_rtmutex_delay(struct torture_random_state *trsp) |
673 | { |
674 | const unsigned long shortdelay_us = 2; |
675 | |
676 | /* |
677 | * We want a short delay mostly to emulate likely code, and |
678 | * we want a long delay occasionally to force massive contention. |
679 | */ |
680 | if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold))) |
681 | mdelay(long_hold); |
682 | if (!(torture_random(trsp) % |
683 | (cxt.nrealwriters_stress * 200 * shortdelay_us))) |
684 | udelay(shortdelay_us); |
685 | if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) |
686 | torture_preempt_schedule(); /* Allow test to be preempted. */ |
687 | } |
688 | |
689 | static void torture_rtmutex_unlock(int tid __maybe_unused) |
690 | __releases(torture_rtmutex) |
691 | { |
	rt_mutex_unlock(&torture_rtmutex);
693 | } |
694 | |
695 | static void torture_rt_boost_rtmutex(struct torture_random_state *trsp) |
696 | { |
697 | if (!rt_boost) |
698 | return; |
699 | |
700 | __torture_rt_boost(trsp); |
701 | } |
702 | |
703 | static void torture_rtmutex_nested_unlock(int tid __maybe_unused, |
704 | u32 lockset) |
705 | { |
706 | int i; |
707 | |
708 | for (i = nested_locks - 1; i >= 0; i--) |
709 | if (lockset & (1 << i)) |
			rt_mutex_unlock(&torture_nested_rtmutexes[i]);
711 | } |
712 | |
713 | static struct lock_torture_ops rtmutex_lock_ops = { |
714 | .init = torture_rtmutex_init, |
715 | .nested_lock = torture_rtmutex_nested_lock, |
716 | .writelock = torture_rtmutex_lock, |
717 | .write_delay = torture_rtmutex_delay, |
718 | .task_boost = torture_rt_boost_rtmutex, |
719 | .writeunlock = torture_rtmutex_unlock, |
720 | .nested_unlock = torture_rtmutex_nested_unlock, |
721 | .readlock = NULL, |
722 | .read_delay = NULL, |
723 | .readunlock = NULL, |
724 | .name = "rtmutex_lock" |
725 | }; |
726 | #endif |
727 | |
728 | static DECLARE_RWSEM(torture_rwsem); |
729 | static int torture_rwsem_down_write(int tid __maybe_unused) |
730 | __acquires(torture_rwsem) |
731 | { |
	down_write(&torture_rwsem);
733 | return 0; |
734 | } |
735 | |
736 | static void torture_rwsem_write_delay(struct torture_random_state *trsp) |
737 | { |
738 | /* We want a long delay occasionally to force massive contention. */ |
739 | if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold))) |
740 | mdelay(long_hold * 10); |
741 | if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) |
742 | torture_preempt_schedule(); /* Allow test to be preempted. */ |
743 | } |
744 | |
745 | static void torture_rwsem_up_write(int tid __maybe_unused) |
746 | __releases(torture_rwsem) |
747 | { |
	up_write(&torture_rwsem);
749 | } |
750 | |
751 | static int torture_rwsem_down_read(int tid __maybe_unused) |
752 | __acquires(torture_rwsem) |
753 | { |
	down_read(&torture_rwsem);
755 | return 0; |
756 | } |
757 | |
758 | static void torture_rwsem_read_delay(struct torture_random_state *trsp) |
759 | { |
760 | /* We want a long delay occasionally to force massive contention. */ |
761 | if (long_hold && !(torture_random(trsp) % (cxt.nrealreaders_stress * 2000 * long_hold))) |
762 | mdelay(long_hold * 2); |
763 | else |
764 | mdelay(long_hold / 2); |
765 | if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000))) |
766 | torture_preempt_schedule(); /* Allow test to be preempted. */ |
767 | } |
768 | |
769 | static void torture_rwsem_up_read(int tid __maybe_unused) |
770 | __releases(torture_rwsem) |
771 | { |
	up_read(&torture_rwsem);
773 | } |
774 | |
775 | static struct lock_torture_ops rwsem_lock_ops = { |
776 | .writelock = torture_rwsem_down_write, |
777 | .write_delay = torture_rwsem_write_delay, |
778 | .task_boost = torture_rt_boost, |
779 | .writeunlock = torture_rwsem_up_write, |
780 | .readlock = torture_rwsem_down_read, |
781 | .read_delay = torture_rwsem_read_delay, |
782 | .readunlock = torture_rwsem_up_read, |
783 | .name = "rwsem_lock" |
784 | }; |
785 | |
786 | #include <linux/percpu-rwsem.h> |
787 | static struct percpu_rw_semaphore pcpu_rwsem; |
788 | |
789 | static void torture_percpu_rwsem_init(void) |
790 | { |
791 | BUG_ON(percpu_init_rwsem(&pcpu_rwsem)); |
792 | } |
793 | |
794 | static void torture_percpu_rwsem_exit(void) |
795 | { |
796 | percpu_free_rwsem(&pcpu_rwsem); |
797 | } |
798 | |
799 | static int torture_percpu_rwsem_down_write(int tid __maybe_unused) |
800 | __acquires(pcpu_rwsem) |
801 | { |
802 | percpu_down_write(&pcpu_rwsem); |
803 | return 0; |
804 | } |
805 | |
806 | static void torture_percpu_rwsem_up_write(int tid __maybe_unused) |
807 | __releases(pcpu_rwsem) |
808 | { |
809 | percpu_up_write(&pcpu_rwsem); |
810 | } |
811 | |
812 | static int torture_percpu_rwsem_down_read(int tid __maybe_unused) |
813 | __acquires(pcpu_rwsem) |
814 | { |
	percpu_down_read(&pcpu_rwsem);
816 | return 0; |
817 | } |
818 | |
819 | static void torture_percpu_rwsem_up_read(int tid __maybe_unused) |
820 | __releases(pcpu_rwsem) |
821 | { |
	percpu_up_read(&pcpu_rwsem);
823 | } |
824 | |
825 | static struct lock_torture_ops percpu_rwsem_lock_ops = { |
826 | .init = torture_percpu_rwsem_init, |
827 | .exit = torture_percpu_rwsem_exit, |
828 | .writelock = torture_percpu_rwsem_down_write, |
829 | .write_delay = torture_rwsem_write_delay, |
830 | .task_boost = torture_rt_boost, |
831 | .writeunlock = torture_percpu_rwsem_up_write, |
832 | .readlock = torture_percpu_rwsem_down_read, |
833 | .read_delay = torture_rwsem_read_delay, |
834 | .readunlock = torture_percpu_rwsem_up_read, |
835 | .name = "percpu_rwsem_lock" |
836 | }; |
837 | |
838 | /* |
839 | * Lock torture writer kthread. Repeatedly acquires and releases |
840 | * the lock, checking for duplicate acquisitions. |
841 | */ |
842 | static int lock_torture_writer(void *arg) |
843 | { |
844 | unsigned long j; |
845 | unsigned long j1; |
846 | u32 lockset_mask; |
847 | struct lock_stress_stats *lwsp = arg; |
848 | DEFINE_TORTURE_RANDOM(rand); |
849 | bool skip_main_lock; |
850 | int tid = lwsp - cxt.lwsa; |
851 | |
	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
853 | if (!rt_task(current)) |
854 | set_user_nice(current, MAX_NICE); |
855 | |
856 | do { |
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		lockset_mask = torture_random(&rand);
861 | /* |
862 | * When using nested_locks, we want to occasionally |
863 | * skip the main lock so we can avoid always serializing |
864 | * the lock chains on that central lock. By skipping the |
865 | * main lock occasionally, we can create different |
866 | * contention patterns (allowing for multiple disjoint |
867 | * blocked trees) |
868 | */ |
		skip_main_lock = (nested_locks &&
				  !(torture_random(&rand) % 100));
871 | |
872 | cxt.cur_ops->task_boost(&rand); |
873 | if (cxt.cur_ops->nested_lock) |
874 | cxt.cur_ops->nested_lock(tid, lockset_mask); |
875 | |
876 | if (!skip_main_lock) { |
877 | if (acq_writer_lim > 0) |
878 | j = jiffies; |
879 | cxt.cur_ops->writelock(tid); |
880 | if (WARN_ON_ONCE(lock_is_write_held)) |
881 | lwsp->n_lock_fail++; |
882 | lock_is_write_held = true; |
883 | if (WARN_ON_ONCE(atomic_read(&lock_is_read_held))) |
884 | lwsp->n_lock_fail++; /* rare, but... */ |
885 | if (acq_writer_lim > 0) { |
886 | j1 = jiffies; |
				WARN_ONCE(time_after(j1, j + acq_writer_lim),
					  "%s: Lock acquisition took %lu jiffies.\n",
					  __func__, j1 - j);
890 | } |
891 | lwsp->n_lock_acquired++; |
892 | |
893 | cxt.cur_ops->write_delay(&rand); |
894 | |
895 | lock_is_write_held = false; |
896 | WRITE_ONCE(last_lock_release, jiffies); |
897 | cxt.cur_ops->writeunlock(tid); |
898 | } |
899 | if (cxt.cur_ops->nested_unlock) |
900 | cxt.cur_ops->nested_unlock(tid, lockset_mask); |
901 | |
		stutter_wait("lock_torture_writer");
903 | } while (!torture_must_stop()); |
904 | |
905 | cxt.cur_ops->task_boost(NULL); /* reset prio */ |
	torture_kthread_stopping("lock_torture_writer");
907 | return 0; |
908 | } |
909 | |
910 | /* |
911 | * Lock torture reader kthread. Repeatedly acquires and releases |
912 | * the reader lock. |
913 | */ |
914 | static int lock_torture_reader(void *arg) |
915 | { |
916 | struct lock_stress_stats *lrsp = arg; |
917 | int tid = lrsp - cxt.lrsa; |
918 | DEFINE_TORTURE_RANDOM(rand); |
919 | |
	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
921 | set_user_nice(current, MAX_NICE); |
922 | |
923 | do { |
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);
926 | |
927 | cxt.cur_ops->readlock(tid); |
		atomic_inc(&lock_is_read_held);
929 | if (WARN_ON_ONCE(lock_is_write_held)) |
930 | lrsp->n_lock_fail++; /* rare, but... */ |
931 | |
932 | lrsp->n_lock_acquired++; |
933 | cxt.cur_ops->read_delay(&rand); |
		atomic_dec(&lock_is_read_held);
935 | cxt.cur_ops->readunlock(tid); |
936 | |
		stutter_wait("lock_torture_reader");
938 | } while (!torture_must_stop()); |
	torture_kthread_stopping("lock_torture_reader");
940 | return 0; |
941 | } |
942 | |
943 | /* |
 * Create a lock-torture-statistics message in the specified buffer.
945 | */ |
946 | static void __torture_print_stats(char *page, |
947 | struct lock_stress_stats *statp, bool write) |
948 | { |
949 | long cur; |
950 | bool fail = false; |
951 | int i, n_stress; |
952 | long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0; |
953 | long long sum = 0; |
954 | |
955 | n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress; |
956 | for (i = 0; i < n_stress; i++) { |
957 | if (data_race(statp[i].n_lock_fail)) |
958 | fail = true; |
959 | cur = data_race(statp[i].n_lock_acquired); |
960 | sum += cur; |
961 | if (max < cur) |
962 | max = cur; |
963 | if (min > cur) |
964 | min = cur; |
965 | } |
	page += sprintf(page,
			"%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min,
			!onoff_interval && max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
974 | } |
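
/*
 * The resulting line looks something like this (values illustrative only):
 *
 *	Writes: Total: 94381 Max/Min: 5120/3001  Fail: 0
 *
 * with "???" appended to flag a suspicious max/min imbalance and "!!!"
 * appended to flag lock-acquisition failures.
 */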
975 | |
976 | /* |
977 | * Print torture statistics. Caller must ensure that there is only one |
978 | * call to this function at a given time!!! This is normally accomplished |
979 | * by relying on the module system to only have one copy of the module |
980 | * loaded, and then by giving the lock_torture_stats kthread full control |
981 | * (or the init/cleanup functions when lock_torture_stats thread is not |
982 | * running). |
983 | */ |
984 | static void lock_torture_stats_print(void) |
985 | { |
986 | int size = cxt.nrealwriters_stress * 200 + 8192; |
987 | char *buf; |
988 | |
989 | if (cxt.cur_ops->readlock) |
990 | size += cxt.nrealreaders_stress * 200 + 8192; |
991 | |
992 | buf = kmalloc(size, GFP_KERNEL); |
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);
1002 | |
1003 | if (cxt.cur_ops->readlock) { |
1004 | buf = kmalloc(size, GFP_KERNEL); |
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
1014 | } |
1015 | } |
1016 | |
1017 | /* |
1018 | * Periodically prints torture statistics, if periodic statistics printing |
1019 | * was specified via the stat_interval module parameter. |
1020 | * |
1021 | * No need to worry about fullstop here, since this one doesn't reference |
1022 | * volatile state or register callbacks. |
1023 | */ |
1024 | static int lock_torture_stats(void *arg) |
1025 | { |
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
1033 | return 0; |
1034 | } |
1035 | |
1036 | |
1037 | static inline void |
1038 | lock_torture_print_module_parms(struct lock_torture_ops *cur_ops, |
1039 | const char *tag) |
1040 | { |
1041 | static cpumask_t cpumask_all; |
	cpumask_t *rcmp = cpumask_nonempty(bind_readers) ? bind_readers : &cpumask_all;
	cpumask_t *wcmp = cpumask_nonempty(bind_writers) ? bind_writers : &cpumask_all;

	cpumask_setall(&cpumask_all);
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: acq_writer_lim=%d bind_readers=%*pbl bind_writers=%*pbl call_rcu_chains=%d long_hold=%d nested_locks=%d nreaders_stress=%d nwriters_stress=%d onoff_holdoff=%d onoff_interval=%d rt_boost=%d rt_boost_factor=%d shuffle_interval=%d shutdown_secs=%d stat_interval=%d stutter=%d verbose=%d writer_fifo=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]" : "",
1049 | acq_writer_lim, cpumask_pr_args(rcmp), cpumask_pr_args(wcmp), |
1050 | call_rcu_chains, long_hold, nested_locks, cxt.nrealreaders_stress, |
1051 | cxt.nrealwriters_stress, onoff_holdoff, onoff_interval, rt_boost, |
1052 | rt_boost_factor, shuffle_interval, shutdown_secs, stat_interval, stutter, |
1053 | verbose, writer_fifo); |
1054 | } |
1055 | |
1056 | // If requested, maintain call_rcu() chains to keep a grace period always |
1057 | // in flight. These increase the probability of getting an RCU CPU stall |
1058 | // warning and associated diagnostics when a locking primitive stalls. |
1059 | |
1060 | static void call_rcu_chain_cb(struct rcu_head *rhp) |
1061 | { |
1062 | struct call_rcu_chain *crcp = container_of(rhp, struct call_rcu_chain, crc_rh); |
1063 | |
1064 | if (!smp_load_acquire(&crcp->crc_stop)) { |
1065 | (void)start_poll_synchronize_rcu(); // Start one grace period... |
		call_rcu(&crcp->crc_rh, call_rcu_chain_cb); // ... and later start another.
1067 | } |
1068 | } |
1069 | |
1070 | // Start the requested number of call_rcu() chains. |
1071 | static int call_rcu_chain_init(void) |
1072 | { |
1073 | int i; |
1074 | |
1075 | if (call_rcu_chains <= 0) |
1076 | return 0; |
	call_rcu_chain_list = kcalloc(call_rcu_chains, sizeof(*call_rcu_chain_list), GFP_KERNEL);
1078 | if (!call_rcu_chain_list) |
1079 | return -ENOMEM; |
1080 | for (i = 0; i < call_rcu_chains; i++) { |
1081 | call_rcu_chain_list[i].crc_stop = false; |
		call_rcu(&call_rcu_chain_list[i].crc_rh, call_rcu_chain_cb);
1083 | } |
1084 | return 0; |
1085 | } |
1086 | |
1087 | // Stop all of the call_rcu() chains. |
1088 | static void call_rcu_chain_cleanup(void) |
1089 | { |
1090 | int i; |
1091 | |
1092 | if (!call_rcu_chain_list) |
1093 | return; |
1094 | for (i = 0; i < call_rcu_chains; i++) |
1095 | smp_store_release(&call_rcu_chain_list[i].crc_stop, true); |
1096 | rcu_barrier(); |
	kfree(call_rcu_chain_list);
1098 | call_rcu_chain_list = NULL; |
1099 | } |
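
/*
 * Ordering note: the smp_store_release() of ->crc_stop above pairs with the
 * smp_load_acquire() in call_rcu_chain_cb(), so a chain that observes the
 * stop flag posts no further callbacks, and the rcu_barrier() then waits
 * out the last in-flight callback before the list is freed.
 */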
1100 | |
1101 | static void lock_torture_cleanup(void) |
1102 | { |
1103 | int i; |
1104 | |
1105 | if (torture_cleanup_begin()) |
1106 | return; |
1107 | |
	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module.
	 * However, cxt->cur_ops.init() may have been invoked, so in
	 * addition to performing the torture-specific cleanups below,
	 * cur_ops.exit() will be invoked if needed.
	 */
1115 | if (!cxt.lwsa && !cxt.lrsa) |
1116 | goto end; |
1117 | |
1118 | if (writer_tasks) { |
1119 | for (i = 0; i < cxt.nrealwriters_stress; i++) |
1120 | torture_stop_kthread(lock_torture_writer, writer_tasks[i]); |
		kfree(writer_tasks);
1122 | writer_tasks = NULL; |
1123 | } |
1124 | |
1125 | if (reader_tasks) { |
1126 | for (i = 0; i < cxt.nrealreaders_stress; i++) |
1127 | torture_stop_kthread(lock_torture_reader, |
1128 | reader_tasks[i]); |
		kfree(reader_tasks);
1130 | reader_tasks = NULL; |
1131 | } |
1132 | |
1133 | torture_stop_kthread(lock_torture_stats, stats_task); |
1134 | lock_torture_stats_print(); /* -After- the stats thread is stopped! */ |
1135 | |
	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");
1145 | |
	kfree(cxt.lwsa);
	cxt.lwsa = NULL;
	kfree(cxt.lrsa);
1149 | cxt.lrsa = NULL; |
1150 | |
1151 | call_rcu_chain_cleanup(); |
1152 | |
1153 | end: |
1154 | if (cxt.init_called) { |
1155 | if (cxt.cur_ops->exit) |
1156 | cxt.cur_ops->exit(); |
1157 | cxt.init_called = false; |
1158 | } |
1159 | torture_cleanup_end(); |
1160 | } |
1161 | |
1162 | static int __init lock_torture_init(void) |
1163 | { |
1164 | int i, j; |
1165 | int firsterr = 0; |
1166 | static struct lock_torture_ops *torture_ops[] = { |
1167 | &lock_busted_ops, |
1168 | &spin_lock_ops, &spin_lock_irq_ops, |
1169 | &raw_spin_lock_ops, &raw_spin_lock_irq_ops, |
1170 | &rw_lock_ops, &rw_lock_irq_ops, |
1171 | &mutex_lock_ops, |
1172 | &ww_mutex_lock_ops, |
1173 | #ifdef CONFIG_RT_MUTEXES |
1174 | &rtmutex_lock_ops, |
1175 | #endif |
1176 | &rwsem_lock_ops, |
1177 | &percpu_rwsem_lock_ops, |
1178 | }; |
1179 | |
	if (!torture_init_begin(torture_type, verbose))
1181 | return -EBUSY; |
1182 | |
1183 | /* Process args and tell the world that the torturer is on the job. */ |
1184 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { |
1185 | cxt.cur_ops = torture_ops[i]; |
1186 | if (strcmp(torture_type, cxt.cur_ops->name) == 0) |
1187 | break; |
1188 | } |
1189 | if (i == ARRAY_SIZE(torture_ops)) { |
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
1196 | firsterr = -EINVAL; |
1197 | goto unwind; |
1198 | } |
1199 | |
1200 | if (nwriters_stress == 0 && |
1201 | (!cxt.cur_ops->readlock || nreaders_stress == 0)) { |
		pr_alert("lock-torture: must run at least one locking thread\n");
1203 | firsterr = -EINVAL; |
1204 | goto unwind; |
1205 | } |
1206 | |
1207 | if (nwriters_stress >= 0) |
1208 | cxt.nrealwriters_stress = nwriters_stress; |
1209 | else |
1210 | cxt.nrealwriters_stress = 2 * num_online_cpus(); |
1211 | |
1212 | if (cxt.cur_ops->init) { |
1213 | cxt.cur_ops->init(); |
1214 | cxt.init_called = true; |
1215 | } |
1216 | |
1217 | #ifdef CONFIG_DEBUG_MUTEXES |
	if (str_has_prefix(torture_type, "mutex"))
1219 | cxt.debug_lock = true; |
1220 | #endif |
1221 | #ifdef CONFIG_DEBUG_RT_MUTEXES |
	if (str_has_prefix(torture_type, "rtmutex"))
1223 | cxt.debug_lock = true; |
1224 | #endif |
1225 | #ifdef CONFIG_DEBUG_SPINLOCK |
	if ((str_has_prefix(torture_type, "spin")) ||
	    (str_has_prefix(torture_type, "rw_lock")))
1228 | cxt.debug_lock = true; |
1229 | #endif |
1230 | |
1231 | /* Initialize the statistics so that each run gets its own numbers. */ |
1232 | if (nwriters_stress) { |
1233 | lock_is_write_held = false; |
		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
					 sizeof(*cxt.lwsa),
					 GFP_KERNEL);
		if (cxt.lwsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
1239 | firsterr = -ENOMEM; |
1240 | goto unwind; |
1241 | } |
1242 | |
1243 | for (i = 0; i < cxt.nrealwriters_stress; i++) { |
1244 | cxt.lwsa[i].n_lock_fail = 0; |
1245 | cxt.lwsa[i].n_lock_acquired = 0; |
1246 | } |
1247 | } |
1248 | |
1249 | if (cxt.cur_ops->readlock) { |
1250 | if (nreaders_stress >= 0) |
1251 | cxt.nrealreaders_stress = nreaders_stress; |
1252 | else { |
1253 | /* |
1254 | * By default distribute evenly the number of |
1255 | * readers and writers. We still run the same number |
1256 | * of threads as the writer-only locks default. |
1257 | */ |
1258 | if (nwriters_stress < 0) /* user doesn't care */ |
1259 | cxt.nrealwriters_stress = num_online_cpus(); |
1260 | cxt.nrealreaders_stress = cxt.nrealwriters_stress; |
1261 | } |
1262 | |
1263 | if (nreaders_stress) { |
			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
						 sizeof(*cxt.lrsa),
						 GFP_KERNEL);
			if (cxt.lrsa == NULL) {
				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
				firsterr = -ENOMEM;
				kfree(cxt.lwsa);
1271 | cxt.lwsa = NULL; |
1272 | goto unwind; |
1273 | } |
1274 | |
1275 | for (i = 0; i < cxt.nrealreaders_stress; i++) { |
1276 | cxt.lrsa[i].n_lock_fail = 0; |
1277 | cxt.lrsa[i].n_lock_acquired = 0; |
1278 | } |
1279 | } |
1280 | } |
1281 | |
1282 | firsterr = call_rcu_chain_init(); |
1283 | if (torture_init_error(firsterr)) |
1284 | goto unwind; |
1285 | |
	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");
1287 | |
1288 | /* Prepare torture context. */ |
1289 | if (onoff_interval > 0) { |
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ, NULL);
1292 | if (torture_init_error(firsterr)) |
1293 | goto unwind; |
1294 | } |
1295 | if (shuffle_interval > 0) { |
		firsterr = torture_shuffle_init(shuffle_interval);
1297 | if (torture_init_error(firsterr)) |
1298 | goto unwind; |
1299 | } |
1300 | if (shutdown_secs > 0) { |
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
1303 | if (torture_init_error(firsterr)) |
1304 | goto unwind; |
1305 | } |
1306 | if (stutter > 0) { |
		firsterr = torture_stutter_init(stutter, stutter);
1308 | if (torture_init_error(firsterr)) |
1309 | goto unwind; |
1310 | } |
1311 | |
1312 | if (nwriters_stress) { |
		writer_tasks = kcalloc(cxt.nrealwriters_stress,
				       sizeof(writer_tasks[0]),
				       GFP_KERNEL);
		if (writer_tasks == NULL) {
			TOROUT_ERRSTRING("writer_tasks: Out of memory");
1318 | firsterr = -ENOMEM; |
1319 | goto unwind; |
1320 | } |
1321 | } |
1322 | |
1323 | /* cap nested_locks to MAX_NESTED_LOCKS */ |
1324 | if (nested_locks > MAX_NESTED_LOCKS) |
1325 | nested_locks = MAX_NESTED_LOCKS; |
1326 | |
1327 | if (cxt.cur_ops->readlock) { |
		reader_tasks = kcalloc(cxt.nrealreaders_stress,
				       sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
1334 | writer_tasks = NULL; |
1335 | firsterr = -ENOMEM; |
1336 | goto unwind; |
1337 | } |
1338 | } |
1339 | |
	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers a
	 * slight advantage, by creating their kthreads first. This can be
	 * modified for very specific needs, or the user could even be allowed
	 * to choose the policy, if ever wanted.
	 */
1348 | for (i = 0, j = 0; i < cxt.nrealwriters_stress || |
1349 | j < cxt.nrealreaders_stress; i++, j++) { |
1350 | if (i >= cxt.nrealwriters_stress) |
1351 | goto create_reader; |
1352 | |
1353 | /* Create writer. */ |
1354 | firsterr = torture_create_kthread_cb(lock_torture_writer, &cxt.lwsa[i], |
1355 | writer_tasks[i], |
1356 | writer_fifo ? sched_set_fifo : NULL); |
1357 | if (torture_init_error(firsterr)) |
1358 | goto unwind; |
		if (cpumask_nonempty(bind_writers))
			torture_sched_setaffinity(writer_tasks[i]->pid, bind_writers);
1361 | |
1362 | create_reader: |
1363 | if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress)) |
1364 | continue; |
1365 | /* Create reader. */ |
1366 | firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j], |
1367 | reader_tasks[j]); |
1368 | if (torture_init_error(firsterr)) |
1369 | goto unwind; |
		if (cpumask_nonempty(bind_readers))
			torture_sched_setaffinity(reader_tasks[j]->pid, bind_readers);
1372 | } |
1373 | if (stat_interval > 0) { |
1374 | firsterr = torture_create_kthread(lock_torture_stats, NULL, |
1375 | stats_task); |
1376 | if (torture_init_error(firsterr)) |
1377 | goto unwind; |
1378 | } |
1379 | torture_init_end(); |
1380 | return 0; |
1381 | |
1382 | unwind: |
1383 | torture_init_end(); |
1384 | lock_torture_cleanup(); |
1385 | if (shutdown_secs) { |
1386 | WARN_ON(!IS_MODULE(CONFIG_LOCK_TORTURE_TEST)); |
1387 | kernel_power_off(); |
1388 | } |
1389 | return firsterr; |
1390 | } |
1391 | |
1392 | module_init(lock_torture_init); |
1393 | module_exit(lock_torture_cleanup); |
1394 | |