1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Test module to generate lockups |
4 | */ |
5 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
6 | |
7 | #include <linux/kernel.h> |
8 | #include <linux/module.h> |
9 | #include <linux/delay.h> |
10 | #include <linux/sched.h> |
11 | #include <linux/sched/signal.h> |
12 | #include <linux/sched/clock.h> |
13 | #include <linux/cpu.h> |
14 | #include <linux/nmi.h> |
15 | #include <linux/mm.h> |
16 | #include <linux/uaccess.h> |
17 | #include <linux/file.h> |
18 | |
static unsigned int time_secs;
module_param(time_secs, uint, 0600);
MODULE_PARM_DESC(time_secs, "lockup time in seconds, default 0");

static unsigned int time_nsecs;
module_param(time_nsecs, uint, 0600);
MODULE_PARM_DESC(time_nsecs, "nanoseconds part of lockup time, default 0");

static unsigned int cooldown_secs;
module_param(cooldown_secs, uint, 0600);
MODULE_PARM_DESC(cooldown_secs, "cooldown time between iterations in seconds, default 0");

static unsigned int cooldown_nsecs;
module_param(cooldown_nsecs, uint, 0600);
MODULE_PARM_DESC(cooldown_nsecs, "nanoseconds part of cooldown, default 0");

static unsigned int iterations = 1;
module_param(iterations, uint, 0600);
MODULE_PARM_DESC(iterations, "lockup iterations, default 1");

static bool all_cpus;
module_param(all_cpus, bool, 0400);
MODULE_PARM_DESC(all_cpus, "trigger lockup at all cpus at once");

static int wait_state;
static char *state = "R";
module_param(state, charp, 0400);
MODULE_PARM_DESC(state, "wait in 'R' running (default), 'D' uninterruptible, 'K' killable, 'S' interruptible state");

static bool use_hrtimer;
module_param(use_hrtimer, bool, 0400);
MODULE_PARM_DESC(use_hrtimer, "use high-resolution timer for sleeping");

static bool iowait;
module_param(iowait, bool, 0400);
MODULE_PARM_DESC(iowait, "account sleep time as iowait");

static bool lock_read;
module_param(lock_read, bool, 0400);
MODULE_PARM_DESC(lock_read, "lock read-write locks for read");

static bool lock_single;
module_param(lock_single, bool, 0400);
MODULE_PARM_DESC(lock_single, "acquire locks only at one cpu");

static bool reacquire_locks;
module_param(reacquire_locks, bool, 0400);
MODULE_PARM_DESC(reacquire_locks, "release and reacquire locks/irq/preempt between iterations");
67 | |
static bool touch_softlockup;
module_param(touch_softlockup, bool, 0600);
MODULE_PARM_DESC(touch_softlockup, "touch soft-lockup watchdog between iterations");

static bool touch_hardlockup;
module_param(touch_hardlockup, bool, 0600);
MODULE_PARM_DESC(touch_hardlockup, "touch hard-lockup watchdog between iterations");

static bool call_cond_resched;
module_param(call_cond_resched, bool, 0600);
MODULE_PARM_DESC(call_cond_resched, "call cond_resched() between iterations");

static bool measure_lock_wait;
module_param(measure_lock_wait, bool, 0400);
MODULE_PARM_DESC(measure_lock_wait, "measure lock wait time");

static unsigned long lock_wait_threshold = ULONG_MAX;
module_param(lock_wait_threshold, ulong, 0400);
MODULE_PARM_DESC(lock_wait_threshold, "print lock wait time longer than this in nanoseconds, default off");

static bool test_disable_irq;
module_param_named(disable_irq, test_disable_irq, bool, 0400);
MODULE_PARM_DESC(disable_irq, "disable interrupts: generate hard-lockups");

static bool disable_softirq;
module_param(disable_softirq, bool, 0400);
MODULE_PARM_DESC(disable_softirq, "disable bottom-half irq handlers");

static bool disable_preempt;
module_param(disable_preempt, bool, 0400);
MODULE_PARM_DESC(disable_preempt, "disable preemption: generate soft-lockups");

static bool lock_rcu;
module_param(lock_rcu, bool, 0400);
MODULE_PARM_DESC(lock_rcu, "grab rcu_read_lock: generate rcu stalls");

static bool lock_mmap_sem;
module_param(lock_mmap_sem, bool, 0400);
MODULE_PARM_DESC(lock_mmap_sem, "lock mm->mmap_lock: block procfs interfaces");
107 | |
static unsigned long lock_rwsem_ptr;
module_param_unsafe(lock_rwsem_ptr, ulong, 0400);
MODULE_PARM_DESC(lock_rwsem_ptr, "lock rw_semaphore at address");

static unsigned long lock_mutex_ptr;
module_param_unsafe(lock_mutex_ptr, ulong, 0400);
MODULE_PARM_DESC(lock_mutex_ptr, "lock mutex at address");

static unsigned long lock_spinlock_ptr;
module_param_unsafe(lock_spinlock_ptr, ulong, 0400);
MODULE_PARM_DESC(lock_spinlock_ptr, "lock spinlock at address");

static unsigned long lock_rwlock_ptr;
module_param_unsafe(lock_rwlock_ptr, ulong, 0400);
MODULE_PARM_DESC(lock_rwlock_ptr, "lock rwlock at address");

static unsigned int alloc_pages_nr;
module_param_unsafe(alloc_pages_nr, uint, 0600);
MODULE_PARM_DESC(alloc_pages_nr, "allocate and free pages under locks");

static unsigned int alloc_pages_order;
module_param(alloc_pages_order, uint, 0400);
MODULE_PARM_DESC(alloc_pages_order, "page order to allocate");

static gfp_t alloc_pages_gfp = GFP_KERNEL;
module_param_unsafe(alloc_pages_gfp, uint, 0400);
MODULE_PARM_DESC(alloc_pages_gfp, "allocate pages with this gfp_mask, default GFP_KERNEL");

static bool alloc_pages_atomic;
module_param(alloc_pages_atomic, bool, 0400);
MODULE_PARM_DESC(alloc_pages_atomic, "allocate pages with GFP_ATOMIC");

static bool reallocate_pages;
module_param(reallocate_pages, bool, 0400);
MODULE_PARM_DESC(reallocate_pages, "free and allocate pages between iterations");
143 | |
static struct file *test_file;
static struct inode *test_inode;
static char test_file_path[256];
module_param_string(file_path, test_file_path, sizeof(test_file_path), 0400);
MODULE_PARM_DESC(file_path, "file path to test");

static bool test_lock_inode;
module_param_named(lock_inode, test_lock_inode, bool, 0400);
MODULE_PARM_DESC(lock_inode, "lock file -> inode -> i_rwsem");

static bool test_lock_mapping;
module_param_named(lock_mapping, test_lock_mapping, bool, 0400);
MODULE_PARM_DESC(lock_mapping, "lock file -> mapping -> i_mmap_rwsem");

static bool test_lock_sb_umount;
module_param_named(lock_sb_umount, test_lock_sb_umount, bool, 0400);
MODULE_PARM_DESC(lock_sb_umount, "lock file -> sb -> s_umount");
161 | |
162 | static atomic_t alloc_pages_failed = ATOMIC_INIT(0); |
163 | |
164 | static atomic64_t max_lock_wait = ATOMIC64_INIT(0); |
165 | |
166 | static struct task_struct *main_task; |
167 | static int master_cpu; |
168 | |
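/*
 * Take all locks configured by the module parameters.  Pointer-based
 * locks (mutex, rw_semaphore, mmap_lock, spinlock, rwlock) are taken
 * only by the "master" task so that lock_single can restrict them to
 * one cpu, while the irq/softirq/preempt/rcu state is changed on every
 * cpu.  Sleeping locks are taken before interrupts and preemption are
 * disabled, spinning locks after.  If measure_lock_wait is set, the
 * time spent here is folded into max_lock_wait with a cmpxchg retry
 * loop.
 */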
169 | static void test_lock(bool master, bool verbose) |
170 | { |
171 | u64 wait_start; |
172 | |
173 | if (measure_lock_wait) |
174 | wait_start = local_clock(); |
175 | |
176 | if (lock_mutex_ptr && master) { |
177 | if (verbose) |
178 | pr_notice("lock mutex %ps\n" , (void *)lock_mutex_ptr); |
179 | mutex_lock((struct mutex *)lock_mutex_ptr); |
180 | } |
181 | |
182 | if (lock_rwsem_ptr && master) { |
183 | if (verbose) |
184 | pr_notice("lock rw_semaphore %ps\n" , |
185 | (void *)lock_rwsem_ptr); |
186 | if (lock_read) |
187 | down_read(sem: (struct rw_semaphore *)lock_rwsem_ptr); |
188 | else |
189 | down_write(sem: (struct rw_semaphore *)lock_rwsem_ptr); |
190 | } |
191 | |
192 | if (lock_mmap_sem && master) { |
193 | if (verbose) |
194 | pr_notice("lock mmap_lock pid=%d\n" , main_task->pid); |
195 | if (lock_read) |
196 | mmap_read_lock(mm: main_task->mm); |
197 | else |
198 | mmap_write_lock(mm: main_task->mm); |
199 | } |
200 | |
201 | if (test_disable_irq) |
202 | local_irq_disable(); |
203 | |
204 | if (disable_softirq) |
205 | local_bh_disable(); |
206 | |
207 | if (disable_preempt) |
208 | preempt_disable(); |
209 | |
210 | if (lock_rcu) |
211 | rcu_read_lock(); |
212 | |
213 | if (lock_spinlock_ptr && master) { |
214 | if (verbose) |
215 | pr_notice("lock spinlock %ps\n" , |
216 | (void *)lock_spinlock_ptr); |
217 | spin_lock(lock: (spinlock_t *)lock_spinlock_ptr); |
218 | } |
219 | |
220 | if (lock_rwlock_ptr && master) { |
221 | if (verbose) |
222 | pr_notice("lock rwlock %ps\n" , |
223 | (void *)lock_rwlock_ptr); |
224 | if (lock_read) |
225 | read_lock((rwlock_t *)lock_rwlock_ptr); |
226 | else |
227 | write_lock((rwlock_t *)lock_rwlock_ptr); |
228 | } |
229 | |
230 | if (measure_lock_wait) { |
231 | s64 cur_wait = local_clock() - wait_start; |
		s64 max_wait = atomic64_read(&max_lock_wait);
233 | |
234 | do { |
235 | if (cur_wait < max_wait) |
236 | break; |
			max_wait = atomic64_cmpxchg(&max_lock_wait,
						    max_wait, cur_wait);
239 | } while (max_wait != cur_wait); |
240 | |
241 | if (cur_wait > lock_wait_threshold) |
			pr_notice_ratelimited("lock wait %lld ns\n", cur_wait);
243 | } |
244 | } |
245 | |
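/*
 * Drop everything test_lock() took, in reverse order.
 */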
246 | static void test_unlock(bool master, bool verbose) |
247 | { |
248 | if (lock_rwlock_ptr && master) { |
249 | if (lock_read) |
250 | read_unlock((rwlock_t *)lock_rwlock_ptr); |
251 | else |
252 | write_unlock((rwlock_t *)lock_rwlock_ptr); |
253 | if (verbose) |
254 | pr_notice("unlock rwlock %ps\n" , |
255 | (void *)lock_rwlock_ptr); |
256 | } |
257 | |
258 | if (lock_spinlock_ptr && master) { |
		spin_unlock((spinlock_t *)lock_spinlock_ptr);
260 | if (verbose) |
261 | pr_notice("unlock spinlock %ps\n" , |
262 | (void *)lock_spinlock_ptr); |
263 | } |
264 | |
265 | if (lock_rcu) |
266 | rcu_read_unlock(); |
267 | |
268 | if (disable_preempt) |
269 | preempt_enable(); |
270 | |
271 | if (disable_softirq) |
272 | local_bh_enable(); |
273 | |
274 | if (test_disable_irq) |
275 | local_irq_enable(); |
276 | |
277 | if (lock_mmap_sem && master) { |
278 | if (lock_read) |
			mmap_read_unlock(main_task->mm);
		else
			mmap_write_unlock(main_task->mm);
282 | if (verbose) |
283 | pr_notice("unlock mmap_lock pid=%d\n" , main_task->pid); |
284 | } |
285 | |
286 | if (lock_rwsem_ptr && master) { |
287 | if (lock_read) |
			up_read((struct rw_semaphore *)lock_rwsem_ptr);
		else
			up_write((struct rw_semaphore *)lock_rwsem_ptr);
291 | if (verbose) |
292 | pr_notice("unlock rw_semaphore %ps\n" , |
293 | (void *)lock_rwsem_ptr); |
294 | } |
295 | |
296 | if (lock_mutex_ptr && master) { |
		mutex_unlock((struct mutex *)lock_mutex_ptr);
298 | if (verbose) |
299 | pr_notice("unlock mutex %ps\n" , |
300 | (void *)lock_mutex_ptr); |
301 | } |
302 | } |
303 | |
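/*
 * Allocate alloc_pages_nr pages of alloc_pages_order onto the list,
 * counting (and stopping at) the first failure.
 */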
304 | static void test_alloc_pages(struct list_head *pages) |
305 | { |
306 | struct page *page; |
307 | unsigned int i; |
308 | |
309 | for (i = 0; i < alloc_pages_nr; i++) { |
		page = alloc_pages(alloc_pages_gfp, alloc_pages_order);
		if (!page) {
			atomic_inc(&alloc_pages_failed);
			break;
		}
		list_add(&page->lru, pages);
316 | } |
317 | } |
318 | |
319 | static void test_free_pages(struct list_head *pages) |
320 | { |
321 | struct page *page, *next; |
322 | |
323 | list_for_each_entry_safe(page, next, pages, lru) |
		__free_pages(page, alloc_pages_order);
	INIT_LIST_HEAD(pages);
326 | } |
327 | |
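/*
 * In the 'R' state busy-wait with mdelay()/ndelay(); in any sleeping
 * state block in schedule_hrtimeout() or schedule_timeout().
 */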
328 | static void test_wait(unsigned int secs, unsigned int nsecs) |
329 | { |
330 | if (wait_state == TASK_RUNNING) { |
331 | if (secs) |
332 | mdelay(secs * MSEC_PER_SEC); |
333 | if (nsecs) |
334 | ndelay(nsecs); |
335 | return; |
336 | } |
337 | |
338 | __set_current_state(wait_state); |
339 | if (use_hrtimer) { |
340 | ktime_t time; |
341 | |
		time = ns_to_ktime((u64)secs * NSEC_PER_SEC + nsecs);
		schedule_hrtimeout(&time, HRTIMER_MODE_REL);
	} else {
		schedule_timeout(secs * HZ + nsecs_to_jiffies(nsecs));
346 | } |
347 | } |
348 | |
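/*
 * One lockup: take the locks, then wait time_secs+time_nsecs per
 * iteration, optionally dropping and reacquiring locks and pages (with
 * a cooldown and watchdog touches in between) until the iteration
 * count is reached or a signal is pending for the main task.
 */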
349 | static void test_lockup(bool master) |
350 | { |
351 | u64 lockup_start = local_clock(); |
352 | unsigned int iter = 0; |
353 | LIST_HEAD(pages); |
354 | |
355 | pr_notice("Start on CPU%d\n" , raw_smp_processor_id()); |
356 | |
357 | test_lock(master, verbose: true); |
358 | |
359 | test_alloc_pages(pages: &pages); |
360 | |
361 | while (iter++ < iterations && !signal_pending(p: main_task)) { |
362 | |
363 | if (iowait) |
364 | current->in_iowait = 1; |
365 | |
366 | test_wait(secs: time_secs, nsecs: time_nsecs); |
367 | |
368 | if (iowait) |
369 | current->in_iowait = 0; |
370 | |
371 | if (reallocate_pages) |
372 | test_free_pages(pages: &pages); |
373 | |
374 | if (reacquire_locks) |
375 | test_unlock(master, verbose: false); |
376 | |
377 | if (touch_softlockup) |
378 | touch_softlockup_watchdog(); |
379 | |
380 | if (touch_hardlockup) |
381 | touch_nmi_watchdog(); |
382 | |
383 | if (call_cond_resched) |
384 | cond_resched(); |
385 | |
386 | test_wait(secs: cooldown_secs, nsecs: cooldown_nsecs); |
387 | |
388 | if (reacquire_locks) |
389 | test_lock(master, verbose: false); |
390 | |
391 | if (reallocate_pages) |
392 | test_alloc_pages(pages: &pages); |
393 | } |
394 | |
395 | pr_notice("Finish on CPU%d in %lld ns\n" , raw_smp_processor_id(), |
396 | local_clock() - lockup_start); |
397 | |
398 | test_free_pages(pages: &pages); |
399 | |
400 | test_unlock(master, verbose: true); |
401 | } |
402 | |
403 | static DEFINE_PER_CPU(struct work_struct, test_works); |
404 | |
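/*
 * Per-cpu worker: with lock_single only the work running on master_cpu
 * takes the pointer-based locks, otherwise every cpu is a "master".
 */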
405 | static void test_work_fn(struct work_struct *work) |
406 | { |
	test_lockup(!lock_single ||
		    work == per_cpu_ptr(&test_works, master_cpu));
409 | } |
410 | |
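/*
 * Sanity-check an address passed via an unsafe module parameter: it
 * must not look like a user-space pointer, and both its first and last
 * byte must be readable without faulting.
 */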
411 | static bool test_kernel_ptr(unsigned long addr, int size) |
412 | { |
413 | void *ptr = (void *)addr; |
414 | char buf; |
415 | |
416 | if (!addr) |
417 | return false; |
418 | |
419 | /* should be at least readable kernel address */ |
420 | if (!IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE) && |
421 | (access_ok((void __user *)ptr, 1) || |
422 | access_ok((void __user *)ptr + size - 1, 1))) { |
423 | pr_err("user space ptr invalid in kernel: %#lx\n" , addr); |
424 | return true; |
425 | } |
426 | |
427 | if (get_kernel_nofault(buf, ptr) || |
428 | get_kernel_nofault(buf, ptr + size - 1)) { |
429 | pr_err("invalid kernel ptr: %#lx\n" , addr); |
430 | return true; |
431 | } |
432 | |
433 | return false; |
434 | } |
435 | |
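/*
 * With lock debugging enabled, verify the lock's magic value before
 * touching it, to avoid wrecking a random kernel address.
 */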
436 | static bool __maybe_unused test_magic(unsigned long addr, int offset, |
437 | unsigned int expected) |
438 | { |
439 | void *ptr = (void *)addr + offset; |
440 | unsigned int magic = 0; |
441 | |
442 | if (!addr) |
443 | return false; |
444 | |
445 | if (get_kernel_nofault(magic, ptr) || magic != expected) { |
446 | pr_err("invalid magic at %#lx + %#x = %#x, expected %#x\n" , |
447 | addr, offset, magic, expected); |
448 | return true; |
449 | } |
450 | |
451 | return false; |
452 | } |
453 | |
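/*
 * Everything happens at module load: validate the parameters, resolve
 * the optional file-based locks, run the lockup(s), and return -EAGAIN
 * so the module never stays loaded.
 */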
454 | static int __init test_lockup_init(void) |
455 | { |
456 | u64 test_start = local_clock(); |
457 | |
458 | main_task = current; |
459 | |
460 | switch (state[0]) { |
461 | case 'S': |
462 | wait_state = TASK_INTERRUPTIBLE; |
463 | break; |
464 | case 'D': |
465 | wait_state = TASK_UNINTERRUPTIBLE; |
466 | break; |
467 | case 'K': |
468 | wait_state = TASK_KILLABLE; |
469 | break; |
470 | case 'R': |
471 | wait_state = TASK_RUNNING; |
472 | break; |
473 | default: |
474 | pr_err("unknown state=%s\n" , state); |
475 | return -EINVAL; |
476 | } |
477 | |
478 | if (alloc_pages_atomic) |
479 | alloc_pages_gfp = GFP_ATOMIC; |
480 | |
	if (test_kernel_ptr(lock_spinlock_ptr, sizeof(spinlock_t)) ||
	    test_kernel_ptr(lock_rwlock_ptr, sizeof(rwlock_t)) ||
	    test_kernel_ptr(lock_mutex_ptr, sizeof(struct mutex)) ||
	    test_kernel_ptr(lock_rwsem_ptr, sizeof(struct rw_semaphore)))
485 | return -EINVAL; |
486 | |
487 | #ifdef CONFIG_DEBUG_SPINLOCK |
488 | #ifdef CONFIG_PREEMPT_RT |
489 | if (test_magic(lock_spinlock_ptr, |
490 | offsetof(spinlock_t, lock.wait_lock.magic), |
491 | SPINLOCK_MAGIC) || |
492 | test_magic(lock_rwlock_ptr, |
493 | offsetof(rwlock_t, rwbase.rtmutex.wait_lock.magic), |
494 | SPINLOCK_MAGIC) || |
495 | test_magic(lock_mutex_ptr, |
496 | offsetof(struct mutex, rtmutex.wait_lock.magic), |
497 | SPINLOCK_MAGIC) || |
498 | test_magic(lock_rwsem_ptr, |
499 | offsetof(struct rw_semaphore, rwbase.rtmutex.wait_lock.magic), |
500 | SPINLOCK_MAGIC)) |
501 | return -EINVAL; |
502 | #else |
	if (test_magic(lock_spinlock_ptr,
		       offsetof(spinlock_t, rlock.magic),
		       SPINLOCK_MAGIC) ||
	    test_magic(lock_rwlock_ptr,
		       offsetof(rwlock_t, magic),
		       RWLOCK_MAGIC) ||
	    test_magic(lock_mutex_ptr,
		       offsetof(struct mutex, wait_lock.magic),
		       SPINLOCK_MAGIC) ||
	    test_magic(lock_rwsem_ptr,
		       offsetof(struct rw_semaphore, wait_lock.magic),
		       SPINLOCK_MAGIC))
515 | return -EINVAL; |
516 | #endif |
517 | #endif |
518 | |
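	/*
	 * Refuse combinations that could sleep in atomic context: a
	 * sleeping wait state, cond_resched() without having dropped
	 * the locks, or a blocking page allocation, combined with a
	 * spinlock, rwlock, rcu_read_lock() or disabled
	 * irqs/softirqs/preemption.
	 */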
519 | if ((wait_state != TASK_RUNNING || |
520 | (call_cond_resched && !reacquire_locks) || |
	     (alloc_pages_nr && gfpflags_allow_blocking(alloc_pages_gfp))) &&
522 | (test_disable_irq || disable_softirq || disable_preempt || |
523 | lock_rcu || lock_spinlock_ptr || lock_rwlock_ptr)) { |
524 | pr_err("refuse to sleep in atomic context\n" ); |
525 | return -EINVAL; |
526 | } |
527 | |
528 | if (lock_mmap_sem && !main_task->mm) { |
529 | pr_err("no mm to lock mmap_lock\n" ); |
530 | return -EINVAL; |
531 | } |
532 | |
533 | if (test_file_path[0]) { |
534 | test_file = filp_open(test_file_path, O_RDONLY, 0); |
		if (IS_ERR(test_file)) {
			pr_err("failed to open %s: %ld\n", test_file_path, PTR_ERR(test_file));
			return PTR_ERR(test_file);
		}
		test_inode = file_inode(test_file);
540 | } else if (test_lock_inode || |
541 | test_lock_mapping || |
542 | test_lock_sb_umount) { |
543 | pr_err("no file to lock\n" ); |
544 | return -EINVAL; |
545 | } |
546 | |
547 | if (test_lock_inode && test_inode) |
548 | lock_rwsem_ptr = (unsigned long)&test_inode->i_rwsem; |
549 | |
550 | if (test_lock_mapping && test_file && test_file->f_mapping) |
551 | lock_rwsem_ptr = (unsigned long)&test_file->f_mapping->i_mmap_rwsem; |
552 | |
553 | if (test_lock_sb_umount && test_inode) |
554 | lock_rwsem_ptr = (unsigned long)&test_inode->i_sb->s_umount; |
555 | |
556 | pr_notice("START pid=%d time=%u +%u ns cooldown=%u +%u ns iterations=%u state=%s %s%s%s%s%s%s%s%s%s%s%s\n" , |
557 | main_task->pid, time_secs, time_nsecs, |
558 | cooldown_secs, cooldown_nsecs, iterations, state, |
559 | all_cpus ? "all_cpus " : "" , |
560 | iowait ? "iowait " : "" , |
561 | test_disable_irq ? "disable_irq " : "" , |
562 | disable_softirq ? "disable_softirq " : "" , |
563 | disable_preempt ? "disable_preempt " : "" , |
564 | lock_rcu ? "lock_rcu " : "" , |
565 | lock_read ? "lock_read " : "" , |
566 | touch_softlockup ? "touch_softlockup " : "" , |
567 | touch_hardlockup ? "touch_hardlockup " : "" , |
568 | call_cond_resched ? "call_cond_resched " : "" , |
569 | reacquire_locks ? "reacquire_locks " : "" ); |
570 | |
571 | if (alloc_pages_nr) |
572 | pr_notice("ALLOCATE PAGES nr=%u order=%u gfp=%pGg %s\n" , |
573 | alloc_pages_nr, alloc_pages_order, &alloc_pages_gfp, |
574 | reallocate_pages ? "reallocate_pages " : "" ); |
575 | |
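	/*
	 * Queue one work item on every online cpu, using the current
	 * cpu as the master; flush_work() then waits for all of them
	 * to finish.
	 */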
576 | if (all_cpus) { |
577 | unsigned int cpu; |
578 | |
579 | cpus_read_lock(); |
580 | |
581 | preempt_disable(); |
582 | master_cpu = smp_processor_id(); |
583 | for_each_online_cpu(cpu) { |
584 | INIT_WORK(per_cpu_ptr(&test_works, cpu), test_work_fn); |
			queue_work_on(cpu, system_highpri_wq,
586 | per_cpu_ptr(&test_works, cpu)); |
587 | } |
588 | preempt_enable(); |
589 | |
590 | for_each_online_cpu(cpu) |
591 | flush_work(per_cpu_ptr(&test_works, cpu)); |
592 | |
593 | cpus_read_unlock(); |
594 | } else { |
		test_lockup(true);
596 | } |
597 | |
598 | if (measure_lock_wait) |
599 | pr_notice("Maximum lock wait: %lld ns\n" , |
600 | atomic64_read(&max_lock_wait)); |
601 | |
602 | if (alloc_pages_nr) |
603 | pr_notice("Page allocation failed %u times\n" , |
604 | atomic_read(&alloc_pages_failed)); |
605 | |
606 | pr_notice("FINISH in %llu ns\n" , local_clock() - test_start); |
607 | |
608 | if (test_file) |
609 | fput(test_file); |
610 | |
	if (signal_pending(main_task))
612 | return -EINTR; |
613 | |
614 | return -EAGAIN; |
615 | } |
616 | module_init(test_lockup_init); |
617 | |
618 | MODULE_LICENSE("GPL" ); |
619 | MODULE_AUTHOR("Konstantin Khlebnikov <khlebnikov@yandex-team.ru>" ); |
620 | MODULE_DESCRIPTION("Test module to generate lockups" ); |
621 | |