// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/tick.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>
#include <linux/stop_machine.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>

static DEFINE_MUTEX(watchdog_mutex);

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)
# define WATCHDOG_HARDLOCKUP_DEFAULT	1
#else
# define WATCHDOG_HARDLOCKUP_DEFAULT	0
#endif

unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
static int __read_mostly watchdog_hardlockup_user_enabled = WATCHDOG_HARDLOCKUP_DEFAULT;
static int __read_mostly watchdog_softlockup_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly watchdog_hardlockup_available;

struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

#ifdef CONFIG_HARDLOCKUP_DETECTOR

# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
# endif /* CONFIG_SMP */

/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			IS_ENABLED(CONFIG_BOOTPARAM_HARDLOCKUP_PANIC);
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
	watchdog_hardlockup_user_enabled = 0;
}

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_hardlockup_user_enabled = 0;
	else if (!strncmp(str, "1", 1))
		watchdog_hardlockup_user_enabled = 1;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
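
/*
 * Accepted values for the "nmi_watchdog=" boot parameter, per the parser
 * above (a brief usage sketch, not an exhaustive admin guide):
 *
 *	nmi_watchdog=panic	-> panic on a detected hard lockup
 *	nmi_watchdog=nopanic	-> warn only
 *	nmi_watchdog=0		-> disable the hardlockup detector
 *	nmi_watchdog=1		-> enable the hardlockup detector
 */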

#endif /* CONFIG_HARDLOCKUP_DETECTOR */

#if defined(CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER)

static DEFINE_PER_CPU(atomic_t, hrtimer_interrupts);
static DEFINE_PER_CPU(int, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(bool, watchdog_hardlockup_warned);
static DEFINE_PER_CPU(bool, watchdog_hardlockup_touched);
static unsigned long watchdog_hardlockup_all_cpu_dumped;

notrace void arch_touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled.  If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	raw_cpu_write(watchdog_hardlockup_touched, true);
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);
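
/*
 * Typical usage (a sketch; callers normally go through the generic
 * touch_nmi_watchdog() wrapper rather than calling this directly):
 *
 *	while (busy_polling_hardware())	// hypothetical long-running loop
 *		touch_nmi_watchdog();	// keeps hard and soft watchdogs quiet
 */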

void watchdog_hardlockup_touch_cpu(unsigned int cpu)
{
	per_cpu(watchdog_hardlockup_touched, cpu) = true;
}

static bool is_hardlockup(unsigned int cpu)
{
	int hrint = atomic_read(&per_cpu(hrtimer_interrupts, cpu));

	if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
		return true;

	/*
	 * NOTE: we don't need any fancy atomic_t or READ_ONCE/WRITE_ONCE
	 * for hrtimer_interrupts_saved. hrtimer_interrupts_saved is
	 * written/read by a single CPU.
	 */
	per_cpu(hrtimer_interrupts_saved, cpu) = hrint;

	return false;
}

static void watchdog_hardlockup_kick(void)
{
	int new_interrupts;

	new_interrupts = atomic_inc_return(this_cpu_ptr(&hrtimer_interrupts));
	watchdog_buddy_check_hardlockup(new_interrupts);
}

void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
{
	if (per_cpu(watchdog_hardlockup_touched, cpu)) {
		per_cpu(watchdog_hardlockup_touched, cpu) = false;
		return;
	}

	/*
	 * Check for a hardlockup by making sure the CPU's timer
	 * interrupt is incrementing. The timer interrupt should have
	 * fired multiple times before we overflowed. If it hasn't,
	 * that is a good indication the CPU is stuck.
	 */
	if (is_hardlockup(cpu)) {
		unsigned int this_cpu = smp_processor_id();

		/* Only print hardlockups once. */
		if (per_cpu(watchdog_hardlockup_warned, cpu))
			return;

		pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n", cpu);
		print_modules();
		print_irqtrace_events(current);
		if (cpu == this_cpu) {
			if (regs)
				show_regs(regs);
			else
				dump_stack();
		} else {
			trigger_single_cpu_backtrace(cpu);
		}

		/*
		 * Perform multi-CPU dump only once to avoid multiple
		 * hardlockups generating interleaving traces.
		 */
		if (sysctl_hardlockup_all_cpu_backtrace &&
		    !test_and_set_bit(0, &watchdog_hardlockup_all_cpu_dumped))
			trigger_allbutcpu_cpu_backtrace(cpu);

		if (hardlockup_panic)
			nmi_panic(regs, "Hard LOCKUP");

		per_cpu(watchdog_hardlockup_warned, cpu) = true;
	} else {
		per_cpu(watchdog_hardlockup_warned, cpu) = false;
	}
}

#else /* CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */

static inline void watchdog_hardlockup_kick(void) { }

#endif /* !CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */

/*
 * These functions can be overridden based on the configured hardlockup detector.
 *
 * watchdog_hardlockup_enable/disable can be implemented to start and stop
 * together with the softlockup watchdog. The detector must select the
 * SOFTLOCKUP_DETECTOR Kconfig.
 */
void __weak watchdog_hardlockup_enable(unsigned int cpu) { }

void __weak watchdog_hardlockup_disable(unsigned int cpu) { }

/*
 * Watchdog-detector specific API.
 *
 * Return 0 when hardlockup watchdog is available, negative value otherwise.
 * Note that the negative value means that a delayed probe might
 * succeed later.
 */
int __weak __init watchdog_hardlockup_probe(void)
{
	return -ENODEV;
}
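
/*
 * A minimal sketch of how a detector could override the weak probe above,
 * assuming a hypothetical arch helper my_arch_nmi_available(); real
 * detectors (e.g. the perf-based one) provide their own, more involved
 * probe:
 *
 *	int __init watchdog_hardlockup_probe(void)
 *	{
 *		if (!my_arch_nmi_available())	// hypothetical
 *			return -ENODEV;		// may be retried later
 *		return 0;
 *	}
 */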

/**
 * watchdog_hardlockup_stop - Stop the watchdog for reconfiguration
 *
 * The reconfiguration steps are:
 * watchdog_hardlockup_stop();
 * update_variables();
 * watchdog_hardlockup_start();
 */
void __weak watchdog_hardlockup_stop(void) { }

/**
 * watchdog_hardlockup_start - Start the watchdog after reconfiguration
 *
 * Counterpart to watchdog_hardlockup_stop().
 *
 * The following variables have been updated in update_variables() and
 * contain the currently valid configuration:
 * - watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 */
void __weak watchdog_hardlockup_start(void) { }

/**
 * lockup_detector_update_enable - Update the sysctl enable bit
 *
 * Caller needs to make sure that the hard watchdogs are off, so this
 * can't race with watchdog_hardlockup_disable().
 */
static void lockup_detector_update_enable(void)
{
	watchdog_enabled = 0;
	if (!watchdog_user_enabled)
		return;
	if (watchdog_hardlockup_available && watchdog_hardlockup_user_enabled)
		watchdog_enabled |= WATCHDOG_HARDLOCKUP_ENABLED;
	if (watchdog_softlockup_user_enabled)
		watchdog_enabled |= WATCHDOG_SOFTOCKUP_ENABLED;
}
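
/*
 * Worked example: with a hardlockup detector available and both user
 * switches set, watchdog_enabled becomes
 * WATCHDOG_HARDLOCKUP_ENABLED | WATCHDOG_SOFTOCKUP_ENABLED; clearing
 * watchdog_user_enabled (the kernel.watchdog sysctl) leaves both bits off.
 */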

#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/*
 * Delay the softlockup report when running known slow code.
 * It does _not_ affect the timestamp of the last successful reschedule.
 */
#define SOFTLOCKUP_DELAY_REPORT	ULONG_MAX

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#endif

static struct cpumask watchdog_allowed_mask __read_mostly;

/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
			IS_ENABLED(CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC);

static bool softlockup_initialized __read_mostly;
static u64 __read_mostly sample_period;

/* Timestamp taken after the last successful reschedule. */
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
/* Timestamp of the last softlockup report. */
static DEFINE_PER_CPU(unsigned long, watchdog_report_ts);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static unsigned long soft_lockup_nmi_warn;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	watchdog_softlockup_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

static int __init watchdog_thresh_setup(char *str)
{
	get_option(&str, &watchdog_thresh);
	return 1;
}
__setup("watchdog_thresh=", watchdog_thresh_setup);
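
/*
 * Softlockup-related boot parameters handled above (a usage sketch):
 *
 *	softlockup_panic=1	-> panic when a soft lockup is detected
 *	nowatchdog		-> disable both detectors
 *	nosoftlockup		-> disable only the softlockup detector
 *	watchdog_thresh=20	-> 20s hard threshold, 40s soft threshold
 */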

static void __lockup_detector_cleanup(void);

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. Therefore
 * we couple the two thresholds with a factor: the soft threshold is twice
 * the hard threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}
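
/*
 * Worked example with the default watchdog_thresh of 10: the hardlockup
 * detector warns after ~10s without timer interrupts, while a soft lockup
 * is reported only after 2 * 10 = 20s without a successful reschedule.
 */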

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
	watchdog_update_hrtimer_threshold(sample_period);
}
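
/*
 * Worked example with the defaults: get_softlockup_thresh() == 20, so
 * sample_period = 20 * (10^9 / 5) ns = 4 * 10^9 ns, i.e. the per-CPU
 * hrtimer fires every 4 seconds.
 */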

static void update_report_ts(void)
{
	__this_cpu_write(watchdog_report_ts, get_timestamp());
}

/* Commands for resetting the watchdog */
static void update_touch_ts(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
	update_report_ts();
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state.  This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
notrace void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled.  It doesn't matter which CPU's watchdog
	 * report period gets restarted here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}

notrace void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * watchdog_mutex cannot be taken here, as this might be called
	 * from (soft)interrupt context, so the access to
	 * watchdog_allowed_cpumask might race with a concurrent update.
	 *
	 * The watchdog time stamp can race against a concurrent real
	 * update as well, the only side effect might be a cycle delay for
	 * the softlockup check.
	 */
	for_each_cpu(cpu, &watchdog_allowed_mask) {
		per_cpu(watchdog_report_ts, cpu) = SOFTLOCKUP_DELAY_REPORT;
		wq_watchdog_touch(cpu);
	}
}

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}

static int is_softlockup(unsigned long touch_ts,
			 unsigned long period_ts,
			 unsigned long now)
{
	if ((watchdog_enabled & WATCHDOG_SOFTOCKUP_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, period_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}
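
/*
 * Worked example with the default 20s soft threshold: if period_ts == 100
 * and now == 121, the delay is unreasonable and the function returns
 * now - touch_ts, the seconds since the last successful reschedule; at
 * now == 119 it still returns 0.
 */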

/* watchdog detector functions */
static DEFINE_PER_CPU(struct completion, softlockup_completion);
static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);

/*
 * The watchdog feed function - touches the timestamp.
 *
 * It only runs once every sample_period (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static int softlockup_fn(void *data)
{
	update_touch_ts();
	complete(this_cpu_ptr(&softlockup_completion));

	return 0;
}

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts, period_ts, now;
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	if (!watchdog_enabled)
		return HRTIMER_NORESTART;

	watchdog_hardlockup_kick();

	/* kick the softlockup detector */
	if (completion_done(this_cpu_ptr(&softlockup_completion))) {
		reinit_completion(this_cpu_ptr(&softlockup_completion));
		stop_one_cpu_nowait(smp_processor_id(),
				softlockup_fn, NULL,
				this_cpu_ptr(&softlockup_stop_work));
	}

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	/*
	 * Read the current timestamp first. It might become invalid anytime
	 * when a virtual machine is stopped by the host or when the watchdog
	 * is touched from NMI.
	 */
	now = get_timestamp();
	/*
	 * If a virtual machine is stopped by the host it can look to
	 * the watchdog like a soft lockup. This function touches the watchdog.
	 */
	kvm_check_and_clear_guest_paused();
	/*
	 * The stored timestamp is comparable with @now only when not touched.
	 * It might get touched anytime from NMI. Make sure that is_softlockup()
	 * uses the same (valid) value.
	 */
	period_ts = READ_ONCE(*this_cpu_ptr(&watchdog_report_ts));

	/* Reset the interval when touched by known problematic code. */
	if (period_ts == SOFTLOCKUP_DELAY_REPORT) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		update_report_ts();
		return HRTIMER_RESTART;
	}

	/* Check for a softlockup. */
	touch_ts = __this_cpu_read(watchdog_touch_ts);
	duration = is_softlockup(touch_ts, period_ts, now);
	if (unlikely(duration)) {
		/*
		 * Prevent multiple soft-lockup reports if one cpu is already
		 * engaged in dumping all cpu back traces.
		 */
		if (softlockup_all_cpu_backtrace) {
			if (test_and_set_bit_lock(0, &soft_lockup_nmi_warn))
				return HRTIMER_RESTART;
		}

		/* Start period for the next softlockup warning. */
		update_report_ts();

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			trigger_allbutcpu_cpu_backtrace(smp_processor_id());
			clear_bit_unlock(0, &soft_lockup_nmi_warn);
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
	}

	return HRTIMER_RESTART;
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
	struct completion *done = this_cpu_ptr(&softlockup_completion);

	WARN_ON_ONCE(cpu != smp_processor_id());

	init_completion(done);
	complete(done);

	/*
	 * Start the timer first to prevent the hardlockup watchdog triggering
	 * before the timer has a chance to fire.
	 */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	hrtimer->function = watchdog_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED_HARD);

	/* Initialize timestamp */
	update_touch_ts();
	/* Enable the hardlockup detector */
	if (watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED)
		watchdog_hardlockup_enable(cpu);
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	WARN_ON_ONCE(cpu != smp_processor_id());

	/*
	 * Disable the hardlockup detector first. That prevents that a large
	 * delay between disabling the timer and disabling the hardlockup
	 * detector causes a false positive.
	 */
	watchdog_hardlockup_disable(cpu);
	hrtimer_cancel(hrtimer);
	wait_for_completion(this_cpu_ptr(&softlockup_completion));
}

static int softlockup_stop_fn(void *data)
{
	watchdog_disable(smp_processor_id());
	return 0;
}

static void softlockup_stop_all(void)
{
	int cpu;

	if (!softlockup_initialized)
		return;

	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);

	cpumask_clear(&watchdog_allowed_mask);
}

static int softlockup_start_fn(void *data)
{
	watchdog_enable(smp_processor_id());
	return 0;
}

static void softlockup_start_all(void)
{
	int cpu;

	cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
}

int lockup_detector_online_cpu(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
		watchdog_enable(cpu);
	return 0;
}

int lockup_detector_offline_cpu(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
		watchdog_disable(cpu);
	return 0;
}

static void __lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_hardlockup_stop();

	softlockup_stop_all();
	set_sample_period();
	lockup_detector_update_enable();
	if (watchdog_enabled && watchdog_thresh)
		softlockup_start_all();

	watchdog_hardlockup_start();
	cpus_read_unlock();
	/*
	 * Must be called outside the cpus locked section to prevent
	 * recursive locking in the perf code.
	 */
	__lockup_detector_cleanup();
}

void lockup_detector_reconfigure(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_reconfigure();
	mutex_unlock(&watchdog_mutex);
}

/*
 * Create the watchdog infrastructure and configure the detector(s).
 */
static __init void lockup_detector_setup(void)
{
	/*
	 * If sysctl is off and watchdog got disabled on the command line,
	 * nothing to do here.
	 */
	lockup_detector_update_enable();

	if (!IS_ENABLED(CONFIG_SYSCTL) &&
	    !(watchdog_enabled && watchdog_thresh))
		return;

	mutex_lock(&watchdog_mutex);
	__lockup_detector_reconfigure();
	softlockup_initialized = true;
	mutex_unlock(&watchdog_mutex);
}

#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static void __lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_hardlockup_stop();
	lockup_detector_update_enable();
	watchdog_hardlockup_start();
	cpus_read_unlock();
}
void lockup_detector_reconfigure(void)
{
	__lockup_detector_reconfigure();
}
static inline void lockup_detector_setup(void)
{
	__lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */

static void __lockup_detector_cleanup(void)
{
	lockdep_assert_held(&watchdog_mutex);
	hardlockup_detector_perf_cleanup();
}

/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_cleanup();
	mutex_unlock(&watchdog_mutex);
}

/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
	watchdog_enabled = 0;
}

#ifdef CONFIG_SYSCTL

/* Propagate any changes to the watchdog infrastructure */
static void proc_watchdog_update(void)
{
	/* Remove impossible cpus to keep sysctl output clean. */
	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
	__lockup_detector_reconfigure();
}

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to            | 'which'
 * -------------------|----------------------------------|-------------------------------
 * proc_watchdog      | watchdog_user_enabled            | WATCHDOG_HARDLOCKUP_ENABLED |
 *                    |                                  | WATCHDOG_SOFTOCKUP_ENABLED
 * -------------------|----------------------------------|-------------------------------
 * proc_nmi_watchdog  | watchdog_hardlockup_user_enabled | WATCHDOG_HARDLOCKUP_ENABLED
 * -------------------|----------------------------------|-------------------------------
 * proc_soft_watchdog | watchdog_softlockup_user_enabled | WATCHDOG_SOFTOCKUP_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, *param = table->data;

	mutex_lock(&watchdog_mutex);

	if (!write) {
		/*
		 * On read synchronize the userspace interface. This is a
		 * racy snapshot.
		 */
		*param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		old = READ_ONCE(*param);
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (!err && old != READ_ONCE(*param))
			proc_watchdog_update();
	}
	mutex_unlock(&watchdog_mutex);
	return err;
}

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(WATCHDOG_HARDLOCKUP_ENABLED |
				    WATCHDOG_SOFTOCKUP_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void *buffer, size_t *lenp, loff_t *ppos)
{
	if (!watchdog_hardlockup_available && write)
		return -ENOTSUPP;
	return proc_watchdog_common(WATCHDOG_HARDLOCKUP_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
		       void *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(WATCHDOG_SOFTOCKUP_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old;

	mutex_lock(&watchdog_mutex);

	old = READ_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (!err && write && old != READ_ONCE(watchdog_thresh))
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on.  This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	mutex_lock(&watchdog_mutex);

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write)
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}
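
/*
 * Example runtime usage of the handlers above (a sketch; the paths follow
 * the sysctl table below):
 *
 *	echo 0   > /proc/sys/kernel/watchdog	       # both detectors off
 *	echo 20  > /proc/sys/kernel/watchdog_thresh    # 20s hard / 40s soft
 *	echo 0-3 > /proc/sys/kernel/watchdog_cpumask   # restrict to CPUs 0-3
 */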

static const int sixty = 60;

static struct ctl_table watchdog_sysctls[] = {
	{
		.procname	= "watchdog",
		.data		= &watchdog_user_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_watchdog,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "watchdog_thresh",
		.data		= &watchdog_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_watchdog_thresh,
		.extra1		= SYSCTL_ZERO,
		.extra2		= (void *)&sixty,
	},
	{
		.procname	= "watchdog_cpumask",
		.data		= &watchdog_cpumask_bits,
		.maxlen		= NR_CPUS,
		.mode		= 0644,
		.proc_handler	= proc_watchdog_cpumask,
	},
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
	{
		.procname	= "soft_watchdog",
		.data		= &watchdog_softlockup_user_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_soft_watchdog,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "softlockup_panic",
		.data		= &softlockup_panic,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#ifdef CONFIG_SMP
	{
		.procname	= "softlockup_all_cpu_backtrace",
		.data		= &sysctl_softlockup_all_cpu_backtrace,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#endif /* CONFIG_SMP */
#endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR
	{
		.procname	= "hardlockup_panic",
		.data		= &hardlockup_panic,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#ifdef CONFIG_SMP
	{
		.procname	= "hardlockup_all_cpu_backtrace",
		.data		= &sysctl_hardlockup_all_cpu_backtrace,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#endif /* CONFIG_SMP */
#endif
	{}
};

static struct ctl_table watchdog_hardlockup_sysctl[] = {
	{
		.procname	= "nmi_watchdog",
		.data		= &watchdog_hardlockup_user_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_nmi_watchdog,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{}
};

static void __init watchdog_sysctl_init(void)
{
	register_sysctl_init("kernel", watchdog_sysctls);

	if (watchdog_hardlockup_available)
		watchdog_hardlockup_sysctl[0].mode = 0644;
	register_sysctl_init("kernel", watchdog_hardlockup_sysctl);
}

#else
#define watchdog_sysctl_init() do { } while (0)
#endif /* CONFIG_SYSCTL */

static void __init lockup_detector_delay_init(struct work_struct *work);
static bool allow_lockup_detector_init_retry __initdata;

static struct work_struct detector_work __initdata =
		__WORK_INITIALIZER(detector_work, lockup_detector_delay_init);

static void __init lockup_detector_delay_init(struct work_struct *work)
{
	int ret;

	ret = watchdog_hardlockup_probe();
	if (ret) {
		pr_info("Delayed init of the lockup detector failed: %d\n", ret);
		pr_info("Hard watchdog permanently disabled\n");
		return;
	}

	allow_lockup_detector_init_retry = false;

	watchdog_hardlockup_available = true;
	lockup_detector_setup();
}

/*
 * lockup_detector_retry_init - retry init lockup detector if possible.
 *
 * Retry hardlockup detector init. It is useful when it requires some
 * functionality that has to be initialized later on a particular
 * platform.
 */
void __init lockup_detector_retry_init(void)
{
	/* Must be called before late init calls */
	if (!allow_lockup_detector_init_retry)
		return;

	schedule_work(&detector_work);
}

/*
 * Ensure that the optional delayed hardlockup init has completed before
 * the init code and memory are freed.
 */
static int __init lockup_detector_check(void)
{
	/* Prevent any later retry. */
	allow_lockup_detector_init_retry = false;

	/* Make sure no work is pending. */
	flush_work(&detector_work);

	watchdog_sysctl_init();

	return 0;
}
late_initcall_sync(lockup_detector_check);

void __init lockup_detector_init(void)
{
	if (tick_nohz_full_enabled())
		pr_info("Disabling watchdog on nohz_full cores by default\n");

	cpumask_copy(&watchdog_cpumask,
		     housekeeping_cpumask(HK_TYPE_TIMER));

	if (!watchdog_hardlockup_probe())
		watchdog_hardlockup_available = true;
	else
		allow_lockup_detector_init_retry = true;

	lockup_detector_setup();
}