/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/include/linux/nmi.h
 */
#ifndef LINUX_NMI_H
#define LINUX_NMI_H

#include <linux/sched.h>
#include <asm/irq.h>

/* Arch specific watchdogs might need to share extra watchdog-related APIs. */
#if defined(CONFIG_HARDLOCKUP_DETECTOR_ARCH) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)
#include <asm/nmi.h>
#endif

#ifdef CONFIG_LOCKUP_DETECTOR
void lockup_detector_init(void);
void lockup_detector_retry_init(void);
void lockup_detector_soft_poweroff(void);
void lockup_detector_cleanup(void);

extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;

extern struct cpumask watchdog_cpumask;
extern unsigned long *watchdog_cpumask_bits;
#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif /* !CONFIG_SMP */

#else /* CONFIG_LOCKUP_DETECTOR */
static inline void lockup_detector_init(void) { }
static inline void lockup_detector_retry_init(void) { }
static inline void lockup_detector_soft_poweroff(void) { }
static inline void lockup_detector_cleanup(void) { }
#endif /* !CONFIG_LOCKUP_DETECTOR */

#ifdef CONFIG_SOFTLOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern unsigned int softlockup_panic;

extern int lockup_detector_online_cpu(unsigned int cpu);
extern int lockup_detector_offline_cpu(unsigned int cpu);
#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static inline void touch_softlockup_watchdog_sched(void) { }
static inline void touch_softlockup_watchdog(void) { }
static inline void touch_softlockup_watchdog_sync(void) { }
static inline void touch_all_softlockup_watchdogs(void) { }

#define lockup_detector_online_cpu NULL
#define lockup_detector_offline_cpu NULL
#endif /* CONFIG_SOFTLOCKUP_DETECTOR */
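
/*
 * Illustrative sketch (not part of this header's API): the online/offline
 * hooks above are intended to serve as CPU hotplug callbacks, which is why
 * the !CONFIG_SOFTLOCKUP_DETECTOR stubs are plain NULL defines - the hotplug
 * core skips NULL callbacks. The registration shown here is an assumption,
 * given only for illustration of how such hooks are typically wired up:
 *
 *	cpuhp_setup_state(CPUHP_AP_WATCHDOG, "lockup_detector:online",
 *			  lockup_detector_online_cpu,
 *			  lockup_detector_offline_cpu);
 */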

#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void) { }
#endif

/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'watchdog_hardlockup_user_enabled' and
 * 'watchdog_softlockup_user_enabled' are variables that are only used as an
 * 'interface' between the parameters in /proc/sys/kernel and the internal
 * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is
 * handled differently because its value is not boolean, and the lockup
 * detectors are 'suspended' while 'watchdog_thresh' is zero.
 */
#define WATCHDOG_HARDLOCKUP_ENABLED_BIT 0
#define WATCHDOG_SOFTOCKUP_ENABLED_BIT 1
#define WATCHDOG_HARDLOCKUP_ENABLED (1 << WATCHDOG_HARDLOCKUP_ENABLED_BIT)
#define WATCHDOG_SOFTOCKUP_ENABLED (1 << WATCHDOG_SOFTOCKUP_ENABLED_BIT)
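
/*
 * Illustrative sketch (not a definitive in-tree pattern): code that consumes
 * 'watchdog_enabled' checks the individual state bits, e.g.
 *
 *	if (watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED)
 *		watchdog_hardlockup_enable(cpu);
 *
 * where 'cpu' stands for whichever CPU is being brought up.
 */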

#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
extern unsigned int hardlockup_panic;
#else
static inline void hardlockup_detector_disable(void) {}
#endif

/* Sparc64 has a special implementation that is always enabled. */
#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)
void arch_touch_nmi_watchdog(void);
#else
static inline void arch_touch_nmi_watchdog(void) { }
#endif

#if defined(CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER)
void watchdog_hardlockup_touch_cpu(unsigned int cpu);
void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs);
#endif

#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
extern void hardlockup_detector_perf_stop(void);
extern void hardlockup_detector_perf_restart(void);
extern void hardlockup_detector_perf_cleanup(void);
#else
static inline void hardlockup_detector_perf_stop(void) { }
static inline void hardlockup_detector_perf_restart(void) { }
static inline void hardlockup_detector_perf_cleanup(void) { }
#endif

void watchdog_hardlockup_stop(void);
void watchdog_hardlockup_start(void);
int watchdog_hardlockup_probe(void);
void watchdog_hardlockup_enable(unsigned int cpu);
void watchdog_hardlockup_disable(unsigned int cpu);

void lockup_detector_reconfigure(void);

#ifdef CONFIG_HARDLOCKUP_DETECTOR_BUDDY
void watchdog_buddy_check_hardlockup(int hrtimer_interrupts);
#else
static inline void watchdog_buddy_check_hardlockup(int hrtimer_interrupts) {}
#endif

/**
 * touch_nmi_watchdog - manually reset the hardlockup watchdog timeout.
 *
 * If we support detecting hardlockups, touch_nmi_watchdog() may be
 * used to pet the watchdog (reset the timeout) - for code which
 * intentionally disables interrupts for a long time. This call is stateless.
 *
 * Though this function has "nmi" in the name, the hardlockup watchdog might
 * not be backed by NMIs. This function will likely be renamed to
 * touch_hardlockup_watchdog() in the future.
 */
static inline void touch_nmi_watchdog(void)
{
	/*
	 * Pass on to the hardlockup detector selected via CONFIG_. Note that
	 * the hardlockup detector may not be arch-specific nor using NMIs
	 * and the arch_touch_nmi_watchdog() function will likely be renamed
	 * in the future.
	 */
	arch_touch_nmi_watchdog();

	touch_softlockup_watchdog();
}
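
/*
 * Example (illustrative only): code that legitimately keeps interrupts
 * disabled for a long time can pet the watchdogs from inside its loop:
 *
 *	for (i = 0; i < nr_slow_ops; i++) {
 *		do_slow_operation(i);
 *		touch_nmi_watchdog();
 *	}
 *
 * 'do_slow_operation()' and 'nr_slow_ops' are hypothetical names used purely
 * for illustration.
 */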

/*
 * Create trigger_all_cpu_backtrace() out of the arch-provided
 * base function. Return whether such support was available,
 * to allow calling code to fall back to some other mechanism:
 */
#ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, -1);
	return true;
}

static inline bool trigger_allbutcpu_cpu_backtrace(int exclude_cpu)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, exclude_cpu);
	return true;
}

static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	arch_trigger_cpumask_backtrace(mask, -1);
	return true;
}

static inline bool trigger_single_cpu_backtrace(int cpu)
{
	arch_trigger_cpumask_backtrace(cpumask_of(cpu), -1);
	return true;
}

/* generic implementation */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
				   int exclude_cpu,
				   void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);

#else
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_allbutcpu_cpu_backtrace(int exclude_cpu)
{
	return false;
}
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	return false;
}
static inline bool trigger_single_cpu_backtrace(int cpu)
{
	return false;
}
#endif
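
/*
 * Example (illustrative only, not taken from a specific caller): the boolean
 * return value lets callers fall back to a weaker mechanism when the
 * architecture provides no backtrace support, e.g.
 *
 *	if (!trigger_all_cpu_backtrace())
 *		dump_stack();
 */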

#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh);
bool arch_perf_nmi_is_available(void);
#endif

#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
    defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
void watchdog_update_hrtimer_threshold(u64 period);
#else
static inline void watchdog_update_hrtimer_threshold(u64 period) { }
#endif

struct ctl_table;
int proc_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *);
int proc_nmi_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *);
int proc_soft_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *);
int proc_watchdog_thresh(struct ctl_table *, int, void *, size_t *, loff_t *);
int proc_watchdog_cpumask(struct ctl_table *, int, void *, size_t *, loff_t *);

#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif

#ifdef CONFIG_NMI_CHECK_CPU
void nmi_backtrace_stall_snap(const struct cpumask *btp);
void nmi_backtrace_stall_check(const struct cpumask *btp);
#else
static inline void nmi_backtrace_stall_snap(const struct cpumask *btp) {}
static inline void nmi_backtrace_stall_check(const struct cpumask *btp) {}
#endif

#endif