// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard lockups on a system using perf
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/sched/debug.h>

#include <asm/irq_regs.h>
#include <linux/perf_event.h>
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);

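/* Number of CPUs with an active watchdog event; lets the pr_info() below fire once */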
static atomic_t watchdog_cpus = ATOMIC_INIT(0);

#ifdef CONFIG_HARDLOCKUP_CHECK_TIMESTAMP
static DEFINE_PER_CPU(ktime_t, last_timestamp);
static DEFINE_PER_CPU(unsigned int, nmi_rearmed);
static ktime_t watchdog_hrtimer_sample_threshold __read_mostly;

void watchdog_update_hrtimer_threshold(u64 period)
{
	/*
	 * The hrtimer runs with a period of (watchdog_threshold * 2) / 5
	 *
	 * So it runs effectively with 2.5 times the rate of the NMI
	 * watchdog. That means the hrtimer should fire 2-3 times before
	 * the NMI watchdog expires. The NMI watchdog on x86 is based on
	 * unhalted CPU cycles, so if Turbo-Mode is enabled the CPU cycles
	 * might run way faster than expected and the NMI fires in a
	 * smaller period than the one deduced from the nominal CPU
	 * frequency. Depending on the Turbo-Mode factor this might be fast
	 * enough to get the NMI period smaller than the hrtimer watchdog
	 * period and trigger false positives.
	 *
	 * The sample threshold is used to check in the NMI handler whether
	 * the minimum time between two NMI samples has elapsed. That
	 * prevents false positives.
	 *
	 * Set this to 4/5 of the actual watchdog threshold period so the
	 * hrtimer is guaranteed to fire at least once within the real
	 * watchdog threshold.
	 */
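	/*
	 * Worked example (purely illustrative numbers): with the default
	 * watchdog_thresh of 10s the hrtimer period is 10 * 2 / 5 = 4s,
	 * the nominal NMI period is 10s, and the threshold set below is
	 * 2 * 4s = 8s, i.e. 4/5 of watchdog_thresh as described above.
	 */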
	watchdog_hrtimer_sample_threshold = period * 2;
}

static bool watchdog_check_timestamp(void)
{
	ktime_t delta, now = ktime_get_mono_fast_ns();

	delta = now - __this_cpu_read(last_timestamp);
	if (delta < watchdog_hrtimer_sample_threshold) {
		/*
		 * If ktime is jiffies based, a stalled timer would prevent
		 * jiffies from being incremented and the filter would look
		 * at a stale timestamp and never trigger.
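		 * To avoid that, give up filtering after 10 consecutive
		 * premature NMIs and let the lockup check run anyway.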
		 */
		if (__this_cpu_inc_return(nmi_rearmed) < 10)
			return false;
	}
	__this_cpu_write(nmi_rearmed, 0);
	__this_cpu_write(last_timestamp, now);
	return true;
}

static void watchdog_init_timestamp(void)
{
	__this_cpu_write(nmi_rearmed, 0);
	__this_cpu_write(last_timestamp, ktime_get_mono_fast_ns());
}
#else
static inline bool watchdog_check_timestamp(void) { return true; }
static inline void watchdog_init_timestamp(void) { }
#endif

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};

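/*
 * Fallback attr: the default hardware cycles event. Tried when creating
 * the event from wd_hw_attr fails, e.g. after wd_hw_attr was overridden
 * to a raw event via hardlockup_config_perf_event().
 */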
static struct perf_event_attr fallback_wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (!watchdog_check_timestamp())
		return;

	watchdog_hardlockup_check(smp_processor_id(), regs);
}

static int hardlockup_detector_event_create(void)
{
	unsigned int cpu;
	struct perf_event_attr *wd_attr;
	struct perf_event *evt;

	/*
	 * Preemption is not disabled because memory will be allocated.
	 * Ensure CPU-locality by calling this in per-CPU kthread.
	 */
	WARN_ON(!is_percpu_thread());
	cpu = raw_smp_processor_id();
	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
					       watchdog_overflow_callback, NULL);
	if (IS_ERR(evt)) {
		wd_attr = &fallback_wd_hw_attr;
		wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
		evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
						       watchdog_overflow_callback, NULL);
	}

	if (IS_ERR(evt)) {
		pr_debug("Perf event create on CPU %d failed with %ld\n", cpu,
			 PTR_ERR(evt));
		return PTR_ERR(evt);
	}
	WARN_ONCE(this_cpu_read(watchdog_ev), "unexpected watchdog_ev leak");
	this_cpu_write(watchdog_ev, evt);
	return 0;
}

/**
 * watchdog_hardlockup_enable - Enable the local event
 * @cpu: The CPU to enable hard lockup on.
 */
void watchdog_hardlockup_enable(unsigned int cpu)
{
	WARN_ON_ONCE(cpu != smp_processor_id());

	if (hardlockup_detector_event_create())
		return;

	/* use original value for check */
	if (!atomic_fetch_inc(&watchdog_cpus))
		pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");

	watchdog_init_timestamp();
	perf_event_enable(this_cpu_read(watchdog_ev));
}

/**
 * watchdog_hardlockup_disable - Disable the local event
 * @cpu: The CPU to disable hard lockup on.
 */
void watchdog_hardlockup_disable(unsigned int cpu)
{
	struct perf_event *event = this_cpu_read(watchdog_ev);

	WARN_ON_ONCE(cpu != smp_processor_id());

	if (event) {
		perf_event_disable(event);
		perf_event_release_kernel(event);
		this_cpu_write(watchdog_ev, NULL);
		atomic_dec(&watchdog_cpus);
	}
}

/**
 * hardlockup_detector_perf_stop - Globally stop watchdog events
 *
 * Special interface for x86 to handle the perf HT bug.
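 *
 * The events are disabled but not released, so that
 * hardlockup_detector_perf_restart() can re-enable them later.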
 */
void hardlockup_detector_perf_stop(void)
{
	int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu) {
		struct perf_event *event = per_cpu(watchdog_ev, cpu);

		if (event)
			perf_event_disable(event);
	}
}

/**
 * hardlockup_detector_perf_restart - Globally restart watchdog events
 *
 * Special interface for x86 to handle the perf HT bug.
 */
void hardlockup_detector_perf_restart(void)
{
	int cpu;

	lockdep_assert_cpus_held();

	if (!(watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED))
		return;

	for_each_online_cpu(cpu) {
		struct perf_event *event = per_cpu(watchdog_ev, cpu);

		if (event)
			perf_event_enable(event);
	}
}

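/*
 * Weak default: assume the PMU can deliver NMIs. Architectures where
 * that depends on hardware or firmware support override this.
 */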
bool __weak __init arch_perf_nmi_is_available(void)
{
	return true;
}

/**
 * watchdog_hardlockup_probe - Probe whether NMI event is available at all
 */
int __init watchdog_hardlockup_probe(void)
{
	int ret;

	if (!arch_perf_nmi_is_available())
		return -ENODEV;

	ret = hardlockup_detector_event_create();

	if (ret) {
		pr_info("Perf NMI watchdog permanently disabled\n");
	} else {
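		/*
		 * The event was created only to probe availability. The
		 * real per-CPU events are created later through
		 * watchdog_hardlockup_enable(), so drop this one again.
		 */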
		perf_event_release_kernel(this_cpu_read(watchdog_ev));
		this_cpu_write(watchdog_ev, NULL);
	}
	return ret;
}

/**
 * hardlockup_config_perf_event - Overwrite config of wd_hw_attr.
 * @str: number which identifies the raw perf event to use
 */
void __init hardlockup_config_perf_event(const char *str)
{
	u64 config;
	char buf[24];
	char *comma = strchr(str, ',');

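	/*
	 * The string is a hex PMU event number, optionally followed by a
	 * comma and further fields; only the part in front of the first
	 * comma is parsed as the raw config here (e.g. a hypothetical
	 * "0x812" or "0x812,extra" both yield config 0x812).
	 */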
	if (!comma) {
		if (kstrtoull(str, 16, &config))
			return;
	} else {
		/* +1 leaves room for the terminating NUL required by strscpy() */
		unsigned int len = comma - str + 1;

		if (len >= sizeof(buf))
			return;

		strscpy(buf, str, len);
		if (kstrtoull(buf, 16, &config))
			return;
	}

	wd_hw_attr.type = PERF_TYPE_RAW;
	wd_hw_attr.config = config;
}