// SPDX-License-Identifier: GPL-2.0-only
/*
 * latencytop.c: Latency display infrastructure
 *
 * (C) Copyright 2008 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */

/*
 * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
 * used by the "latencytop" userspace tool. The latency that is tracked is not
 * the 'traditional' interrupt latency (which is primarily caused by something
 * else consuming CPU), but instead, it is the latency an application encounters
 * because the kernel sleeps on its behalf for various reasons.
 *
 * This code tracks 2 levels of statistics:
 * 1) System level latency
 * 2) Per process latency
 *
 * The latency is stored in fixed-size data structures in an accumulated form;
 * if the "same" latency cause is hit twice, it is tracked as a single entry
 * in the data structure. The count, the total accumulated latency and the
 * maximum latency are all tracked in this data structure. When the fixed-size
 * structure is full, no new causes are tracked until the buffer is flushed by
 * writing to the /proc file; the userspace tool does this on a regular basis.
 *
 * A latency cause is identified by a stringified backtrace at the point that
 * the scheduler gets invoked. The userland tool will use this string to
 * identify the cause of the latency in human readable form.
 *
 * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
 * These files look like this:
 *
 * Latency Top version : v0.1
 * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
 * |    |    |    |
 * |    |    |    +----> the stringified backtrace
 * |    |    +---------> The maximum latency for this entry in microseconds
 * |    +--------------> The accumulated latency for this entry (microseconds)
 * +-------------------> The number of times this entry is hit
 *
 * (note: the average latency is the accumulated latency divided by the number
 * of times)
 */
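
/*
 * A rough usage sketch from the userspace side, based on the interfaces
 * registered below (the "kernel.latencytop" sysctl and /proc/latency_stats);
 * the exact workflow of the latencytop tool may differ:
 *
 *   # enable collection (also forces schedstats on, see sysctl_latencytop())
 *   echo 1 > /proc/sys/kernel/latencytop
 *   # read the accumulated system-wide records
 *   cat /proc/latency_stats
 *   # flush the fixed-size buffer so new causes can be tracked again
 *   echo 1 > /proc/latency_stats
 */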

#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/latencytop.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/stat.h>
#include <linux/list.h>
#include <linux/stacktrace.h>
#include <linux/sysctl.h>

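/*
 * Protects both the global latency_record[] table below and each task's
 * private latency_record[] entries.
 */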
static DEFINE_RAW_SPINLOCK(latency_lock);

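/*
 * Fixed-size system-wide table of latency causes. Once it fills up, new
 * causes are silently dropped until userspace flushes the table by writing
 * to /proc/latency_stats (see lstats_write()).
 */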
#define MAXLR 128
static struct latency_record latency_record[MAXLR];

int latencytop_enabled;

#ifdef CONFIG_SYSCTL
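/*
 * Sysctl handler for kernel.latencytop. Whenever latencytop ends up enabled
 * we also force schedstats on, since the scheduler-side hooks that feed
 * __account_scheduler_latency() rely on schedstat bookkeeping.
 */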
static int sysctl_latencytop(const struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int err;

	err = proc_dointvec(table, write, buffer, lenp, ppos);
	if (latencytop_enabled)
		force_schedstat_enabled();

	return err;
}

static const struct ctl_table latencytop_sysctl[] = {
	{
		.procname	= "latencytop",
		.data		= &latencytop_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= sysctl_latencytop,
	},
};
#endif

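/* Reset @p's per-task latency records and record count. */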
void clear_tsk_latency_tracing(struct task_struct *p)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&p->latency_record, 0, sizeof(p->latency_record));
	p->latency_record_count = 0;
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

static void clear_global_latency_tracing(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&latency_record, 0, sizeof(latency_record));
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

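/*
 * Fold one latency sample into the system-wide table: if an entry with the
 * same backtrace already exists, accumulate into it; otherwise take the
 * first free slot, or drop the sample if the table is full. Caller must
 * hold latency_lock.
 */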
static void __sched
account_global_scheduler_latency(struct task_struct *tsk,
				 struct latency_record *lat)
{
	int firstnonnull = MAXLR;
	int i;

	/* skip kernel threads for now */
	if (!tsk->mm)
		return;

	for (i = 0; i < MAXLR; i++) {
		int q, same = 1;

		/* Nothing stored: */
		if (!latency_record[i].backtrace[0]) {
			if (firstnonnull > i)
				firstnonnull = i;
			continue;
		}
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat->backtrace[q];

			if (latency_record[i].backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 entry marks end of backtrace: */
			if (!record)
				break;
		}
		if (same) {
			latency_record[i].count++;
			latency_record[i].time += lat->time;
			if (lat->time > latency_record[i].max)
				latency_record[i].max = lat->time;
			return;
		}
	}

	i = firstnonnull;
	if (i >= MAXLR)
		return;

	/* Allocated a new one: */
	memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}

/**
 * __account_scheduler_latency - record a latency that occurred
 * @tsk: the task struct of the task hitting the latency
 * @usecs: the duration of the latency in microseconds
 * @inter: 1 if the sleep was interruptible, 0 if uninterruptible
 *
 * This function is the main entry point for recording latency entries
 * as called by the scheduler.
 *
 * This function has a few special cases to deal with normal 'non-latency'
 * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
 * since this usually is caused by waiting for events via select() and co.
 *
 * Negative latencies (caused by time going backwards) are also explicitly
 * skipped.
 */
void __sched
__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
	unsigned long flags;
	int i, q;
	struct latency_record lat;

	/* Long interruptible waits are generally user requested... */
	if (inter && usecs > 5000)
		return;

	/* Negative sleeps are time going backwards */
	/* Zero-time sleeps are non-interesting */
	if (usecs <= 0)
		return;

	memset(&lat, 0, sizeof(lat));
	lat.count = 1;
	lat.time = usecs;
	lat.max = usecs;

	stack_trace_save_tsk(tsk, lat.backtrace, LT_BACKTRACEDEPTH, 0);

	raw_spin_lock_irqsave(&latency_lock, flags);

	account_global_scheduler_latency(tsk, &lat);


	for (i = 0; i < tsk->latency_record_count; i++) {
		struct latency_record *mylat;
		int same = 1;

		mylat = &tsk->latency_record[i];
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat.backtrace[q];

			if (mylat->backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 entry is end of backtrace */
			if (!record)
				break;
		}
		if (same) {
			mylat->count++;
			mylat->time += lat.time;
			if (lat.time > mylat->max)
				mylat->max = lat.time;
			goto out_unlock;
		}
	}

	/*
	 * Short term hack: if we already have LT_SAVECOUNT records we stop;
	 * in the future we should recycle old entries.
	 */
	if (tsk->latency_record_count >= LT_SAVECOUNT)
		goto out_unlock;

	/* Allocated a new one: */
	i = tsk->latency_record_count++;
	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));

out_unlock:
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

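/*
 * Emit one line per populated global record: hit count, total accumulated
 * time and maximum time (all in microseconds), followed by the symbolized
 * backtrace.
 */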
static int lstats_show(struct seq_file *m, void *v)
{
	int i;

	seq_puts(m, "Latency Top version : v0.1\n");

	for (i = 0; i < MAXLR; i++) {
		struct latency_record *lr = &latency_record[i];

		if (lr->backtrace[0]) {
			int q;

			seq_printf(m, "%i %lu %lu",
				   lr->count, lr->time, lr->max);
			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
				unsigned long bt = lr->backtrace[q];

				if (!bt)
					break;

				seq_printf(m, " %ps", (void *)bt);
			}
			seq_puts(m, "\n");
		}
	}
	return 0;
}

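/*
 * Any write to /proc/latency_stats clears the global statistics; the data
 * that was written is ignored.
 */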
static ssize_t
lstats_write(struct file *file, const char __user *buf, size_t count,
	     loff_t *offs)
{
	clear_global_latency_tracing();

	return count;
}

static int lstats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, lstats_show, NULL);
}

static const struct proc_ops lstats_proc_ops = {
	.proc_open	= lstats_open,
	.proc_read	= seq_read,
	.proc_write	= lstats_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

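/*
 * Boot-time setup: create /proc/latency_stats and, when CONFIG_SYSCTL is
 * enabled, register the kernel.latencytop sysctl.
 */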
static int __init init_lstats_procfs(void)
{
	proc_create("latency_stats", 0644, NULL, &lstats_proc_ops);
#ifdef CONFIG_SYSCTL
	register_sysctl_init("kernel", latencytop_sysctl);
#endif
	return 0;
}
device_initcall(init_lstats_procfs);