/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_STATS_H
#define _KERNEL_STATS_H

#ifdef CONFIG_SCHEDSTATS

extern struct static_key_false sched_schedstats;

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeue(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
#define schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var)		do { var++; } while (0)
#define schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt)	do { var += (amt); } while (0)
#define schedstat_add(var, amt)		do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val)	do { var = (val); } while (0)
#define schedstat_set(var, val)		do { if (schedstat_enabled()) { var = (val); } } while (0)
#define schedstat_val(var)		(var)
#define schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)
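
/*
 * Illustrative usage (an editorial sketch, not part of the upstream file;
 * "stats", "wait_start", "wait_count", "wait_sum", "delta" and "rq" are
 * assumed names standing in for whatever sched_statistics fields and
 * context a caller actually has):
 *
 *	__schedstat_set(stats->wait_start, rq_clock(rq));
 *	schedstat_inc(stats->wait_count);
 *	schedstat_add(stats->wait_sum, delta);
 *
 * The plain schedstat_*() forms test the sched_schedstats static branch
 * before touching the field; the __schedstat_*() forms update
 * unconditionally and are intended for paths that have already checked
 * schedstat_enabled().
 */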

void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
			       struct sched_statistics *stats);

void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
			     struct sched_statistics *stats);
void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
				    struct sched_statistics *stats);

static inline void
check_schedstat_required(void)
{
	if (schedstat_enabled())
		return;

	/* Force schedstat enabled if a dependent tracepoint is active */
	if (trace_sched_stat_wait_enabled()    ||
	    trace_sched_stat_sleep_enabled()   ||
	    trace_sched_stat_iowait_enabled()  ||
	    trace_sched_stat_blocked_enabled() ||
	    trace_sched_stat_runtime_enabled())
		printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, stat_blocked and stat_runtime require the kernel parameter schedstats=enable or kernel.sched_schedstats=1\n");
}

#else /* !CONFIG_SCHEDSTATS: */

static inline void rq_sched_info_arrive (struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delta) { }
# define schedstat_enabled()		0
# define __schedstat_inc(var)		do { } while (0)
# define schedstat_inc(var)		do { } while (0)
# define __schedstat_add(var, amt)	do { } while (0)
# define schedstat_add(var, amt)	do { } while (0)
# define __schedstat_set(var, val)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
# define schedstat_val(var)		0
# define schedstat_val_or_zero(var)	0

# define __update_stats_wait_start(rq, p, stats)	do { } while (0)
# define __update_stats_wait_end(rq, p, stats)		do { } while (0)
# define __update_stats_enqueue_sleeper(rq, p, stats)	do { } while (0)
# define check_schedstat_required()			do { } while (0)

#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_FAIR_GROUP_SCHED
struct sched_entity_stats {
	struct sched_entity     se;
	struct sched_statistics stats;
} __no_randomize_layout;
#endif

static inline struct sched_statistics *
__schedstats_from_se(struct sched_entity *se)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	if (!entity_is_task(se))
		return &container_of(se, struct sched_entity_stats, se)->stats;
#endif
	return &task_of(se)->stats;
}
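
/*
 * Illustrative lookup (an editorial sketch; "se" stands for any scheduling
 * entity in scope and "wait_max" is an assumed sched_statistics field):
 *
 *	struct sched_statistics *stats = __schedstats_from_se(se);
 *
 *	schedstat_set(stats->wait_max, 0);
 *
 * Task entities keep their statistics in the task_struct; group entities
 * (CONFIG_FAIR_GROUP_SCHED) carry theirs alongside the entity in
 * struct sched_entity_stats, hence the container_of() above.
 */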

#ifdef CONFIG_PSI
void psi_task_change(struct task_struct *task, int clear, int set);
void psi_task_switch(struct task_struct *prev, struct task_struct *next,
		     bool sleep);
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev);
#else /* !CONFIG_IRQ_TIME_ACCOUNTING: */
static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,
				       struct task_struct *prev) {}
#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */
/*
 * PSI tracks state that persists across sleeps, such as iowaits and
 * memory stalls. As a result, it has to distinguish between sleeps,
 * where a task's runnable state changes, and migrations, where a task
 * and its runnable state are being moved between CPUs and runqueues.
 *
 * A notable case is a task whose dequeue is delayed. PSI considers
 * those sleeping, but because they are still on the runqueue they can
 * go through migration requeues. In this case, *sleeping* states need
 * to be transferred.
 */
static inline void psi_enqueue(struct task_struct *p, int flags)
{
	int clear = 0, set = 0;

	if (static_branch_likely(&psi_disabled))
		return;

	/* Same runqueue, nothing changed for psi */
	if (flags & ENQUEUE_RESTORE)
		return;

	/* psi_sched_switch() will handle the flags */
	if (task_on_cpu(task_rq(p), p))
		return;

	if (p->se.sched_delayed) {
		/* CPU migration of "sleeping" task */
		WARN_ON_ONCE(!(flags & ENQUEUE_MIGRATED));
		if (p->in_memstall)
			set |= TSK_MEMSTALL;
		if (p->in_iowait)
			set |= TSK_IOWAIT;
	} else if (flags & ENQUEUE_MIGRATED) {
		/* CPU migration of runnable task */
		set = TSK_RUNNING;
		if (p->in_memstall)
			set |= TSK_MEMSTALL | TSK_MEMSTALL_RUNNING;
	} else {
		/* Wakeup of new or sleeping task */
		if (p->in_iowait)
			clear |= TSK_IOWAIT;
		set = TSK_RUNNING;
		if (p->in_memstall)
			set |= TSK_MEMSTALL_RUNNING;
	}

	psi_task_change(p, clear, set);
}

static inline void psi_dequeue(struct task_struct *p, int flags)
{
	if (static_branch_likely(&psi_disabled))
		return;

	/* Same runqueue, nothing changed for psi */
	if (flags & DEQUEUE_SAVE)
		return;

	/*
	 * A voluntary sleep is a dequeue followed by a task switch. To
	 * avoid walking all ancestors twice, psi_task_switch() handles
	 * TSK_RUNNING and TSK_IOWAIT for us when it moves TSK_ONCPU.
	 * Do nothing here.
	 *
	 * In the SCHED_PROXY_EXECUTION case we may do sleeping
	 * dequeues that are not followed by a task switch, so check
	 * TSK_ONCPU is set to ensure the task switch is imminent.
	 * Otherwise clear the flags as usual.
	 */
	if ((flags & DEQUEUE_SLEEP) && (p->psi_flags & TSK_ONCPU))
		return;

	/*
	 * When migrating a task to another CPU, clear all psi
	 * state. The enqueue callback above will work it out.
	 */
	psi_task_change(p, p->psi_flags, 0);
}
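
/*
 * Editorial sketch of the ordering the DEQUEUE_SLEEP/TSK_ONCPU check above
 * relies on (abbreviated, not a literal copy of the scheduler core): a
 * voluntary sleep is
 *
 *	dequeue (DEQUEUE_SLEEP)	-> psi_dequeue() returns early
 *	context switch		-> psi_sched_switch(prev, next, true)
 *
 * so psi_task_switch() clears TSK_ONCPU together with TSK_RUNNING and
 * TSK_IOWAIT in a single ancestor walk, while a migration dequeue clears
 * p->psi_flags here and relies on psi_enqueue() on the new runqueue to
 * re-establish the state.
 */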

static inline void psi_ttwu_dequeue(struct task_struct *p)
{
	if (static_branch_likely(&psi_disabled))
		return;
	/*
	 * Is the task being migrated during a wakeup? Make sure to
	 * deregister its sleep-persistent psi states from the old
	 * queue, and let psi_enqueue() know it has to requeue.
	 */
	if (unlikely(p->psi_flags)) {
		struct rq_flags rf;
		struct rq *rq;

		rq = __task_rq_lock(p, &rf);
		psi_task_change(p, p->psi_flags, 0);
		__task_rq_unlock(rq, p, &rf);
	}
}

static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep)
{
	if (static_branch_likely(&psi_disabled))
		return;

	psi_task_switch(prev, next, sleep);
}

#else /* !CONFIG_PSI: */
static inline void psi_enqueue(struct task_struct *p, int flags) {}
static inline void psi_dequeue(struct task_struct *p, int flags) {}
static inline void psi_ttwu_dequeue(struct task_struct *p) {}
static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep) {}
static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,
				       struct task_struct *prev) {}
#endif /* !CONFIG_PSI */

#ifdef CONFIG_SCHED_INFO
/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU. We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across CPUs: the queued timestamp and the delta are both taken on the
 * queueing CPU's clock, so the skew annuls itself.
 */
static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = 0;

	if (!t->sched_info.last_queued)
		return;

	delta = rq_clock(rq) - t->sched_info.last_queued;
	t->sched_info.last_queued = 0;
	t->sched_info.run_delay += delta;
	if (delta > t->sched_info.max_run_delay)
		t->sched_info.max_run_delay = delta;
	if (delta && (!t->sched_info.min_run_delay || delta < t->sched_info.min_run_delay))
		t->sched_info.min_run_delay = delta;
	rq_sched_info_dequeue(rq, delta);
}

/*
 * Called when a task finally hits the CPU. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its time-slice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now, delta = 0;

	if (!t->sched_info.last_queued)
		return;

	now = rq_clock(rq);
	delta = now - t->sched_info.last_queued;
	t->sched_info.last_queued = 0;
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;
	if (delta > t->sched_info.max_run_delay)
		t->sched_info.max_run_delay = delta;
	if (delta && (!t->sched_info.min_run_delay || delta < t->sched_info.min_run_delay))
		t->sched_info.min_run_delay = delta;

	rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), but it also only
 * updates the timestamp if it is not already set. It's assumed that
 * sched_info_dequeue() will clear that stamp when appropriate.
 */
static inline void sched_info_enqueue(struct rq *rq, struct task_struct *t)
{
	if (!t->sched_info.last_queued)
		t->sched_info.last_queued = rq_clock(rq);
}

/*
 * Called when a process ceases being the active-running process
 * involuntarily, typically due to expiring its time slice (this may also
 * be called when switching to the idle task). Now we can calculate how
 * long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_enqueue() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (task_is_running(t))
		sched_info_enqueue(rq, t);
}

/*
 * Called when tasks are switched involuntarily, typically due to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the CPU. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}
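
/*
 * Illustrative call site (an editorial sketch of the context-switch path,
 * not copied from the scheduler core): the caller only invokes this once
 * it has decided an actual switch will happen,
 *
 *	if (likely(prev != next)) {
 *		...
 *		sched_info_switch(rq, prev, next);
 *		...
 *	}
 *
 * which is why the function can assume prev != next.
 */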

#else /* !CONFIG_SCHED_INFO: */
# define sched_info_enqueue(rq, t)	do { } while (0)
# define sched_info_dequeue(rq, t)	do { } while (0)
# define sched_info_switch(rq, t, next)	do { } while (0)
#endif /* !CONFIG_SCHED_INFO */

#endif /* _KERNEL_STATS_H */