1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* |
3 | * Scheduler internal types and methods: |
4 | */ |
5 | #ifndef _KERNEL_SCHED_SCHED_H |
6 | #define _KERNEL_SCHED_SCHED_H |
7 | |
8 | #include <linux/sched/affinity.h> |
9 | #include <linux/sched/autogroup.h> |
10 | #include <linux/sched/cpufreq.h> |
11 | #include <linux/sched/deadline.h> |
12 | #include <linux/sched.h> |
13 | #include <linux/sched/loadavg.h> |
14 | #include <linux/sched/mm.h> |
15 | #include <linux/sched/rseq_api.h> |
16 | #include <linux/sched/signal.h> |
17 | #include <linux/sched/smt.h> |
18 | #include <linux/sched/stat.h> |
19 | #include <linux/sched/sysctl.h> |
20 | #include <linux/sched/task_flags.h> |
21 | #include <linux/sched/task.h> |
22 | #include <linux/sched/topology.h> |
23 | |
24 | #include <linux/atomic.h> |
25 | #include <linux/bitmap.h> |
26 | #include <linux/bug.h> |
27 | #include <linux/capability.h> |
28 | #include <linux/cgroup_api.h> |
29 | #include <linux/cgroup.h> |
30 | #include <linux/context_tracking.h> |
31 | #include <linux/cpufreq.h> |
32 | #include <linux/cpumask_api.h> |
33 | #include <linux/ctype.h> |
34 | #include <linux/file.h> |
35 | #include <linux/fs_api.h> |
36 | #include <linux/hrtimer_api.h> |
37 | #include <linux/interrupt.h> |
38 | #include <linux/irq_work.h> |
39 | #include <linux/jiffies.h> |
40 | #include <linux/kref_api.h> |
41 | #include <linux/kthread.h> |
42 | #include <linux/ktime_api.h> |
43 | #include <linux/lockdep_api.h> |
44 | #include <linux/lockdep.h> |
45 | #include <linux/minmax.h> |
46 | #include <linux/mm.h> |
47 | #include <linux/module.h> |
48 | #include <linux/mutex_api.h> |
49 | #include <linux/plist.h> |
50 | #include <linux/poll.h> |
51 | #include <linux/proc_fs.h> |
52 | #include <linux/profile.h> |
53 | #include <linux/psi.h> |
54 | #include <linux/rcupdate.h> |
55 | #include <linux/seq_file.h> |
56 | #include <linux/seqlock.h> |
57 | #include <linux/softirq.h> |
58 | #include <linux/spinlock_api.h> |
59 | #include <linux/static_key.h> |
60 | #include <linux/stop_machine.h> |
61 | #include <linux/syscalls_api.h> |
62 | #include <linux/syscalls.h> |
63 | #include <linux/tick.h> |
64 | #include <linux/topology.h> |
65 | #include <linux/types.h> |
66 | #include <linux/u64_stats_sync_api.h> |
67 | #include <linux/uaccess.h> |
68 | #include <linux/wait_api.h> |
69 | #include <linux/wait_bit.h> |
70 | #include <linux/workqueue_api.h> |
71 | #include <linux/delayacct.h> |
72 | |
73 | #include <trace/events/power.h> |
74 | #include <trace/events/sched.h> |
75 | |
76 | #include "../workqueue_internal.h" |
77 | |
78 | struct rq; |
79 | struct cfs_rq; |
80 | struct rt_rq; |
81 | struct sched_group; |
82 | struct cpuidle_state; |
83 | |
84 | #ifdef CONFIG_PARAVIRT |
85 | # include <asm/paravirt.h> |
86 | # include <asm/paravirt_api_clock.h> |
87 | #endif |
88 | |
89 | #include <asm/barrier.h> |
90 | |
91 | #include "cpupri.h" |
92 | #include "cpudeadline.h" |
93 | |
94 | /* task_struct::on_rq states: */ |
95 | #define TASK_ON_RQ_QUEUED 1 |
96 | #define TASK_ON_RQ_MIGRATING 2 |
97 | |
98 | extern __read_mostly int scheduler_running; |
99 | |
100 | extern unsigned long calc_load_update; |
101 | extern atomic_long_t calc_load_tasks; |
102 | |
103 | extern void calc_global_load_tick(struct rq *this_rq); |
104 | extern long calc_load_fold_active(struct rq *this_rq, long adjust); |
105 | |
106 | extern void call_trace_sched_update_nr_running(struct rq *rq, int count); |
107 | |
108 | extern int sysctl_sched_rt_period; |
109 | extern int sysctl_sched_rt_runtime; |
110 | extern int sched_rr_timeslice; |
111 | |
112 | /* |
113 | * Asymmetric CPU capacity bits |
114 | */ |
115 | struct asym_cap_data { |
116 | struct list_head link; |
117 | struct rcu_head rcu; |
118 | unsigned long capacity; |
119 | unsigned long cpus[]; |
120 | }; |
121 | |
122 | extern struct list_head asym_cap_list; |
123 | |
124 | #define cpu_capacity_span(asym_data) to_cpumask((asym_data)->cpus) |
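/*
 * Illustrative use (a sketch, not a fixed API contract): each entry on
 * asym_cap_list describes one distinct capacity value and the CPUs that
 * provide it, so a lookup could look roughly like:
 *
 *	list_for_each_entry_rcu(entry, &asym_cap_list, link)
 *		if (cpumask_test_cpu(cpu, cpu_capacity_span(entry)))
 *			capacity = entry->capacity;
 */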
125 | |
126 | /* |
127 | * Helpers for converting nanosecond timing to jiffy resolution |
128 | */ |
129 | #define NS_TO_JIFFIES(time) ((unsigned long)(time) / (NSEC_PER_SEC/HZ)) |
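/*
 * For example, with HZ == 1000 a jiffy is 1,000,000 ns, so
 * NS_TO_JIFFIES(5000000) == 5.
 */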
130 | |
131 | /* |
132 | * Increase resolution of nice-level calculations for 64-bit architectures. |
133 | * The extra resolution improves shares distribution and load balancing of |
134 | low-weight task groups (eg. nice +19 on an autogroup) and deeper task-group |
135 | hierarchies, especially on larger systems. This is not a user-visible change |
136 | * and does not change the user-interface for setting shares/weights. |
137 | * |
138 | * We increase resolution only if we have enough bits to allow this increased |
139 | * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit |
140 | * are pretty high and the returns do not justify the increased costs. |
141 | * |
142 | * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to |
143 | * increase coverage and consistency always enable it on 64-bit platforms. |
144 | */ |
145 | #ifdef CONFIG_64BIT |
146 | # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT) |
147 | # define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT) |
148 | # define scale_load_down(w) \ |
149 | ({ \ |
150 | unsigned long __w = (w); \ |
151 | \ |
152 | if (__w) \ |
153 | __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \ |
154 | __w; \ |
155 | }) |
156 | #else |
157 | # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT) |
158 | # define scale_load(w) (w) |
159 | # define scale_load_down(w) (w) |
160 | #endif |
161 | |
162 | /* |
163 | * Task weight (visible to users) and its load (invisible to users) have |
164 | * independent resolution, but they should be well calibrated. We use |
165 | * scale_load() and scale_load_down(w) to convert between them. The |
166 | * following must be true: |
167 | * |
168 | * scale_load(sched_prio_to_weight[NICE_TO_PRIO(0)-MAX_RT_PRIO]) == NICE_0_LOAD |
169 | * |
170 | */ |
171 | #define NICE_0_LOAD (1L << NICE_0_LOAD_SHIFT) |
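/*
 * Worked example (assuming SCHED_FIXEDPOINT_SHIFT == 10): the nice-0 weight
 * visible to user-space is 1024. On 64-bit, NICE_0_LOAD_SHIFT == 20, so:
 *
 *	scale_load(1024)             == 1024 << 10 == 1048576 == NICE_0_LOAD
 *	scale_load_down(NICE_0_LOAD) == 1048576 >> 10 == 1024
 *
 * i.e. the round trip preserves the user-visible weight.
 */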
172 | |
173 | /* |
174 | * Single value that decides SCHED_DEADLINE internal math precision. |
175 | * 10 -> just above 1us |
176 | * 9 -> just above 0.5us |
177 | */ |
178 | #define DL_SCALE 10 |
179 | |
180 | /* |
181 | * Single value that denotes runtime == period, ie unlimited time. |
182 | */ |
183 | #define RUNTIME_INF ((u64)~0ULL) |
184 | |
185 | static inline int idle_policy(int policy) |
186 | { |
187 | return policy == SCHED_IDLE; |
188 | } |
189 | |
190 | static inline int normal_policy(int policy) |
191 | { |
192 | #ifdef CONFIG_SCHED_CLASS_EXT |
193 | if (policy == SCHED_EXT) |
194 | return true; |
195 | #endif |
196 | return policy == SCHED_NORMAL; |
197 | } |
198 | |
199 | static inline int fair_policy(int policy) |
200 | { |
201 | return normal_policy(policy) || policy == SCHED_BATCH; |
202 | } |
203 | |
204 | static inline int rt_policy(int policy) |
205 | { |
206 | return policy == SCHED_FIFO || policy == SCHED_RR; |
207 | } |
208 | |
209 | static inline int dl_policy(int policy) |
210 | { |
211 | return policy == SCHED_DEADLINE; |
212 | } |
213 | |
214 | static inline bool valid_policy(int policy) |
215 | { |
216 | return idle_policy(policy) || fair_policy(policy) || |
217 | rt_policy(policy) || dl_policy(policy); |
218 | } |
219 | |
220 | static inline int task_has_idle_policy(struct task_struct *p) |
221 | { |
222 | return idle_policy(p->policy); |
223 | } |
224 | |
225 | static inline int task_has_rt_policy(struct task_struct *p) |
226 | { |
227 | return rt_policy(p->policy); |
228 | } |
229 | |
230 | static inline int task_has_dl_policy(struct task_struct *p) |
231 | { |
232 | return dl_policy(p->policy); |
233 | } |
234 | |
235 | #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT) |
236 | |
237 | static inline void update_avg(u64 *avg, u64 sample) |
238 | { |
239 | s64 diff = sample - *avg; |
240 | |
241 | *avg += diff / 8; |
242 | } |
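/*
 * update_avg() above is an exponentially weighted moving average with a
 * 1/8 weight for the new sample: avg += (sample - avg) / 8. E.g. with
 * *avg == 1000 and sample == 1800, diff == 800 and the new *avg is 1100.
 */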
243 | |
244 | /* |
245 | * Shifting a value by an exponent greater *or equal* to the size of said value |
246 | * is UB; cap at size-1. |
247 | */ |
248 | #define shr_bound(val, shift) \ |
249 | (val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1)) |
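/*
 * E.g. shifting a u32 by 40 would be undefined behaviour; shr_bound()
 * clamps the shift to 31 instead.
 */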
250 | |
251 | /* |
252 | * cgroup weight knobs should use the common MIN, DFL and MAX values which are |
253 | * 1, 100 and 10000 respectively. While it loses a bit of range on both ends, it |
254 | maps pretty well onto the shares value used by the scheduler and the round-trip |
255 | * conversions preserve the original value over the entire range. |
256 | */ |
257 | static inline unsigned long sched_weight_from_cgroup(unsigned long cgrp_weight) |
258 | { |
259 | return DIV_ROUND_CLOSEST_ULL(cgrp_weight * 1024, CGROUP_WEIGHT_DFL); |
260 | } |
261 | |
262 | static inline unsigned long sched_weight_to_cgroup(unsigned long weight) |
263 | { |
264 | return clamp_t(unsigned long, |
265 | DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024), |
266 | CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX); |
267 | } |
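/*
 * Round-trip example: CGROUP_WEIGHT_DFL (100) maps to the scheduler weight
 * 1024 and back to 100; the minimum cgroup weight of 1 maps to
 * DIV_ROUND_CLOSEST(1 * 1024, 100) == 10, and 10 maps back to
 * DIV_ROUND_CLOSEST(10 * 100, 1024) == 1.
 */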
268 | |
269 | /* |
270 | * !! For sched_setattr_nocheck() (kernel) only !! |
271 | * |
272 | * This is actually gross. :( |
273 | * |
274 | * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE |
275 | * tasks, but still be able to sleep. We need this on platforms that cannot |
276 | atomically change clock frequency. Remove this once fast switching is |
277 | available on such platforms. |
278 | * |
279 | * SUGOV stands for SchedUtil GOVernor. |
280 | */ |
281 | #define SCHED_FLAG_SUGOV 0x10000000 |
282 | |
283 | #define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV) |
284 | |
285 | static inline bool dl_entity_is_special(const struct sched_dl_entity *dl_se) |
286 | { |
287 | #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL |
288 | return unlikely(dl_se->flags & SCHED_FLAG_SUGOV); |
289 | #else |
290 | return false; |
291 | #endif |
292 | } |
293 | |
294 | /* |
295 | * Tells if entity @a should preempt entity @b. |
296 | */ |
297 | static inline bool dl_entity_preempt(const struct sched_dl_entity *a, |
298 | const struct sched_dl_entity *b) |
299 | { |
300 | return dl_entity_is_special(a) || |
301 | dl_time_before(a->deadline, b->deadline); |
302 | } |
303 | |
304 | /* |
305 | * This is the priority-queue data structure of the RT scheduling class: |
306 | */ |
307 | struct rt_prio_array { |
308 | DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */ |
309 | struct list_head queue[MAX_RT_PRIO]; |
310 | }; |
311 | |
312 | struct rt_bandwidth { |
313 | /* nests inside the rq lock: */ |
314 | raw_spinlock_t rt_runtime_lock; |
315 | ktime_t rt_period; |
316 | u64 rt_runtime; |
317 | struct hrtimer rt_period_timer; |
318 | unsigned int rt_period_active; |
319 | }; |
320 | |
321 | static inline int dl_bandwidth_enabled(void) |
322 | { |
323 | return sysctl_sched_rt_runtime >= 0; |
324 | } |
325 | |
326 | /* |
327 | * To keep the bandwidth of -deadline tasks under control |
328 | * we need some place where: |
329 | * - store the maximum -deadline bandwidth of each cpu; |
330 | * - cache the fraction of bandwidth that is currently allocated in |
331 | * each root domain; |
332 | * |
333 | * This is all done in the data structure below. It is similar to the |
334 | * one used for RT-throttling (rt_bandwidth), with the main difference |
335 | * that, since here we are only interested in admission control, we |
336 | do not decrease any runtime while the group "executes", nor do we |
337 | need a timer to replenish it. |
338 | * |
339 | * With respect to SMP, bandwidth is given on a per root domain basis, |
340 | * meaning that: |
341 | * - bw (< 100%) is the deadline bandwidth of each CPU; |
342 | * - total_bw is the currently allocated bandwidth in each root domain; |
343 | */ |
344 | struct dl_bw { |
345 | raw_spinlock_t lock; |
346 | u64 bw; |
347 | u64 total_bw; |
348 | }; |
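/*
 * Sketch of how admission control uses this (simplified; the details live
 * in deadline.c): a task's bandwidth is its runtime/period ratio kept as a
 * fixed-point fraction, e.g. runtime = 10ms over period = 100ms is 10% of
 * one CPU. Roughly speaking, sched_dl_overflow() only accepts a change if
 * total_bw plus the new task's bandwidth stays within bw scaled by the CPU
 * capacity available in the root domain.
 */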
349 | |
350 | extern void init_dl_bw(struct dl_bw *dl_b); |
351 | extern int sched_dl_global_validate(void); |
352 | extern void sched_dl_do_global(void); |
353 | extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr); |
354 | extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr); |
355 | extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr); |
356 | extern bool __checkparam_dl(const struct sched_attr *attr); |
357 | extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr); |
358 | extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); |
359 | extern int dl_bw_deactivate(int cpu); |
360 | extern s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec); |
361 | /* |
362 | * SCHED_DEADLINE supports servers (nested scheduling) with the following |
363 | * interface: |
364 | * |
365 | * dl_se::rq -- runqueue we belong to. |
366 | * |
367 | * dl_se::server_has_tasks() -- used on bandwidth enforcement; we 'stop' the |
368 | * server when it runs out of tasks to run. |
369 | * |
370 | * dl_se::server_pick() -- nested pick_next_task(); we yield the period if this |
371 | * returns NULL. |
372 | * |
373 | * dl_server_update() -- called from update_curr_common(), propagates runtime |
374 | * to the server. |
375 | * |
376 | * dl_server_start() |
377 | * dl_server_stop() -- start/stop the server when it has (no) tasks. |
378 | * |
379 | * dl_server_init() -- initializes the server. |
380 | */ |
381 | extern void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec); |
382 | extern void dl_server_start(struct sched_dl_entity *dl_se); |
383 | extern void dl_server_stop(struct sched_dl_entity *dl_se); |
384 | extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq, |
385 | dl_server_has_tasks_f has_tasks, |
386 | dl_server_pick_f pick_task); |
387 | |
388 | extern void dl_server_update_idle_time(struct rq *rq, |
389 | struct task_struct *p); |
390 | extern void fair_server_init(struct rq *rq); |
391 | extern void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq); |
392 | extern int dl_server_apply_params(struct sched_dl_entity *dl_se, |
393 | u64 runtime, u64 period, bool init); |
394 | |
395 | static inline bool dl_server_active(struct sched_dl_entity *dl_se) |
396 | { |
397 | return dl_se->dl_server_active; |
398 | } |
399 | |
400 | #ifdef CONFIG_CGROUP_SCHED |
401 | |
402 | extern struct list_head task_groups; |
403 | |
404 | struct cfs_bandwidth { |
405 | #ifdef CONFIG_CFS_BANDWIDTH |
406 | raw_spinlock_t lock; |
407 | ktime_t period; |
408 | u64 quota; |
409 | u64 runtime; |
410 | u64 burst; |
411 | u64 runtime_snap; |
412 | s64 hierarchical_quota; |
413 | |
414 | u8 idle; |
415 | u8 period_active; |
416 | u8 slack_started; |
417 | struct hrtimer period_timer; |
418 | struct hrtimer slack_timer; |
419 | struct list_head throttled_cfs_rq; |
420 | |
421 | /* Statistics: */ |
422 | int nr_periods; |
423 | int nr_throttled; |
424 | int nr_burst; |
425 | u64 throttled_time; |
426 | u64 burst_time; |
427 | #endif |
428 | }; |
429 | |
430 | /* Task group related information */ |
431 | struct task_group { |
432 | struct cgroup_subsys_state css; |
433 | |
434 | #ifdef CONFIG_GROUP_SCHED_WEIGHT |
435 | /* A positive value indicates that this is a SCHED_IDLE group. */ |
436 | int idle; |
437 | #endif |
438 | |
439 | #ifdef CONFIG_FAIR_GROUP_SCHED |
440 | /* schedulable entities of this group on each CPU */ |
441 | struct sched_entity **se; |
442 | /* runqueue "owned" by this group on each CPU */ |
443 | struct cfs_rq **cfs_rq; |
444 | unsigned long shares; |
445 | #ifdef CONFIG_SMP |
446 | /* |
447 | * load_avg can be heavily contended at clock tick time, so put |
448 | * it in its own cache-line separated from the fields above which |
449 | * will also be accessed at each tick. |
450 | */ |
451 | atomic_long_t load_avg ____cacheline_aligned; |
452 | #endif |
453 | #endif |
454 | |
455 | #ifdef CONFIG_RT_GROUP_SCHED |
456 | struct sched_rt_entity **rt_se; |
457 | struct rt_rq **rt_rq; |
458 | |
459 | struct rt_bandwidth rt_bandwidth; |
460 | #endif |
461 | |
462 | #ifdef CONFIG_EXT_GROUP_SCHED |
463 | u32 scx_flags; /* SCX_TG_* */ |
464 | u32 scx_weight; |
465 | #endif |
466 | |
467 | struct rcu_head rcu; |
468 | struct list_head list; |
469 | |
470 | struct task_group *parent; |
471 | struct list_head siblings; |
472 | struct list_head children; |
473 | |
474 | #ifdef CONFIG_SCHED_AUTOGROUP |
475 | struct autogroup *autogroup; |
476 | #endif |
477 | |
478 | struct cfs_bandwidth cfs_bandwidth; |
479 | |
480 | #ifdef CONFIG_UCLAMP_TASK_GROUP |
481 | /* The two decimal precision [%] value requested from user-space */ |
482 | unsigned int uclamp_pct[UCLAMP_CNT]; |
483 | /* Clamp values requested for a task group */ |
484 | struct uclamp_se uclamp_req[UCLAMP_CNT]; |
485 | /* Effective clamp values used for a task group */ |
486 | struct uclamp_se uclamp[UCLAMP_CNT]; |
487 | #endif |
488 | |
489 | }; |
490 | |
491 | #ifdef CONFIG_GROUP_SCHED_WEIGHT |
492 | #define ROOT_TASK_GROUP_LOAD NICE_0_LOAD |
493 | |
494 | /* |
495 | * A weight of 0 or 1 can cause arithmetic problems. |
496 | * The weight of a cfs_rq is the sum of the weights of the entities |
497 | * queued on it, so the weight of an entity should not be too large; |
498 | * the same applies to the shares value of a task group. |
499 | * (The default weight is 1024 - so there's no practical |
500 | * limitation from this.) |
501 | */ |
502 | #define MIN_SHARES (1UL << 1) |
503 | #define MAX_SHARES (1UL << 18) |
504 | #endif |
505 | |
506 | typedef int (*tg_visitor)(struct task_group *, void *); |
507 | |
508 | extern int walk_tg_tree_from(struct task_group *from, |
509 | tg_visitor down, tg_visitor up, void *data); |
510 | |
511 | /* |
512 | * Iterate the full tree, calling @down when first entering a node and @up when |
513 | * leaving it for the final time. |
514 | * |
515 | * Caller must hold rcu_lock or sufficient equivalent. |
516 | */ |
517 | static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data) |
518 | { |
519 | return walk_tg_tree_from(&root_task_group, down, up, data); |
520 | } |
521 | |
522 | static inline struct task_group *css_tg(struct cgroup_subsys_state *css) |
523 | { |
524 | return css ? container_of(css, struct task_group, css) : NULL; |
525 | } |
526 | |
527 | extern int tg_nop(struct task_group *tg, void *data); |
528 | |
529 | #ifdef CONFIG_FAIR_GROUP_SCHED |
530 | extern void free_fair_sched_group(struct task_group *tg); |
531 | extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent); |
532 | extern void online_fair_sched_group(struct task_group *tg); |
533 | extern void unregister_fair_sched_group(struct task_group *tg); |
534 | #else |
535 | static inline void free_fair_sched_group(struct task_group *tg) { } |
536 | static inline int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) |
537 | { |
538 | return 1; |
539 | } |
540 | static inline void online_fair_sched_group(struct task_group *tg) { } |
541 | static inline void unregister_fair_sched_group(struct task_group *tg) { } |
542 | #endif |
543 | |
544 | extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, |
545 | struct sched_entity *se, int cpu, |
546 | struct sched_entity *parent); |
547 | extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent); |
548 | |
549 | extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b); |
550 | extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b); |
551 | extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq); |
552 | extern bool cfs_task_bw_constrained(struct task_struct *p); |
553 | |
554 | extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, |
555 | struct sched_rt_entity *rt_se, int cpu, |
556 | struct sched_rt_entity *parent); |
557 | extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us); |
558 | extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us); |
559 | extern long sched_group_rt_runtime(struct task_group *tg); |
560 | extern long sched_group_rt_period(struct task_group *tg); |
561 | extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk); |
562 | |
563 | extern struct task_group *sched_create_group(struct task_group *parent); |
564 | extern void sched_online_group(struct task_group *tg, |
565 | struct task_group *parent); |
566 | extern void sched_destroy_group(struct task_group *tg); |
567 | extern void sched_release_group(struct task_group *tg); |
568 | |
569 | extern void sched_move_task(struct task_struct *tsk, bool for_autogroup); |
570 | |
571 | #ifdef CONFIG_FAIR_GROUP_SCHED |
572 | extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); |
573 | |
574 | extern int sched_group_set_idle(struct task_group *tg, long idle); |
575 | |
576 | #ifdef CONFIG_SMP |
577 | extern void set_task_rq_fair(struct sched_entity *se, |
578 | struct cfs_rq *prev, struct cfs_rq *next); |
579 | #else /* !CONFIG_SMP */ |
580 | static inline void set_task_rq_fair(struct sched_entity *se, |
581 | struct cfs_rq *prev, struct cfs_rq *next) { } |
582 | #endif /* CONFIG_SMP */ |
583 | #else /* !CONFIG_FAIR_GROUP_SCHED */ |
584 | static inline int sched_group_set_shares(struct task_group *tg, unsigned long shares) { return 0; } |
585 | static inline int sched_group_set_idle(struct task_group *tg, long idle) { return 0; } |
586 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
587 | |
588 | #else /* CONFIG_CGROUP_SCHED */ |
589 | |
590 | struct cfs_bandwidth { }; |
591 | |
592 | static inline bool cfs_task_bw_constrained(struct task_struct *p) { return false; } |
593 | |
594 | #endif /* CONFIG_CGROUP_SCHED */ |
595 | |
596 | extern void unregister_rt_sched_group(struct task_group *tg); |
597 | extern void free_rt_sched_group(struct task_group *tg); |
598 | extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent); |
599 | |
600 | /* |
601 | * u64_u32_load/u64_u32_store |
602 | * |
603 | * Use a copy of a u64 value to protect against data races. This is only |
604 | * needed on 32-bit architectures. |
605 | */ |
606 | #ifdef CONFIG_64BIT |
607 | # define u64_u32_load_copy(var, copy) var |
608 | # define u64_u32_store_copy(var, copy, val) (var = val) |
609 | #else |
610 | # define u64_u32_load_copy(var, copy) \ |
611 | ({ \ |
612 | u64 __val, __val_copy; \ |
613 | do { \ |
614 | __val_copy = copy; \ |
615 | /* \ |
616 | * paired with u64_u32_store_copy(), ordering access \ |
617 | * to var and copy. \ |
618 | */ \ |
619 | smp_rmb(); \ |
620 | __val = var; \ |
621 | } while (__val != __val_copy); \ |
622 | __val; \ |
623 | }) |
624 | # define u64_u32_store_copy(var, copy, val) \ |
625 | do { \ |
626 | typeof(val) __val = (val); \ |
627 | var = __val; \ |
628 | /* \ |
629 | * paired with u64_u32_load_copy(), ordering access to var and \ |
630 | * copy. \ |
631 | */ \ |
632 | smp_wmb(); \ |
633 | copy = __val; \ |
634 | } while (0) |
635 | #endif |
636 | # define u64_u32_load(var) u64_u32_load_copy(var, var##_copy) |
637 | # define u64_u32_store(var, val) u64_u32_store_copy(var, var##_copy, val) |
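/*
 * Typical pairing (a sketch): a u64 field @foo that is read locklessly on
 * 32-bit gets a shadow @foo_copy next to it (see e.g. clock_pelt_idle and
 * clock_pelt_idle_copy in struct rq below). The writer publishes with:
 *
 *	u64_u32_store(rq->clock_pelt_idle, val);
 *
 * and a lockless reader retries until both halves are consistent:
 *
 *	idle = u64_u32_load(rq->clock_pelt_idle);
 */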
638 | |
639 | struct balance_callback { |
640 | struct balance_callback *next; |
641 | void (*func)(struct rq *rq); |
642 | }; |
643 | |
644 | /* CFS-related fields in a runqueue */ |
645 | struct cfs_rq { |
646 | struct load_weight load; |
647 | unsigned int nr_queued; |
648 | unsigned int h_nr_queued; /* SCHED_{NORMAL,BATCH,IDLE} */ |
649 | unsigned int h_nr_runnable; /* SCHED_{NORMAL,BATCH,IDLE} */ |
650 | unsigned int h_nr_idle; /* SCHED_IDLE */ |
651 | |
652 | s64 avg_vruntime; |
653 | u64 avg_load; |
654 | |
655 | u64 min_vruntime; |
656 | #ifdef CONFIG_SCHED_CORE |
657 | unsigned int forceidle_seq; |
658 | u64 min_vruntime_fi; |
659 | #endif |
660 | |
661 | struct rb_root_cached tasks_timeline; |
662 | |
663 | /* |
664 | * 'curr' points to currently running entity on this cfs_rq. |
665 | * It is set to NULL otherwise (i.e when none are currently running). |
666 | */ |
667 | struct sched_entity *curr; |
668 | struct sched_entity *next; |
669 | |
670 | #ifdef CONFIG_SMP |
671 | /* |
672 | * CFS load tracking |
673 | */ |
674 | struct sched_avg avg; |
675 | #ifndef CONFIG_64BIT |
676 | u64 last_update_time_copy; |
677 | #endif |
678 | struct { |
679 | raw_spinlock_t lock ____cacheline_aligned; |
680 | int nr; |
681 | unsigned long load_avg; |
682 | unsigned long util_avg; |
683 | unsigned long runnable_avg; |
684 | } removed; |
685 | |
686 | #ifdef CONFIG_FAIR_GROUP_SCHED |
687 | u64 last_update_tg_load_avg; |
688 | unsigned long tg_load_avg_contrib; |
689 | long propagate; |
690 | long prop_runnable_sum; |
691 | |
692 | /* |
693 | * h_load = weight * f(tg) |
694 | * |
695 | * Where f(tg) is the recursive weight fraction assigned to |
696 | * this group. |
697 | */ |
698 | unsigned long h_load; |
699 | u64 last_h_load_update; |
700 | struct sched_entity *h_load_next; |
701 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
702 | #endif /* CONFIG_SMP */ |
703 | |
704 | #ifdef CONFIG_FAIR_GROUP_SCHED |
705 | struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ |
706 | |
707 | /* |
708 | * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in |
709 | * a hierarchy). Non-leaf lrqs hold other higher schedulable entities |
710 | * (like users, containers etc.) |
711 | * |
712 | * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU. |
713 | * This list is used during load balance. |
714 | */ |
715 | int on_list; |
716 | struct list_head leaf_cfs_rq_list; |
717 | struct task_group *tg; /* group that "owns" this runqueue */ |
718 | |
719 | /* Locally cached copy of our task_group's idle value */ |
720 | int idle; |
721 | |
722 | #ifdef CONFIG_CFS_BANDWIDTH |
723 | int runtime_enabled; |
724 | s64 runtime_remaining; |
725 | |
726 | u64 throttled_pelt_idle; |
727 | #ifndef CONFIG_64BIT |
728 | u64 throttled_pelt_idle_copy; |
729 | #endif |
730 | u64 throttled_clock; |
731 | u64 throttled_clock_pelt; |
732 | u64 throttled_clock_pelt_time; |
733 | u64 throttled_clock_self; |
734 | u64 throttled_clock_self_time; |
735 | int throttled; |
736 | int throttle_count; |
737 | struct list_head throttled_list; |
738 | struct list_head throttled_csd_list; |
739 | #endif /* CONFIG_CFS_BANDWIDTH */ |
740 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
741 | }; |
742 | |
743 | #ifdef CONFIG_SCHED_CLASS_EXT |
744 | /* scx_rq->flags, protected by the rq lock */ |
745 | enum scx_rq_flags { |
746 | /* |
747 | * A hotplugged CPU starts scheduling before rq_online_scx(). Track |
748 | * ops.cpu_on/offline() state so that ops.enqueue/dispatch() are called |
749 | * only while the BPF scheduler considers the CPU to be online. |
750 | */ |
751 | SCX_RQ_ONLINE = 1 << 0, |
752 | SCX_RQ_CAN_STOP_TICK = 1 << 1, |
753 | SCX_RQ_BAL_PENDING = 1 << 2, /* balance hasn't run yet */ |
754 | SCX_RQ_BAL_KEEP = 1 << 3, /* balance decided to keep current */ |
755 | SCX_RQ_BYPASSING = 1 << 4, |
756 | SCX_RQ_CLK_VALID = 1 << 5, /* RQ clock is fresh and valid */ |
757 | |
758 | SCX_RQ_IN_WAKEUP = 1 << 16, |
759 | SCX_RQ_IN_BALANCE = 1 << 17, |
760 | }; |
761 | |
762 | struct scx_rq { |
763 | struct scx_dispatch_q local_dsq; |
764 | struct list_head runnable_list; /* runnable tasks on this rq */ |
765 | struct list_head ddsp_deferred_locals; /* deferred ddsps from enq */ |
766 | unsigned long ops_qseq; |
767 | u64 extra_enq_flags; /* see move_task_to_local_dsq() */ |
768 | u32 nr_running; |
769 | u32 cpuperf_target; /* [0, SCHED_CAPACITY_SCALE] */ |
770 | bool cpu_released; |
771 | u32 flags; |
772 | u64 clock; /* current per-rq clock -- see scx_bpf_now() */ |
773 | cpumask_var_t cpus_to_kick; |
774 | cpumask_var_t cpus_to_kick_if_idle; |
775 | cpumask_var_t cpus_to_preempt; |
776 | cpumask_var_t cpus_to_wait; |
777 | unsigned long pnt_seq; |
778 | struct balance_callback deferred_bal_cb; |
779 | struct irq_work deferred_irq_work; |
780 | struct irq_work kick_cpus_irq_work; |
781 | }; |
782 | #endif /* CONFIG_SCHED_CLASS_EXT */ |
783 | |
784 | static inline int rt_bandwidth_enabled(void) |
785 | { |
786 | return sysctl_sched_rt_runtime >= 0; |
787 | } |
788 | |
789 | /* RT IPI pull logic requires IRQ_WORK */ |
790 | #if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP) |
791 | # define HAVE_RT_PUSH_IPI |
792 | #endif |
793 | |
794 | /* Real-Time classes' related field in a runqueue: */ |
795 | struct rt_rq { |
796 | struct rt_prio_array active; |
797 | unsigned int rt_nr_running; |
798 | unsigned int rr_nr_running; |
799 | #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED |
800 | struct { |
801 | int curr; /* highest queued rt task prio */ |
802 | #ifdef CONFIG_SMP |
803 | int next; /* next highest */ |
804 | #endif |
805 | } highest_prio; |
806 | #endif |
807 | #ifdef CONFIG_SMP |
808 | bool overloaded; |
809 | struct plist_head pushable_tasks; |
810 | |
811 | #endif /* CONFIG_SMP */ |
812 | int rt_queued; |
813 | |
814 | #ifdef CONFIG_RT_GROUP_SCHED |
815 | int rt_throttled; |
816 | u64 rt_time; /* consumed RT time, goes up in update_curr_rt */ |
817 | u64 rt_runtime; /* allotted RT time, "slice" from rt_bandwidth, RT sharing/balancing */ |
818 | /* Nests inside the rq lock: */ |
819 | raw_spinlock_t rt_runtime_lock; |
820 | |
821 | unsigned int rt_nr_boosted; |
822 | |
823 | struct rq *rq; /* this is always top-level rq, cache? */ |
824 | #endif |
825 | #ifdef CONFIG_CGROUP_SCHED |
826 | struct task_group *tg; /* this tg has "this" rt_rq on given CPU for runnable entities */ |
827 | #endif |
828 | }; |
829 | |
830 | static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq) |
831 | { |
832 | return rt_rq->rt_queued && rt_rq->rt_nr_running; |
833 | } |
834 | |
835 | /* Deadline class' related fields in a runqueue */ |
836 | struct dl_rq { |
837 | /* runqueue is an rbtree, ordered by deadline */ |
838 | struct rb_root_cached root; |
839 | |
840 | unsigned int dl_nr_running; |
841 | |
842 | #ifdef CONFIG_SMP |
843 | /* |
844 | * Deadline values of the currently executing and the |
845 | * earliest ready task on this rq. Caching these facilitates |
846 | * the decision whether or not a ready but not running task |
847 | * should migrate somewhere else. |
848 | */ |
849 | struct { |
850 | u64 curr; |
851 | u64 next; |
852 | } earliest_dl; |
853 | |
854 | bool overloaded; |
855 | |
856 | /* |
857 | * Tasks on this rq that can be pushed away. They are kept in |
858 | * an rb-tree, ordered by tasks' deadlines, with caching |
859 | * of the leftmost (earliest deadline) element. |
860 | */ |
861 | struct rb_root_cached pushable_dl_tasks_root; |
862 | #else |
863 | struct dl_bw dl_bw; |
864 | #endif |
865 | /* |
866 | * "Active utilization" for this runqueue: increased when a |
867 | * task wakes up (becomes TASK_RUNNING) and decreased when a |
868 | * task blocks |
869 | */ |
870 | u64 running_bw; |
871 | |
872 | /* |
873 | * Utilization of the tasks "assigned" to this runqueue (including |
874 | * the tasks that are in runqueue and the tasks that executed on this |
875 | * CPU and blocked). Increased when a task moves to this runqueue, and |
876 | * decreased when the task moves away (migrates, changes scheduling |
877 | * policy, or terminates). |
878 | * This is needed to compute the "inactive utilization" for the |
879 | * runqueue (inactive utilization = this_bw - running_bw). |
880 | */ |
881 | u64 this_bw; |
882 | u64 extra_bw; |
883 | |
884 | /* |
885 | * Maximum available bandwidth for reclaiming by SCHED_FLAG_RECLAIM |
886 | * tasks of this rq. Used in calculation of reclaimable bandwidth(GRUB). |
887 | */ |
888 | u64 max_bw; |
889 | |
890 | /* |
891 | * Inverse of the fraction of CPU utilization that can be reclaimed |
892 | * by the GRUB algorithm. |
893 | */ |
894 | u64 bw_ratio; |
895 | }; |
896 | |
897 | #ifdef CONFIG_FAIR_GROUP_SCHED |
898 | |
899 | /* An entity is a task if it doesn't "own" a runqueue */ |
900 | #define entity_is_task(se) (!se->my_q) |
901 | |
902 | static inline void se_update_runnable(struct sched_entity *se) |
903 | { |
904 | if (!entity_is_task(se)) |
905 | se->runnable_weight = se->my_q->h_nr_runnable; |
906 | } |
907 | |
908 | static inline long se_runnable(struct sched_entity *se) |
909 | { |
910 | if (se->sched_delayed) |
911 | return false; |
912 | |
913 | if (entity_is_task(se)) |
914 | return !!se->on_rq; |
915 | else |
916 | return se->runnable_weight; |
917 | } |
918 | |
919 | #else /* !CONFIG_FAIR_GROUP_SCHED: */ |
920 | |
921 | #define entity_is_task(se) 1 |
922 | |
923 | static inline void se_update_runnable(struct sched_entity *se) { } |
924 | |
925 | static inline long se_runnable(struct sched_entity *se) |
926 | { |
927 | if (se->sched_delayed) |
928 | return false; |
929 | |
930 | return !!se->on_rq; |
931 | } |
932 | |
933 | #endif /* !CONFIG_FAIR_GROUP_SCHED */ |
934 | |
935 | #ifdef CONFIG_SMP |
936 | /* |
937 | * XXX we want to get rid of these helpers and use the full load resolution. |
938 | */ |
939 | static inline long se_weight(struct sched_entity *se) |
940 | { |
941 | return scale_load_down(se->load.weight); |
942 | } |
943 | |
944 | |
945 | static inline bool sched_asym_prefer(int a, int b) |
946 | { |
947 | return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b); |
948 | } |
949 | |
950 | struct perf_domain { |
951 | struct em_perf_domain *em_pd; |
952 | struct perf_domain *next; |
953 | struct rcu_head rcu; |
954 | }; |
955 | |
956 | /* |
957 | * We add the notion of a root-domain which will be used to define per-domain |
958 | * variables. Each exclusive cpuset essentially defines an island domain by |
959 | * fully partitioning the member CPUs from any other cpuset. Whenever a new |
960 | * exclusive cpuset is created, we also create and attach a new root-domain |
961 | * object. |
962 | * |
963 | */ |
964 | struct root_domain { |
965 | atomic_t refcount; |
966 | atomic_t rto_count; |
967 | struct rcu_head rcu; |
968 | cpumask_var_t span; |
969 | cpumask_var_t online; |
970 | |
971 | /* |
972 | * Indicate pullable load on at least one CPU, e.g: |
973 | * - More than one runnable task |
974 | * - Running task is misfit |
975 | */ |
976 | bool overloaded; |
977 | |
978 | /* Indicate one or more CPUs over-utilized (tipping point) */ |
979 | bool overutilized; |
980 | |
981 | /* |
982 | * The bit corresponding to a CPU gets set here if such CPU has more |
983 | * than one runnable -deadline task (as it is below for RT tasks). |
984 | */ |
985 | cpumask_var_t dlo_mask; |
986 | atomic_t dlo_count; |
987 | struct dl_bw dl_bw; |
988 | struct cpudl cpudl; |
989 | |
990 | /* |
991 | * Indicate whether a root_domain's dl_bw has been checked or |
992 | * updated. It's a monotonically increasing value. |
993 | * |
994 | * Corner cases such as wrap-around could be a problem, but given that |
995 | * u64 is 'big enough', this isn't a concern in practice. |
996 | */ |
997 | u64 visit_cookie; |
998 | |
999 | #ifdef HAVE_RT_PUSH_IPI |
1000 | /* |
1001 | * For IPI pull requests, loop across the rto_mask. |
1002 | */ |
1003 | struct irq_work rto_push_work; |
1004 | raw_spinlock_t rto_lock; |
1005 | /* These are only updated and read within rto_lock */ |
1006 | int rto_loop; |
1007 | int rto_cpu; |
1008 | /* These atomics are updated outside of a lock */ |
1009 | atomic_t rto_loop_next; |
1010 | atomic_t rto_loop_start; |
1011 | #endif |
1012 | /* |
1013 | * The "RT overload" flag: it gets set if a CPU has more than |
1014 | * one runnable RT task. |
1015 | */ |
1016 | cpumask_var_t rto_mask; |
1017 | struct cpupri cpupri; |
1018 | |
1019 | /* |
1020 | * NULL-terminated list of performance domains intersecting with the |
1021 | * CPUs of the rd. Protected by RCU. |
1022 | */ |
1023 | struct perf_domain __rcu *pd; |
1024 | }; |
1025 | |
1026 | extern void init_defrootdomain(void); |
1027 | extern int sched_init_domains(const struct cpumask *cpu_map); |
1028 | extern void rq_attach_root(struct rq *rq, struct root_domain *rd); |
1029 | extern void sched_get_rd(struct root_domain *rd); |
1030 | extern void sched_put_rd(struct root_domain *rd); |
1031 | |
1032 | static inline int get_rd_overloaded(struct root_domain *rd) |
1033 | { |
1034 | return READ_ONCE(rd->overloaded); |
1035 | } |
1036 | |
1037 | static inline void set_rd_overloaded(struct root_domain *rd, int status) |
1038 | { |
1039 | if (get_rd_overloaded(rd) != status) |
1040 | WRITE_ONCE(rd->overloaded, status); |
1041 | } |
1042 | |
1043 | #ifdef HAVE_RT_PUSH_IPI |
1044 | extern void rto_push_irq_work_func(struct irq_work *work); |
1045 | #endif |
1046 | #endif /* CONFIG_SMP */ |
1047 | |
1048 | #ifdef CONFIG_UCLAMP_TASK |
1049 | /* |
1050 | * struct uclamp_bucket - Utilization clamp bucket |
1051 | * @value: utilization clamp value for tasks on this clamp bucket |
1052 | * @tasks: number of RUNNABLE tasks on this clamp bucket |
1053 | * |
1054 | * Keep track of how many tasks are RUNNABLE for a given utilization |
1055 | * clamp value. |
1056 | */ |
1057 | struct uclamp_bucket { |
1058 | unsigned long value : bits_per(SCHED_CAPACITY_SCALE); |
1059 | unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE); |
1060 | }; |
1061 | |
1062 | /* |
1063 | * struct uclamp_rq - rq's utilization clamp |
1064 | * @value: currently active clamp values for a rq |
1065 | * @bucket: utilization clamp buckets affecting a rq |
1066 | * |
1067 | * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values. |
1068 | * A clamp value is affecting a rq when there is at least one task RUNNABLE |
1069 | * (or actually running) with that value. |
1070 | * |
1071 | * There are up to UCLAMP_CNT possible different clamp values, currently there |
1072 | * are only two: minimum utilization and maximum utilization. |
1073 | * |
1074 | * All utilization clamping values are MAX aggregated, since: |
1075 | * - for util_min: we want to run the CPU at least at the max of the minimum |
1076 | * utilization required by its currently RUNNABLE tasks. |
1077 | * - for util_max: we want to allow the CPU to run up to the max of the |
1078 | * maximum utilization allowed by its currently RUNNABLE tasks. |
1079 | * |
1080 | * Since on each system we expect only a limited number of different |
1081 | * utilization clamp values (UCLAMP_BUCKETS), use a simple array to track |
1082 | * the metrics required to compute all the per-rq utilization clamp values. |
1083 | */ |
1084 | struct uclamp_rq { |
1085 | unsigned int value; |
1086 | struct uclamp_bucket bucket[UCLAMP_BUCKETS]; |
1087 | }; |
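/*
 * Aggregation example: with two RUNNABLE tasks whose util_min clamps are
 * 200 and 512, the rq-wide UCLAMP_MIN value is max(200, 512) == 512; once
 * the 512 task dequeues, the value falls back toward the remaining task's
 * clamp (modulo bucket granularity).
 */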
1088 | |
1089 | DECLARE_STATIC_KEY_FALSE(sched_uclamp_used); |
1090 | #endif /* CONFIG_UCLAMP_TASK */ |
1091 | |
1092 | /* |
1093 | * This is the main, per-CPU runqueue data structure. |
1094 | * |
1095 | * Locking rule: code that needs to lock multiple runqueues (such as |
1096 | * the load balancing or the thread migration code) must acquire the |
1097 | * locks in ascending &runqueue order. |
1098 | */ |
1099 | struct rq { |
1100 | /* runqueue lock: */ |
1101 | raw_spinlock_t __lock; |
1102 | |
1103 | unsigned int nr_running; |
1104 | #ifdef CONFIG_NUMA_BALANCING |
1105 | unsigned int nr_numa_running; |
1106 | unsigned int nr_preferred_running; |
1107 | unsigned int numa_migrate_on; |
1108 | #endif |
1109 | #ifdef CONFIG_NO_HZ_COMMON |
1110 | #ifdef CONFIG_SMP |
1111 | unsigned long last_blocked_load_update_tick; |
1112 | unsigned int has_blocked_load; |
1113 | call_single_data_t nohz_csd; |
1114 | #endif /* CONFIG_SMP */ |
1115 | unsigned int nohz_tick_stopped; |
1116 | atomic_t nohz_flags; |
1117 | #endif /* CONFIG_NO_HZ_COMMON */ |
1118 | |
1119 | #ifdef CONFIG_SMP |
1120 | unsigned int ttwu_pending; |
1121 | #endif |
1122 | u64 nr_switches; |
1123 | |
1124 | #ifdef CONFIG_UCLAMP_TASK |
1125 | /* Utilization clamp values based on CPU's RUNNABLE tasks */ |
1126 | struct uclamp_rq uclamp[UCLAMP_CNT] ____cacheline_aligned; |
1127 | unsigned int uclamp_flags; |
1128 | #define UCLAMP_FLAG_IDLE 0x01 |
1129 | #endif |
1130 | |
1131 | struct cfs_rq cfs; |
1132 | struct rt_rq rt; |
1133 | struct dl_rq dl; |
1134 | #ifdef CONFIG_SCHED_CLASS_EXT |
1135 | struct scx_rq scx; |
1136 | #endif |
1137 | |
1138 | struct sched_dl_entity fair_server; |
1139 | |
1140 | #ifdef CONFIG_FAIR_GROUP_SCHED |
1141 | /* list of leaf cfs_rq on this CPU: */ |
1142 | struct list_head leaf_cfs_rq_list; |
1143 | struct list_head *tmp_alone_branch; |
1144 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
1145 | |
1146 | /* |
1147 | * This is part of a global counter where only the total sum |
1148 | * over all CPUs matters. A task can increase this counter on |
1149 | * one CPU and if it got migrated afterwards it may decrease |
1150 | * it on another CPU. Always updated under the runqueue lock: |
1151 | */ |
1152 | unsigned int nr_uninterruptible; |
1153 | |
1154 | union { |
1155 | struct task_struct __rcu *donor; /* Scheduler context */ |
1156 | struct task_struct __rcu *curr; /* Execution context */ |
1157 | }; |
1158 | struct sched_dl_entity *dl_server; |
1159 | struct task_struct *idle; |
1160 | struct task_struct *stop; |
1161 | unsigned long next_balance; |
1162 | struct mm_struct *prev_mm; |
1163 | |
1164 | unsigned int clock_update_flags; |
1165 | u64 clock; |
1166 | /* Ensure that all clocks are in the same cache line */ |
1167 | u64 clock_task ____cacheline_aligned; |
1168 | u64 clock_pelt; |
1169 | unsigned long lost_idle_time; |
1170 | u64 clock_pelt_idle; |
1171 | u64 clock_idle; |
1172 | #ifndef CONFIG_64BIT |
1173 | u64 clock_pelt_idle_copy; |
1174 | u64 clock_idle_copy; |
1175 | #endif |
1176 | |
1177 | atomic_t nr_iowait; |
1178 | |
1179 | u64 last_seen_need_resched_ns; |
1180 | int ticks_without_resched; |
1181 | |
1182 | #ifdef CONFIG_MEMBARRIER |
1183 | int membarrier_state; |
1184 | #endif |
1185 | |
1186 | #ifdef CONFIG_SMP |
1187 | struct root_domain *rd; |
1188 | struct sched_domain __rcu *sd; |
1189 | |
1190 | unsigned long cpu_capacity; |
1191 | |
1192 | struct balance_callback *balance_callback; |
1193 | |
1194 | unsigned char nohz_idle_balance; |
1195 | unsigned char idle_balance; |
1196 | |
1197 | unsigned long misfit_task_load; |
1198 | |
1199 | /* For active balancing */ |
1200 | int active_balance; |
1201 | int push_cpu; |
1202 | struct cpu_stop_work active_balance_work; |
1203 | |
1204 | /* CPU of this runqueue: */ |
1205 | int cpu; |
1206 | int online; |
1207 | |
1208 | struct list_head cfs_tasks; |
1209 | |
1210 | struct sched_avg avg_rt; |
1211 | struct sched_avg avg_dl; |
1212 | #ifdef CONFIG_HAVE_SCHED_AVG_IRQ |
1213 | struct sched_avg avg_irq; |
1214 | #endif |
1215 | #ifdef CONFIG_SCHED_HW_PRESSURE |
1216 | struct sched_avg avg_hw; |
1217 | #endif |
1218 | u64 idle_stamp; |
1219 | u64 avg_idle; |
1220 | |
1221 | /* This is used to determine avg_idle's max value */ |
1222 | u64 max_idle_balance_cost; |
1223 | |
1224 | #ifdef CONFIG_HOTPLUG_CPU |
1225 | struct rcuwait hotplug_wait; |
1226 | #endif |
1227 | #endif /* CONFIG_SMP */ |
1228 | |
1229 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING |
1230 | u64 prev_irq_time; |
1231 | u64 psi_irq_time; |
1232 | #endif |
1233 | #ifdef CONFIG_PARAVIRT |
1234 | u64 prev_steal_time; |
1235 | #endif |
1236 | #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING |
1237 | u64 prev_steal_time_rq; |
1238 | #endif |
1239 | |
1240 | /* calc_load related fields */ |
1241 | unsigned long calc_load_update; |
1242 | long calc_load_active; |
1243 | |
1244 | #ifdef CONFIG_SCHED_HRTICK |
1245 | #ifdef CONFIG_SMP |
1246 | call_single_data_t hrtick_csd; |
1247 | #endif |
1248 | struct hrtimer hrtick_timer; |
1249 | ktime_t hrtick_time; |
1250 | #endif |
1251 | |
1252 | #ifdef CONFIG_SCHEDSTATS |
1253 | /* latency stats */ |
1254 | struct sched_info rq_sched_info; |
1255 | unsigned long long rq_cpu_time; |
1256 | |
1257 | /* sys_sched_yield() stats */ |
1258 | unsigned int yld_count; |
1259 | |
1260 | /* schedule() stats */ |
1261 | unsigned int sched_count; |
1262 | unsigned int sched_goidle; |
1263 | |
1264 | /* try_to_wake_up() stats */ |
1265 | unsigned int ttwu_count; |
1266 | unsigned int ttwu_local; |
1267 | #endif |
1268 | |
1269 | #ifdef CONFIG_CPU_IDLE |
1270 | /* Must be inspected within a RCU lock section */ |
1271 | struct cpuidle_state *idle_state; |
1272 | #endif |
1273 | |
1274 | #ifdef CONFIG_SMP |
1275 | unsigned int nr_pinned; |
1276 | #endif |
1277 | unsigned int push_busy; |
1278 | struct cpu_stop_work push_work; |
1279 | |
1280 | #ifdef CONFIG_SCHED_CORE |
1281 | /* per rq */ |
1282 | struct rq *core; |
1283 | struct task_struct *core_pick; |
1284 | struct sched_dl_entity *core_dl_server; |
1285 | unsigned int core_enabled; |
1286 | unsigned int core_sched_seq; |
1287 | struct rb_root core_tree; |
1288 | |
1289 | /* shared state -- careful with sched_core_cpu_deactivate() */ |
1290 | unsigned int core_task_seq; |
1291 | unsigned int core_pick_seq; |
1292 | unsigned long core_cookie; |
1293 | unsigned int core_forceidle_count; |
1294 | unsigned int core_forceidle_seq; |
1295 | unsigned int core_forceidle_occupation; |
1296 | u64 core_forceidle_start; |
1297 | #endif |
1298 | |
1299 | /* Scratch cpumask to be temporarily used under rq_lock */ |
1300 | cpumask_var_t scratch_mask; |
1301 | |
1302 | #if defined(CONFIG_CFS_BANDWIDTH) && defined(CONFIG_SMP) |
1303 | call_single_data_t cfsb_csd; |
1304 | struct list_head cfsb_csd_list; |
1305 | #endif |
1306 | }; |
1307 | |
1308 | #ifdef CONFIG_FAIR_GROUP_SCHED |
1309 | |
1310 | /* CPU runqueue to which this cfs_rq is attached */ |
1311 | static inline struct rq *rq_of(struct cfs_rq *cfs_rq) |
1312 | { |
1313 | return cfs_rq->rq; |
1314 | } |
1315 | |
1316 | #else |
1317 | |
1318 | static inline struct rq *rq_of(struct cfs_rq *cfs_rq) |
1319 | { |
1320 | return container_of(cfs_rq, struct rq, cfs); |
1321 | } |
1322 | #endif |
1323 | |
1324 | static inline int cpu_of(struct rq *rq) |
1325 | { |
1326 | #ifdef CONFIG_SMP |
1327 | return rq->cpu; |
1328 | #else |
1329 | return 0; |
1330 | #endif |
1331 | } |
1332 | |
1333 | #define MDF_PUSH 0x01 |
1334 | |
1335 | static inline bool is_migration_disabled(struct task_struct *p) |
1336 | { |
1337 | #ifdef CONFIG_SMP |
1338 | return p->migration_disabled; |
1339 | #else |
1340 | return false; |
1341 | #endif |
1342 | } |
1343 | |
1344 | DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); |
1345 | |
1346 | #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) |
1347 | #define this_rq() this_cpu_ptr(&runqueues) |
1348 | #define task_rq(p) cpu_rq(task_cpu(p)) |
1349 | #define cpu_curr(cpu) (cpu_rq(cpu)->curr) |
1350 | #define raw_rq() raw_cpu_ptr(&runqueues) |
1351 | |
1352 | static inline void rq_set_donor(struct rq *rq, struct task_struct *t) |
1353 | { |
1354 | /* Do nothing */ |
1355 | } |
1356 | |
1357 | #ifdef CONFIG_SCHED_CORE |
1358 | static inline struct cpumask *sched_group_span(struct sched_group *sg); |
1359 | |
1360 | DECLARE_STATIC_KEY_FALSE(__sched_core_enabled); |
1361 | |
1362 | static inline bool sched_core_enabled(struct rq *rq) |
1363 | { |
1364 | return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled; |
1365 | } |
1366 | |
1367 | static inline bool sched_core_disabled(void) |
1368 | { |
1369 | return !static_branch_unlikely(&__sched_core_enabled); |
1370 | } |
1371 | |
1372 | /* |
1373 | * Be careful with this function; not for general use. The return value isn't |
1374 | * stable unless you actually hold a relevant rq->__lock. |
1375 | */ |
1376 | static inline raw_spinlock_t *rq_lockp(struct rq *rq) |
1377 | { |
1378 | if (sched_core_enabled(rq)) |
1379 | return &rq->core->__lock; |
1380 | |
1381 | return &rq->__lock; |
1382 | } |
1383 | |
1384 | static inline raw_spinlock_t *__rq_lockp(struct rq *rq) |
1385 | { |
1386 | if (rq->core_enabled) |
1387 | return &rq->core->__lock; |
1388 | |
1389 | return &rq->__lock; |
1390 | } |
1391 | |
1392 | extern bool |
1393 | cfs_prio_less(const struct task_struct *a, const struct task_struct *b, bool fi); |
1394 | |
1395 | extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi); |
1396 | |
1397 | /* |
1398 | * Helpers to check if the CPU's core cookie matches with the task's cookie |
1399 | * when core scheduling is enabled. |
1400 | * A special case is that the task's cookie always matches with CPU's core |
1401 | * cookie if the CPU is in an idle core. |
1402 | */ |
1403 | static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) |
1404 | { |
1405 | /* Ignore cookie match if core scheduler is not enabled on the CPU. */ |
1406 | if (!sched_core_enabled(rq)) |
1407 | return true; |
1408 | |
1409 | return rq->core->core_cookie == p->core_cookie; |
1410 | } |
1411 | |
1412 | static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) |
1413 | { |
1414 | bool idle_core = true; |
1415 | int cpu; |
1416 | |
1417 | /* Ignore cookie match if core scheduler is not enabled on the CPU. */ |
1418 | if (!sched_core_enabled(rq)) |
1419 | return true; |
1420 | |
1421 | for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) { |
1422 | if (!available_idle_cpu(cpu)) { |
1423 | idle_core = false; |
1424 | break; |
1425 | } |
1426 | } |
1427 | |
1428 | /* |
1429 | * A CPU in an idle core is always the best choice for tasks with |
1430 | * cookies. |
1431 | */ |
1432 | return idle_core || rq->core->core_cookie == p->core_cookie; |
1433 | } |
1434 | |
1435 | static inline bool sched_group_cookie_match(struct rq *rq, |
1436 | struct task_struct *p, |
1437 | struct sched_group *group) |
1438 | { |
1439 | int cpu; |
1440 | |
1441 | /* Ignore cookie match if core scheduler is not enabled on the CPU. */ |
1442 | if (!sched_core_enabled(rq)) |
1443 | return true; |
1444 | |
1445 | for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) { |
1446 | if (sched_core_cookie_match(cpu_rq(cpu), p)) |
1447 | return true; |
1448 | } |
1449 | return false; |
1450 | } |
1451 | |
1452 | static inline bool sched_core_enqueued(struct task_struct *p) |
1453 | { |
1454 | return !RB_EMPTY_NODE(&p->core_node); |
1455 | } |
1456 | |
1457 | extern void sched_core_enqueue(struct rq *rq, struct task_struct *p); |
1458 | extern void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags); |
1459 | |
1460 | extern void sched_core_get(void); |
1461 | extern void sched_core_put(void); |
1462 | |
1463 | #else /* !CONFIG_SCHED_CORE: */ |
1464 | |
1465 | static inline bool sched_core_enabled(struct rq *rq) |
1466 | { |
1467 | return false; |
1468 | } |
1469 | |
1470 | static inline bool sched_core_disabled(void) |
1471 | { |
1472 | return true; |
1473 | } |
1474 | |
1475 | static inline raw_spinlock_t *rq_lockp(struct rq *rq) |
1476 | { |
1477 | return &rq->__lock; |
1478 | } |
1479 | |
1480 | static inline raw_spinlock_t *__rq_lockp(struct rq *rq) |
1481 | { |
1482 | return &rq->__lock; |
1483 | } |
1484 | |
1485 | static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) |
1486 | { |
1487 | return true; |
1488 | } |
1489 | |
1490 | static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) |
1491 | { |
1492 | return true; |
1493 | } |
1494 | |
1495 | static inline bool sched_group_cookie_match(struct rq *rq, |
1496 | struct task_struct *p, |
1497 | struct sched_group *group) |
1498 | { |
1499 | return true; |
1500 | } |
1501 | |
1502 | #endif /* !CONFIG_SCHED_CORE */ |
1503 | #ifdef CONFIG_RT_GROUP_SCHED |
1504 | # ifdef CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED |
1505 | DECLARE_STATIC_KEY_FALSE(rt_group_sched); |
1506 | static inline bool rt_group_sched_enabled(void) |
1507 | { |
1508 | return static_branch_unlikely(&rt_group_sched); |
1509 | } |
1510 | # else |
1511 | DECLARE_STATIC_KEY_TRUE(rt_group_sched); |
1512 | static inline bool rt_group_sched_enabled(void) |
1513 | { |
1514 | return static_branch_likely(&rt_group_sched); |
1515 | } |
1516 | # endif /* CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED */ |
1517 | #else |
1518 | # define rt_group_sched_enabled() false |
1519 | #endif /* CONFIG_RT_GROUP_SCHED */ |
1520 | |
1521 | static inline void lockdep_assert_rq_held(struct rq *rq) |
1522 | { |
1523 | lockdep_assert_held(__rq_lockp(rq)); |
1524 | } |
1525 | |
1526 | extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass); |
1527 | extern bool raw_spin_rq_trylock(struct rq *rq); |
1528 | extern void raw_spin_rq_unlock(struct rq *rq); |
1529 | |
1530 | static inline void raw_spin_rq_lock(struct rq *rq) |
1531 | { |
1532 | raw_spin_rq_lock_nested(rq, 0); |
1533 | } |
1534 | |
1535 | static inline void raw_spin_rq_lock_irq(struct rq *rq) |
1536 | { |
1537 | local_irq_disable(); |
1538 | raw_spin_rq_lock(rq); |
1539 | } |
1540 | |
1541 | static inline void raw_spin_rq_unlock_irq(struct rq *rq) |
1542 | { |
1543 | raw_spin_rq_unlock(rq); |
1544 | local_irq_enable(); |
1545 | } |
1546 | |
1547 | static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq) |
1548 | { |
1549 | unsigned long flags; |
1550 | |
1551 | local_irq_save(flags); |
1552 | raw_spin_rq_lock(rq); |
1553 | |
1554 | return flags; |
1555 | } |
1556 | |
1557 | static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags) |
1558 | { |
1559 | raw_spin_rq_unlock(rq); |
1560 | local_irq_restore(flags); |
1561 | } |
1562 | |
1563 | #define raw_spin_rq_lock_irqsave(rq, flags) \ |
1564 | do { \ |
1565 | flags = _raw_spin_rq_lock_irqsave(rq); \ |
1566 | } while (0) |
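/*
 * Typical usage of the IRQ-safe helpers above (illustrative only):
 *
 *	unsigned long flags;
 *
 *	raw_spin_rq_lock_irqsave(rq, flags);
 *	... update rq state ...
 *	raw_spin_rq_unlock_irqrestore(rq, flags);
 */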
1567 | |
1568 | #ifdef CONFIG_SCHED_SMT |
1569 | extern void __update_idle_core(struct rq *rq); |
1570 | |
1571 | static inline void update_idle_core(struct rq *rq) |
1572 | { |
1573 | if (static_branch_unlikely(&sched_smt_present)) |
1574 | __update_idle_core(rq); |
1575 | } |
1576 | |
1577 | #else |
1578 | static inline void update_idle_core(struct rq *rq) { } |
1579 | #endif |
1580 | |
1581 | #ifdef CONFIG_FAIR_GROUP_SCHED |
1582 | |
1583 | static inline struct task_struct *task_of(struct sched_entity *se) |
1584 | { |
1585 | WARN_ON_ONCE(!entity_is_task(se)); |
1586 | return container_of(se, struct task_struct, se); |
1587 | } |
1588 | |
1589 | static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) |
1590 | { |
1591 | return p->se.cfs_rq; |
1592 | } |
1593 | |
1594 | /* runqueue on which this entity is (to be) queued */ |
1595 | static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se) |
1596 | { |
1597 | return se->cfs_rq; |
1598 | } |
1599 | |
1600 | /* runqueue "owned" by this group */ |
1601 | static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) |
1602 | { |
1603 | return grp->my_q; |
1604 | } |
1605 | |
1606 | #else /* !CONFIG_FAIR_GROUP_SCHED: */ |
1607 | |
1608 | #define task_of(_se) container_of(_se, struct task_struct, se) |
1609 | |
1610 | static inline struct cfs_rq *task_cfs_rq(const struct task_struct *p) |
1611 | { |
1612 | return &task_rq(p)->cfs; |
1613 | } |
1614 | |
1615 | static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se) |
1616 | { |
1617 | const struct task_struct *p = task_of(se); |
1618 | struct rq *rq = task_rq(p); |
1619 | |
1620 | return &rq->cfs; |
1621 | } |
1622 | |
1623 | /* runqueue "owned" by this group */ |
1624 | static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) |
1625 | { |
1626 | return NULL; |
1627 | } |
1628 | |
1629 | #endif /* !CONFIG_FAIR_GROUP_SCHED */ |
1630 | |
1631 | extern void update_rq_clock(struct rq *rq); |
1632 | |
1633 | /* |
1634 | * rq::clock_update_flags bits |
1635 | * |
1636 | * %RQCF_REQ_SKIP - will request skipping of clock update on the next |
1637 | * call to __schedule(). This is an optimisation to avoid |
1638 | * neighbouring rq clock updates. |
1639 | * |
1640 | * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is |
1641 | * in effect and calls to update_rq_clock() are being ignored. |
1642 | * |
1643 | * %RQCF_UPDATED - is a debug flag that indicates whether a call has been |
1644 | * made to update_rq_clock() since the last time rq::lock was pinned. |
1645 | * |
1646 | * If inside of __schedule(), clock_update_flags will have been |
1647 | * shifted left (a left shift is a cheap operation for the fast path |
1648 | * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use, |
1649 | * |
1650 | * if (rq->clock_update_flags >= RQCF_UPDATED) |
1651 | * |
1652 | * to check if %RQCF_UPDATED is set. It'll never be shifted more than |
1653 | * one position though, because the next rq_unpin_lock() will shift it |
1654 | * back. |
1655 | */ |
1656 | #define RQCF_REQ_SKIP 0x01 |
1657 | #define RQCF_ACT_SKIP 0x02 |
1658 | #define RQCF_UPDATED 0x04 |
1659 | |
1660 | static inline void assert_clock_updated(struct rq *rq) |
1661 | { |
1662 | /* |
1663 | * The only reason for not seeing a clock update since the |
1664 | * last rq_pin_lock() is if we're currently skipping updates. |
1665 | */ |
1666 | WARN_ON_ONCE(rq->clock_update_flags < RQCF_ACT_SKIP); |
1667 | } |
1668 | |
1669 | static inline u64 rq_clock(struct rq *rq) |
1670 | { |
1671 | lockdep_assert_rq_held(rq); |
1672 | assert_clock_updated(rq); |
1673 | |
1674 | return rq->clock; |
1675 | } |
1676 | |
1677 | static inline u64 rq_clock_task(struct rq *rq) |
1678 | { |
1679 | lockdep_assert_rq_held(rq); |
1680 | assert_clock_updated(rq); |
1681 | |
1682 | return rq->clock_task; |
1683 | } |
1684 | |
1685 | static inline void rq_clock_skip_update(struct rq *rq) |
1686 | { |
1687 | lockdep_assert_rq_held(rq); |
1688 | rq->clock_update_flags |= RQCF_REQ_SKIP; |
1689 | } |
1690 | |
1691 | /* |
1692 | * See rt task throttling, which is the only time a skip |
1693 | * request is canceled. |
1694 | */ |
1695 | static inline void rq_clock_cancel_skipupdate(struct rq *rq) |
1696 | { |
1697 | lockdep_assert_rq_held(rq); |
1698 | rq->clock_update_flags &= ~RQCF_REQ_SKIP; |
1699 | } |
1700 | |
1701 | /* |
1702 | * During CPU offlining and rq-wide unthrottling we can end up calling |
1703 | * update_rq_clock() for several cfs and rt runqueues (typically when |
1704 | * iterating over them with list_for_each_entry_*()). |
1705 | * rq_clock_start_loop_update() can be called after updating the clock |
1706 | * once and before iterating over the list to prevent multiple updates. |
1707 | * After the traversal, rq_clock_stop_loop_update() must be called to |
1708 | * clear RQCF_ACT_SKIP in rq->clock_update_flags. |
1709 | */ |
1710 | static inline void rq_clock_start_loop_update(struct rq *rq) |
1711 | { |
1712 | lockdep_assert_rq_held(rq); |
1713 | WARN_ON_ONCE(rq->clock_update_flags & RQCF_ACT_SKIP); |
1714 | rq->clock_update_flags |= RQCF_ACT_SKIP; |
1715 | } |
1716 | |
1717 | static inline void rq_clock_stop_loop_update(struct rq *rq) |
1718 | { |
1719 | lockdep_assert_rq_held(rq); |
1720 | rq->clock_update_flags &= ~RQCF_ACT_SKIP; |
1721 | } |
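
/*
 * Illustrative sketch of the loop-update pattern described above (the task
 * group walk and the do_unthrottle() call site are hypothetical, standing in
 * for any per-runqueue work that would otherwise re-update the clock):
 *
 *	rq_lock_irqsave(rq, &rf);
 *	update_rq_clock(rq);
 *	rq_clock_start_loop_update(rq);
 *	list_for_each_entry_rcu(tg, &task_groups, list)
 *		do_unthrottle(tg, rq);		// no further clock updates here
 *	rq_clock_stop_loop_update(rq);
 *	rq_unlock_irqrestore(rq, &rf);
 */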
1722 | |
1723 | struct rq_flags { |
1724 | unsigned long flags; |
1725 | struct pin_cookie cookie; |
1726 | /* |
1727 | * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the |
1728 | * current pin context is stashed here in case it needs to be |
1729 | * restored in rq_repin_lock(). |
1730 | */ |
1731 | unsigned int clock_update_flags; |
1732 | }; |
1733 | |
1734 | extern struct balance_callback balance_push_callback; |
1735 | |
1736 | #ifdef CONFIG_SCHED_CLASS_EXT |
1737 | extern const struct sched_class ext_sched_class; |
1738 | |
1739 | DECLARE_STATIC_KEY_FALSE(__scx_enabled); /* SCX BPF scheduler loaded */ |
1740 | DECLARE_STATIC_KEY_FALSE(__scx_switched_all); /* all fair class tasks on SCX */ |
1741 | |
1742 | #define scx_enabled() static_branch_unlikely(&__scx_enabled) |
1743 | #define scx_switched_all() static_branch_unlikely(&__scx_switched_all) |
1744 | |
1745 | static inline void scx_rq_clock_update(struct rq *rq, u64 clock) |
1746 | { |
1747 | if (!scx_enabled()) |
1748 | return; |
1749 | WRITE_ONCE(rq->scx.clock, clock); |
1750 | smp_store_release(&rq->scx.flags, rq->scx.flags | SCX_RQ_CLK_VALID); |
1751 | } |
1752 | |
1753 | static inline void scx_rq_clock_invalidate(struct rq *rq) |
1754 | { |
1755 | if (!scx_enabled()) |
1756 | return; |
1757 | WRITE_ONCE(rq->scx.flags, rq->scx.flags & ~SCX_RQ_CLK_VALID); |
1758 | } |
1759 | |
1760 | #else /* !CONFIG_SCHED_CLASS_EXT */ |
1761 | #define scx_enabled() false |
1762 | #define scx_switched_all() false |
1763 | |
1764 | static inline void scx_rq_clock_update(struct rq *rq, u64 clock) {} |
1765 | static inline void scx_rq_clock_invalidate(struct rq *rq) {} |
1766 | #endif /* !CONFIG_SCHED_CLASS_EXT */ |
1767 | |
1768 | /* |
1769 | * Lockdep annotation that avoids accidental unlocks; it's like a |
1770 | * sticky/continuous lockdep_assert_held(). |
1771 | * |
1772 | * This prevents code that has access to 'struct rq *rq' (basically everything |
1773 | * in the scheduler) from accidentally unlocking the rq if it does not also |
1774 | * have a copy of the (on-stack) 'struct rq_flags rf'. |
1775 | * |
1776 | * Also see Documentation/locking/lockdep-design.rst. |
1777 | */ |
1778 | static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) |
1779 | { |
1780 | rf->cookie = lockdep_pin_lock(__rq_lockp(rq)); |
1781 | |
1782 | rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); |
1783 | rf->clock_update_flags = 0; |
1784 | #ifdef CONFIG_SMP |
1785 | WARN_ON_ONCE(rq->balance_callback && rq->balance_callback != &balance_push_callback); |
1786 | #endif |
1787 | } |
1788 | |
1789 | static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) |
1790 | { |
1791 | if (rq->clock_update_flags > RQCF_ACT_SKIP) |
1792 | rf->clock_update_flags = RQCF_UPDATED; |
1793 | |
1794 | scx_rq_clock_invalidate(rq); |
1795 | lockdep_unpin_lock(__rq_lockp(rq), rf->cookie); |
1796 | } |
1797 | |
1798 | static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) |
1799 | { |
1800 | lockdep_repin_lock(__rq_lockp(rq), rf->cookie); |
1801 | |
1802 | /* |
1803 | * Restore the value we stashed in @rf for this pin context. |
1804 | */ |
1805 | rq->clock_update_flags |= rf->clock_update_flags; |
1806 | } |
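
/*
 * Illustrative sketch (assumed usage, not a specific call site): when rq->lock
 * must be dropped temporarily, the pin has to be released and re-taken around
 * the unlock/lock pair, otherwise lockdep will warn about unlocking a pinned
 * lock:
 *
 *	rq_unpin_lock(rq, &rf);
 *	raw_spin_rq_unlock(rq);
 *	...				// run without rq->lock held
 *	raw_spin_rq_lock(rq);
 *	rq_repin_lock(rq, &rf);
 */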
1807 | |
1808 | extern |
1809 | struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) |
1810 | __acquires(rq->lock); |
1811 | |
1812 | extern |
1813 | struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) |
1814 | __acquires(p->pi_lock) |
1815 | __acquires(rq->lock); |
1816 | |
1817 | static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) |
1818 | __releases(rq->lock) |
1819 | { |
1820 | rq_unpin_lock(rq, rf); |
1821 | raw_spin_rq_unlock(rq); |
1822 | } |
1823 | |
1824 | static inline void |
1825 | task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) |
1826 | __releases(rq->lock) |
1827 | __releases(p->pi_lock) |
1828 | { |
1829 | rq_unpin_lock(rq, rf); |
1830 | raw_spin_rq_unlock(rq); |
1831 | raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); |
1832 | } |
1833 | |
1834 | DEFINE_LOCK_GUARD_1(task_rq_lock, struct task_struct, |
1835 | _T->rq = task_rq_lock(_T->lock, &_T->rf), |
1836 | task_rq_unlock(_T->rq, _T->lock, &_T->rf), |
1837 | struct rq *rq; struct rq_flags rf) |
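
/*
 * Example sketch of the scope-based form generated by the guard definition
 * above; the guard holds p->pi_lock and the task's rq->lock (pinned) until
 * the end of the enclosing scope:
 *
 *	CLASS(task_rq_lock, rq_guard)(p);
 *	struct rq *rq = rq_guard.rq;
 */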
1838 | |
1839 | static inline void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) |
1840 | __acquires(rq->lock) |
1841 | { |
1842 | raw_spin_rq_lock_irqsave(rq, rf->flags); |
1843 | rq_pin_lock(rq, rf); |
1844 | } |
1845 | |
1846 | static inline void rq_lock_irq(struct rq *rq, struct rq_flags *rf) |
1847 | __acquires(rq->lock) |
1848 | { |
1849 | raw_spin_rq_lock_irq(rq); |
1850 | rq_pin_lock(rq, rf); |
1851 | } |
1852 | |
1853 | static inline void rq_lock(struct rq *rq, struct rq_flags *rf) |
1854 | __acquires(rq->lock) |
1855 | { |
1856 | raw_spin_rq_lock(rq); |
1857 | rq_pin_lock(rq, rf); |
1858 | } |
1859 | |
1860 | static inline void rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) |
1861 | __releases(rq->lock) |
1862 | { |
1863 | rq_unpin_lock(rq, rf); |
1864 | raw_spin_rq_unlock_irqrestore(rq, rf->flags); |
1865 | } |
1866 | |
1867 | static inline void rq_unlock_irq(struct rq *rq, struct rq_flags *rf) |
1868 | __releases(rq->lock) |
1869 | { |
1870 | rq_unpin_lock(rq, rf); |
1871 | raw_spin_rq_unlock_irq(rq); |
1872 | } |
1873 | |
1874 | static inline void rq_unlock(struct rq *rq, struct rq_flags *rf) |
1875 | __releases(rq->lock) |
1876 | { |
1877 | rq_unpin_lock(rq, rf); |
1878 | raw_spin_rq_unlock(rq); |
1879 | } |
1880 | |
1881 | DEFINE_LOCK_GUARD_1(rq_lock, struct rq, |
1882 | rq_lock(_T->lock, &_T->rf), |
1883 | rq_unlock(_T->lock, &_T->rf), |
1884 | struct rq_flags rf) |
1885 | |
1886 | DEFINE_LOCK_GUARD_1(rq_lock_irq, struct rq, |
1887 | rq_lock_irq(_T->lock, &_T->rf), |
1888 | rq_unlock_irq(_T->lock, &_T->rf), |
1889 | struct rq_flags rf) |
1890 | |
1891 | DEFINE_LOCK_GUARD_1(rq_lock_irqsave, struct rq, |
1892 | rq_lock_irqsave(_T->lock, &_T->rf), |
1893 | rq_unlock_irqrestore(_T->lock, &_T->rf), |
1894 | struct rq_flags rf) |
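
/*
 * Example sketch for the rq guards defined above; equivalent to an
 * rq_lock()/rq_unlock() pair wrapped around the rest of the scope:
 *
 *	guard(rq_lock)(rq);
 *	...			// rq->lock held and pinned until scope exit
 */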
1895 | |
1896 | static inline struct rq *this_rq_lock_irq(struct rq_flags *rf) |
1897 | __acquires(rq->lock) |
1898 | { |
1899 | struct rq *rq; |
1900 | |
1901 | local_irq_disable(); |
1902 | rq = this_rq(); |
1903 | rq_lock(rq, rf); |
1904 | |
1905 | return rq; |
1906 | } |
1907 | |
1908 | #ifdef CONFIG_NUMA |
1909 | |
1910 | enum numa_topology_type { |
1911 | NUMA_DIRECT, |
1912 | NUMA_GLUELESS_MESH, |
1913 | NUMA_BACKPLANE, |
1914 | }; |
1915 | |
1916 | extern enum numa_topology_type sched_numa_topology_type; |
1917 | extern int sched_max_numa_distance; |
1918 | extern bool find_numa_distance(int distance); |
1919 | extern void sched_init_numa(int offline_node); |
1920 | extern void sched_update_numa(int cpu, bool online); |
1921 | extern void sched_domains_numa_masks_set(unsigned int cpu); |
1922 | extern void sched_domains_numa_masks_clear(unsigned int cpu); |
1923 | extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu); |
1924 | |
1925 | #else /* !CONFIG_NUMA: */ |
1926 | |
1927 | static inline void sched_init_numa(int offline_node) { } |
1928 | static inline void sched_update_numa(int cpu, bool online) { } |
1929 | static inline void sched_domains_numa_masks_set(unsigned int cpu) { } |
1930 | static inline void sched_domains_numa_masks_clear(unsigned int cpu) { } |
1931 | |
1932 | static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu) |
1933 | { |
1934 | return nr_cpu_ids; |
1935 | } |
1936 | |
1937 | #endif /* !CONFIG_NUMA */ |
1938 | |
1939 | #ifdef CONFIG_NUMA_BALANCING |
1940 | |
1941 | /* The regions in numa_faults array from task_struct */ |
1942 | enum numa_faults_stats { |
1943 | NUMA_MEM = 0, |
1944 | NUMA_CPU, |
1945 | NUMA_MEMBUF, |
1946 | NUMA_CPUBUF |
1947 | }; |
1948 | |
1949 | extern void sched_setnuma(struct task_struct *p, int node); |
1950 | extern int migrate_task_to(struct task_struct *p, int cpu); |
1951 | extern int migrate_swap(struct task_struct *p, struct task_struct *t, |
1952 | int cpu, int scpu); |
1953 | extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p); |
1954 | |
1955 | #else /* !CONFIG_NUMA_BALANCING: */ |
1956 | |
1957 | static inline void |
1958 | init_numa_balancing(unsigned long clone_flags, struct task_struct *p) |
1959 | { |
1960 | } |
1961 | |
1962 | #endif /* !CONFIG_NUMA_BALANCING */ |
1963 | |
1964 | #ifdef CONFIG_SMP |
1965 | |
1966 | static inline void |
1967 | queue_balance_callback(struct rq *rq, |
1968 | struct balance_callback *head, |
1969 | void (*func)(struct rq *rq)) |
1970 | { |
1971 | lockdep_assert_rq_held(rq); |
1972 | |
1973 | /* |
1974 | * Don't (re)queue an already queued item; nor queue anything when |
1975 | * balance_push() is active, see the comment with |
1976 | * balance_push_callback. |
1977 | */ |
1978 | if (unlikely(head->next || rq->balance_callback == &balance_push_callback)) |
1979 | return; |
1980 | |
1981 | head->func = func; |
1982 | head->next = rq->balance_callback; |
1983 | rq->balance_callback = head; |
1984 | } |
1985 | |
1986 | #define rcu_dereference_check_sched_domain(p) \ |
1987 | rcu_dereference_check((p), lockdep_is_held(&sched_domains_mutex)) |
1988 | |
1989 | /* |
1990 | * The domain tree (rq->sd) is protected by RCU's quiescent state transition. |
1991 | * See destroy_sched_domains: call_rcu for details. |
1992 | * |
1993 | * The domain tree of any CPU may only be accessed from within |
1994 | * preempt-disabled sections. |
1995 | */ |
1996 | #define for_each_domain(cpu, __sd) \ |
1997 | for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \ |
1998 | __sd; __sd = __sd->parent) |
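
/*
 * Example sketch of walking a CPU's domain hierarchy ('target' is a
 * hypothetical CPU being searched for); callers must be in an RCU/preempt
 * disabled section as noted above:
 *
 *	rcu_read_lock();
 *	for_each_domain(cpu, sd) {
 *		if (cpumask_test_cpu(target, sched_domain_span(sd)))
 *			break;
 *	}
 *	rcu_read_unlock();
 */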
1999 | |
2000 | /* A mask of all the SD flags that have the SDF_SHARED_CHILD metaflag */ |
2001 | #define SD_FLAG(name, mflags) (name * !!((mflags) & SDF_SHARED_CHILD)) | |
2002 | static const unsigned int SD_SHARED_CHILD_MASK = |
2003 | #include <linux/sched/sd_flags.h> |
2004 | 0; |
2005 | #undef SD_FLAG |
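
/*
 * Illustrative expansion of the X-macro trick above (the flags shown are
 * examples only; <linux/sched/sd_flags.h> is the authoritative list): each
 * SD_FLAG(name, mflags) line contributes either the flag value or 0 to the
 * OR-chain:
 *
 *	static const unsigned int SD_SHARED_CHILD_MASK =
 *		(SD_BALANCE_NEWIDLE * 1) |	// has SDF_SHARED_CHILD
 *		(SD_SERIALIZE       * 0) |	// does not
 *		...
 *		0;
 */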
2006 | |
2007 | /** |
2008 | * highest_flag_domain - Return highest sched_domain containing flag. |
2009 | * @cpu: The CPU whose highest level of sched domain is to |
2010 | * be returned. |
2011 | * @flag: The flag to check for the highest sched_domain |
2012 | * for the given CPU. |
2013 | * |
2014 | * Returns the highest sched_domain of a CPU which contains @flag. If @flag has |
2015 | * the SDF_SHARED_CHILD metaflag, all the children domains also have @flag. |
2016 | */ |
2017 | static inline struct sched_domain *highest_flag_domain(int cpu, int flag) |
2018 | { |
2019 | struct sched_domain *sd, *hsd = NULL; |
2020 | |
2021 | for_each_domain(cpu, sd) { |
2022 | if (sd->flags & flag) { |
2023 | hsd = sd; |
2024 | continue; |
2025 | } |
2026 | |
2027 | /* |
2028 | * Stop the search if @flag is known to be shared at lower |
2029 | * levels. It will not be found further up. |
2030 | */ |
2031 | if (flag & SD_SHARED_CHILD_MASK) |
2032 | break; |
2033 | } |
2034 | |
2035 | return hsd; |
2036 | } |
2037 | |
2038 | static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) |
2039 | { |
2040 | struct sched_domain *sd; |
2041 | |
2042 | for_each_domain(cpu, sd) { |
2043 | if (sd->flags & flag) |
2044 | break; |
2045 | } |
2046 | |
2047 | return sd; |
2048 | } |
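
/*
 * Example sketch (flag name assumed): topology setup derives the per-CPU LLC
 * pointers from the highest domain that still shares the last-level cache:
 *
 *	sd = highest_flag_domain(cpu, SD_SHARE_LLC);
 *	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
 */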
2049 | |
2050 | DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc); |
2051 | DECLARE_PER_CPU(int, sd_llc_size); |
2052 | DECLARE_PER_CPU(int, sd_llc_id); |
2053 | DECLARE_PER_CPU(int, sd_share_id); |
2054 | DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); |
2055 | DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa); |
2056 | DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); |
2057 | DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); |
2058 | |
2059 | extern struct static_key_false sched_asym_cpucapacity; |
2060 | extern struct static_key_false sched_cluster_active; |
2061 | |
2062 | static __always_inline bool sched_asym_cpucap_active(void) |
2063 | { |
2064 | return static_branch_unlikely(&sched_asym_cpucapacity); |
2065 | } |
2066 | |
2067 | struct sched_group_capacity { |
2068 | atomic_t ref; |
2069 | /* |
2070 | * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity |
2071 | * for a single CPU. |
2072 | */ |
2073 | unsigned long capacity; |
2074 | unsigned long min_capacity; /* Min per-CPU capacity in group */ |
2075 | unsigned long max_capacity; /* Max per-CPU capacity in group */ |
2076 | unsigned long next_update; |
2077 | int imbalance; /* XXX unrelated to capacity but shared group state */ |
2078 | |
2079 | int id; |
2080 | |
2081 | unsigned long cpumask[]; /* Balance mask */ |
2082 | }; |
2083 | |
2084 | struct sched_group { |
2085 | struct sched_group *next; /* Must be a circular list */ |
2086 | atomic_t ref; |
2087 | |
2088 | unsigned int group_weight; |
2089 | unsigned int cores; |
2090 | struct sched_group_capacity *sgc; |
2091 | int asym_prefer_cpu; /* CPU of highest priority in group */ |
2092 | int flags; |
2093 | |
2094 | /* |
2095 | * The CPUs this group covers. |
2096 | * |
2097 | * NOTE: this field is variable length. (Allocated dynamically |
2098 | * by attaching extra space to the end of the structure, |
2099 | * depending on how many CPUs the kernel has booted up with) |
2100 | */ |
2101 | unsigned long cpumask[]; |
2102 | }; |
2103 | |
2104 | static inline struct cpumask *sched_group_span(struct sched_group *sg) |
2105 | { |
2106 | return to_cpumask(sg->cpumask); |
2107 | } |
2108 | |
2109 | /* |
2110 | * See build_balance_mask(). |
2111 | */ |
2112 | static inline struct cpumask *group_balance_mask(struct sched_group *sg) |
2113 | { |
2114 | return to_cpumask(sg->sgc->cpumask); |
2115 | } |
2116 | |
2117 | extern int group_balance_cpu(struct sched_group *sg); |
2118 | |
2119 | extern void update_sched_domain_debugfs(void); |
2120 | extern void dirty_sched_domain_sysctl(int cpu); |
2121 | |
2122 | extern int sched_update_scaling(void); |
2123 | |
2124 | static inline const struct cpumask *task_user_cpus(struct task_struct *p) |
2125 | { |
2126 | if (!p->user_cpus_ptr) |
2127 | return cpu_possible_mask; /* &init_task.cpus_mask */ |
2128 | return p->user_cpus_ptr; |
2129 | } |
2130 | |
2131 | #endif /* CONFIG_SMP */ |
2132 | |
2133 | #ifdef CONFIG_CGROUP_SCHED |
2134 | |
2135 | /* |
2136 | * Return the group to which this task belongs. |
2137 | * |
2138 | * We cannot use task_css() and friends because the cgroup subsystem |
2139 | * changes that value before the cgroup_subsys::attach() method is called, |
2140 | * therefore we cannot pin it and might observe the wrong value. |
2141 | * |
2142 | * The same is true for autogroup's p->signal->autogroup->tg, the autogroup |
2143 | * core changes this before calling sched_move_task(). |
2144 | * |
2145 | * Instead we use a 'copy' which is updated from sched_move_task() while |
2146 | * holding both task_struct::pi_lock and rq::lock. |
2147 | */ |
2148 | static inline struct task_group *task_group(struct task_struct *p) |
2149 | { |
2150 | return p->sched_task_group; |
2151 | } |
2152 | |
2153 | /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ |
2154 | static inline void set_task_rq(struct task_struct *p, unsigned int cpu) |
2155 | { |
2156 | #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED) |
2157 | struct task_group *tg = task_group(p); |
2158 | #endif |
2159 | |
2160 | #ifdef CONFIG_FAIR_GROUP_SCHED |
2161 | set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]); |
2162 | p->se.cfs_rq = tg->cfs_rq[cpu]; |
2163 | p->se.parent = tg->se[cpu]; |
2164 | p->se.depth = tg->se[cpu] ? tg->se[cpu]->depth + 1 : 0; |
2165 | #endif |
2166 | |
2167 | #ifdef CONFIG_RT_GROUP_SCHED |
2168 | /* |
2169 | * p->rt.rt_rq is NULL initially and it is easier to assign |
2170 | * root_task_group's rt_rq than switching in rt_rq_of_se() |
2171 | * Clobbers tg(!) |
2172 | */ |
2173 | if (!rt_group_sched_enabled()) |
2174 | tg = &root_task_group; |
2175 | p->rt.rt_rq = tg->rt_rq[cpu]; |
2176 | p->rt.parent = tg->rt_se[cpu]; |
2177 | #endif |
2178 | } |
2179 | |
2180 | #else /* !CONFIG_CGROUP_SCHED: */ |
2181 | |
2182 | static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } |
2183 | |
2184 | static inline struct task_group *task_group(struct task_struct *p) |
2185 | { |
2186 | return NULL; |
2187 | } |
2188 | |
2189 | #endif /* !CONFIG_CGROUP_SCHED */ |
2190 | |
2191 | static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) |
2192 | { |
2193 | set_task_rq(p, cpu); |
2194 | #ifdef CONFIG_SMP |
2195 | /* |
2196 | * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be |
2197 | * successfully executed on another CPU. We must ensure that updates of |
2198 | * per-task data have been completed by this moment. |
2199 | */ |
2200 | smp_wmb(); |
2201 | WRITE_ONCE(task_thread_info(p)->cpu, cpu); |
2202 | p->wake_cpu = cpu; |
2203 | #endif |
2204 | } |
2205 | |
2206 | /* |
2207 | * Tunables: |
2208 | */ |
2209 | |
2210 | #define SCHED_FEAT(name, enabled) \ |
2211 | __SCHED_FEAT_##name , |
2212 | |
2213 | enum { |
2214 | #include "features.h" |
2215 | __SCHED_FEAT_NR, |
2216 | }; |
2217 | |
2218 | #undef SCHED_FEAT |
2219 | |
2220 | /* |
2221 | * To support run-time toggling of sched features, all the translation units |
2222 | * (but core.c) reference the sysctl_sched_features defined in core.c. |
2223 | */ |
2224 | extern __read_mostly unsigned int sysctl_sched_features; |
2225 | |
2226 | #ifdef CONFIG_JUMP_LABEL |
2227 | |
2228 | #define SCHED_FEAT(name, enabled) \ |
2229 | static __always_inline bool static_branch_##name(struct static_key *key) \ |
2230 | { \ |
2231 | return static_key_##enabled(key); \ |
2232 | } |
2233 | |
2234 | #include "features.h" |
2235 | #undef SCHED_FEAT |
2236 | |
2237 | extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; |
2238 | #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) |
2239 | |
2240 | #else /* !CONFIG_JUMP_LABEL: */ |
2241 | |
2242 | #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) |
2243 | |
2244 | #endif /* !CONFIG_JUMP_LABEL */ |
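
/*
 * Illustrative sketch of the features X-macro (PLACE_LAG used as an example
 * name): a line such as
 *
 *	SCHED_FEAT(PLACE_LAG, true)
 *
 * in "features.h" produces __SCHED_FEAT_PLACE_LAG in the enum above and,
 * with CONFIG_JUMP_LABEL, a static_branch_PLACE_LAG() helper, so run-time
 * toggleable tests read:
 *
 *	if (sched_feat(PLACE_LAG))
 *		...
 */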
2245 | |
2246 | extern struct static_key_false sched_numa_balancing; |
2247 | extern struct static_key_false sched_schedstats; |
2248 | |
2249 | static inline u64 global_rt_period(void) |
2250 | { |
2251 | return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; |
2252 | } |
2253 | |
2254 | static inline u64 global_rt_runtime(void) |
2255 | { |
2256 | if (sysctl_sched_rt_runtime < 0) |
2257 | return RUNTIME_INF; |
2258 | |
2259 | return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; |
2260 | } |
2261 | |
2262 | /* |
2263 | * Is p the current execution context? |
2264 | */ |
2265 | static inline int task_current(struct rq *rq, struct task_struct *p) |
2266 | { |
2267 | return rq->curr == p; |
2268 | } |
2269 | |
2270 | /* |
2271 | * Is p the current scheduling context? |
2272 | * |
2273 | * Note that it might be the current execution context at the same time if |
2274 | * rq->curr == rq->donor == p. |
2275 | */ |
2276 | static inline int task_current_donor(struct rq *rq, struct task_struct *p) |
2277 | { |
2278 | return rq->donor == p; |
2279 | } |
2280 | |
2281 | static inline int task_on_cpu(struct rq *rq, struct task_struct *p) |
2282 | { |
2283 | #ifdef CONFIG_SMP |
2284 | return p->on_cpu; |
2285 | #else |
2286 | return task_current(rq, p); |
2287 | #endif |
2288 | } |
2289 | |
2290 | static inline int task_on_rq_queued(struct task_struct *p) |
2291 | { |
2292 | return READ_ONCE(p->on_rq) == TASK_ON_RQ_QUEUED; |
2293 | } |
2294 | |
2295 | static inline int task_on_rq_migrating(struct task_struct *p) |
2296 | { |
2297 | return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING; |
2298 | } |
2299 | |
2300 | /* Wake flags. The first three directly map to some SD flag value */ |
2301 | #define WF_EXEC 0x02 /* Wakeup after exec; maps to SD_BALANCE_EXEC */ |
2302 | #define WF_FORK 0x04 /* Wakeup after fork; maps to SD_BALANCE_FORK */ |
2303 | #define WF_TTWU 0x08 /* Wakeup; maps to SD_BALANCE_WAKE */ |
2304 | |
2305 | #define WF_SYNC 0x10 /* Waker goes to sleep after wakeup */ |
2306 | #define WF_MIGRATED 0x20 /* Internal use, task got migrated */ |
2307 | #define WF_CURRENT_CPU 0x40 /* Prefer to move the wakee to the current CPU. */ |
2308 | #define WF_RQ_SELECTED 0x80 /* ->select_task_rq() was called */ |
2309 | |
2310 | #ifdef CONFIG_SMP |
2311 | static_assert(WF_EXEC == SD_BALANCE_EXEC); |
2312 | static_assert(WF_FORK == SD_BALANCE_FORK); |
2313 | static_assert(WF_TTWU == SD_BALANCE_WAKE); |
2314 | #endif |
2315 | |
2316 | /* |
2317 | * To aid in avoiding the subversion of "niceness" due to uneven distribution |
2318 | * of tasks with abnormal "nice" values across CPUs the contribution that |
2319 | * each task makes to its run queue's load is weighted according to its |
2320 | * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a |
2321 | * scaled version of the new time slice allocation that they receive on time |
2322 | * slice expiry etc. |
2323 | */ |
2324 | |
2325 | #define WEIGHT_IDLEPRIO 3 |
2326 | #define WMULT_IDLEPRIO 1431655765 |
2327 | |
2328 | extern const int sched_prio_to_weight[40]; |
2329 | extern const u32 sched_prio_to_wmult[40]; |
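
/*
 * Informational example: sched_prio_to_weight[] is indexed by
 * (static_prio - MAX_RT_PRIO), i.e. nice -20..19. Nice 0 maps to a weight of
 * 1024 and each nice level changes the weight by a factor of ~1.25, which
 * translates to roughly a 10% change in CPU time per nice level for
 * competing CPU-bound tasks.
 */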
2330 | |
2331 | /* |
2332 | * {de,en}queue flags: |
2333 | * |
2334 | * DEQUEUE_SLEEP - task is no longer runnable |
2335 | * ENQUEUE_WAKEUP - task just became runnable |
2336 | * |
2337 | * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks |
2338 | * are in a known state which allows modification. Such pairs |
2339 | * should preserve as much state as possible. |
2340 | * |
2341 | * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location |
2342 | * in the runqueue. |
2343 | * |
2344 | * NOCLOCK - skip the update_rq_clock() (avoids double updates) |
2345 | * |
2346 | * MIGRATION - p->on_rq == TASK_ON_RQ_MIGRATING (used for DEADLINE) |
2347 | * |
2348 | * ENQUEUE_HEAD - place at front of runqueue (tail if not specified) |
2349 | * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline) |
2350 | * ENQUEUE_MIGRATED - the task was migrated during wakeup |
2351 | * ENQUEUE_RQ_SELECTED - ->select_task_rq() was called |
2352 | * |
2353 | */ |
2354 | |
2355 | #define DEQUEUE_SLEEP 0x01 /* Matches ENQUEUE_WAKEUP */ |
2356 | #define DEQUEUE_SAVE 0x02 /* Matches ENQUEUE_RESTORE */ |
2357 | #define DEQUEUE_MOVE 0x04 /* Matches ENQUEUE_MOVE */ |
2358 | #define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */ |
2359 | #define DEQUEUE_SPECIAL 0x10 |
2360 | #define DEQUEUE_MIGRATING 0x100 /* Matches ENQUEUE_MIGRATING */ |
2361 | #define DEQUEUE_DELAYED 0x200 /* Matches ENQUEUE_DELAYED */ |
2362 | |
2363 | #define ENQUEUE_WAKEUP 0x01 |
2364 | #define ENQUEUE_RESTORE 0x02 |
2365 | #define ENQUEUE_MOVE 0x04 |
2366 | #define ENQUEUE_NOCLOCK 0x08 |
2367 | |
2368 | #define ENQUEUE_HEAD 0x10 |
2369 | #define ENQUEUE_REPLENISH 0x20 |
2370 | #ifdef CONFIG_SMP |
2371 | #define ENQUEUE_MIGRATED 0x40 |
2372 | #else |
2373 | #define ENQUEUE_MIGRATED 0x00 |
2374 | #endif |
2375 | #define ENQUEUE_INITIAL 0x80 |
2376 | #define ENQUEUE_MIGRATING 0x100 |
2377 | #define ENQUEUE_DELAYED 0x200 |
2378 | #define ENQUEUE_RQ_SELECTED 0x400 |
2379 | |
2380 | #define RETRY_TASK ((void *)-1UL) |
2381 | |
2382 | struct affinity_context { |
2383 | const struct cpumask *new_mask; |
2384 | struct cpumask *user_mask; |
2385 | unsigned int flags; |
2386 | }; |
2387 | |
2388 | extern s64 update_curr_common(struct rq *rq); |
2389 | |
2390 | struct sched_class { |
2391 | |
2392 | #ifdef CONFIG_UCLAMP_TASK |
2393 | int uclamp_enabled; |
2394 | #endif |
2395 | |
2396 | void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); |
2397 | bool (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); |
2398 | void (*yield_task) (struct rq *rq); |
2399 | bool (*yield_to_task)(struct rq *rq, struct task_struct *p); |
2400 | |
2401 | void (*wakeup_preempt)(struct rq *rq, struct task_struct *p, int flags); |
2402 | |
2403 | int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); |
2404 | struct task_struct *(*pick_task)(struct rq *rq); |
2405 | /* |
2406 | * Optional! When implemented pick_next_task() should be equivalent to: |
2407 | * |
2408 | * next = pick_task(); |
2409 | * if (next) { |
2410 | * put_prev_task(prev); |
2411 | * set_next_task_first(next); |
2412 | * } |
2413 | */ |
2414 | struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev); |
2415 | |
2416 | void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct task_struct *next); |
2417 | void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first); |
2418 | |
2419 | #ifdef CONFIG_SMP |
2420 | int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags); |
2421 | |
2422 | void (*migrate_task_rq)(struct task_struct *p, int new_cpu); |
2423 | |
2424 | void (*task_woken)(struct rq *this_rq, struct task_struct *task); |
2425 | |
2426 | void (*set_cpus_allowed)(struct task_struct *p, struct affinity_context *ctx); |
2427 | |
2428 | void (*rq_online)(struct rq *rq); |
2429 | void (*rq_offline)(struct rq *rq); |
2430 | |
2431 | struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq); |
2432 | #endif |
2433 | |
2434 | void (*task_tick)(struct rq *rq, struct task_struct *p, int queued); |
2435 | void (*task_fork)(struct task_struct *p); |
2436 | void (*task_dead)(struct task_struct *p); |
2437 | |
2438 | /* |
2439 | * The switched_from() call is allowed to drop rq->lock, therefore we |
2440 | * cannot assume the switched_from/switched_to pair is serialized by |
2441 | * rq->lock. They are however serialized by p->pi_lock. |
2442 | */ |
2443 | void (*switching_to) (struct rq *this_rq, struct task_struct *task); |
2444 | void (*switched_from)(struct rq *this_rq, struct task_struct *task); |
2445 | void (*switched_to) (struct rq *this_rq, struct task_struct *task); |
2446 | void (*reweight_task)(struct rq *this_rq, struct task_struct *task, |
2447 | const struct load_weight *lw); |
2448 | void (*prio_changed) (struct rq *this_rq, struct task_struct *task, |
2449 | int oldprio); |
2450 | |
2451 | unsigned int (*get_rr_interval)(struct rq *rq, |
2452 | struct task_struct *task); |
2453 | |
2454 | void (*update_curr)(struct rq *rq); |
2455 | |
2456 | #ifdef CONFIG_FAIR_GROUP_SCHED |
2457 | void (*task_change_group)(struct task_struct *p); |
2458 | #endif |
2459 | |
2460 | #ifdef CONFIG_SCHED_CORE |
2461 | int (*task_is_throttled)(struct task_struct *p, int cpu); |
2462 | #endif |
2463 | }; |
2464 | |
2465 | static inline void put_prev_task(struct rq *rq, struct task_struct *prev) |
2466 | { |
2467 | WARN_ON_ONCE(rq->donor != prev); |
2468 | prev->sched_class->put_prev_task(rq, prev, NULL); |
2469 | } |
2470 | |
2471 | static inline void set_next_task(struct rq *rq, struct task_struct *next) |
2472 | { |
2473 | next->sched_class->set_next_task(rq, next, false); |
2474 | } |
2475 | |
2476 | static inline void |
2477 | __put_prev_set_next_dl_server(struct rq *rq, |
2478 | struct task_struct *prev, |
2479 | struct task_struct *next) |
2480 | { |
2481 | prev->dl_server = NULL; |
2482 | next->dl_server = rq->dl_server; |
2483 | rq->dl_server = NULL; |
2484 | } |
2485 | |
2486 | static inline void put_prev_set_next_task(struct rq *rq, |
2487 | struct task_struct *prev, |
2488 | struct task_struct *next) |
2489 | { |
2490 | WARN_ON_ONCE(rq->curr != prev); |
2491 | |
2492 | __put_prev_set_next_dl_server(rq, prev, next); |
2493 | |
2494 | if (next == prev) |
2495 | return; |
2496 | |
2497 | prev->sched_class->put_prev_task(rq, prev, next); |
2498 | next->sched_class->set_next_task(rq, next, true); |
2499 | } |
2500 | |
2501 | /* |
2502 | * Helper to define a sched_class instance; each one is placed in a separate |
2503 | * section which is ordered by the linker script: |
2504 | * |
2505 | * include/asm-generic/vmlinux.lds.h |
2506 | * |
2507 | * *CAREFUL* they are laid out in *REVERSE* order!!! |
2508 | * |
2509 | * Also enforce alignment on the instance, not the type, to guarantee layout. |
2510 | */ |
2511 | #define DEFINE_SCHED_CLASS(name) \ |
2512 | const struct sched_class name##_sched_class \ |
2513 | __aligned(__alignof__(struct sched_class)) \ |
2514 | __section("__" #name "_sched_class") |
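
/*
 * Example sketch (abridged member list, mirroring how fair.c instantiates
 * its class):
 *
 *	DEFINE_SCHED_CLASS(fair) = {
 *		.enqueue_task	= enqueue_task_fair,
 *		.dequeue_task	= dequeue_task_fair,
 *		...
 *	};
 *
 * The linker script collects the "__fair_sched_class" section between
 * __sched_class_highest and __sched_class_lowest, which is what gives the
 * class iteration macros below their priority ordering.
 */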
2515 | |
2516 | /* Defined in include/asm-generic/vmlinux.lds.h */ |
2517 | extern struct sched_class __sched_class_highest[]; |
2518 | extern struct sched_class __sched_class_lowest[]; |
2519 | |
2520 | extern const struct sched_class stop_sched_class; |
2521 | extern const struct sched_class dl_sched_class; |
2522 | extern const struct sched_class rt_sched_class; |
2523 | extern const struct sched_class fair_sched_class; |
2524 | extern const struct sched_class idle_sched_class; |
2525 | |
2526 | /* |
2527 | * Iterate only active classes. SCX can take over all fair tasks or be |
2528 | * completely disabled. If the former, skip fair. If the latter, skip SCX. |
2529 | */ |
2530 | static inline const struct sched_class *next_active_class(const struct sched_class *class) |
2531 | { |
2532 | class++; |
2533 | #ifdef CONFIG_SCHED_CLASS_EXT |
2534 | if (scx_switched_all() && class == &fair_sched_class) |
2535 | class++; |
2536 | if (!scx_enabled() && class == &ext_sched_class) |
2537 | class++; |
2538 | #endif |
2539 | return class; |
2540 | } |
2541 | |
2542 | #define for_class_range(class, _from, _to) \ |
2543 | for (class = (_from); class < (_to); class++) |
2544 | |
2545 | #define for_each_class(class) \ |
2546 | for_class_range(class, __sched_class_highest, __sched_class_lowest) |
2547 | |
2548 | #define for_active_class_range(class, _from, _to) \ |
2549 | for (class = (_from); class != (_to); class = next_active_class(class)) |
2550 | |
2551 | #define for_each_active_class(class) \ |
2552 | for_active_class_range(class, __sched_class_highest, __sched_class_lowest) |
2553 | |
2554 | #define sched_class_above(_a, _b) ((_a) < (_b)) |
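
/*
 * Example sketch of how the pick path walks the class array (a simplified
 * rendition, not the exact body of __pick_next_task()):
 *
 *	for_each_active_class(class) {
 *		p = class->pick_next_task(rq, prev);
 *		if (p)
 *			return p;
 *	}
 */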
2555 | |
2556 | static inline bool sched_stop_runnable(struct rq *rq) |
2557 | { |
2558 | return rq->stop && task_on_rq_queued(rq->stop); |
2559 | } |
2560 | |
2561 | static inline bool sched_dl_runnable(struct rq *rq) |
2562 | { |
2563 | return rq->dl.dl_nr_running > 0; |
2564 | } |
2565 | |
2566 | static inline bool sched_rt_runnable(struct rq *rq) |
2567 | { |
2568 | return rq->rt.rt_queued > 0; |
2569 | } |
2570 | |
2571 | static inline bool sched_fair_runnable(struct rq *rq) |
2572 | { |
2573 | return rq->cfs.nr_queued > 0; |
2574 | } |
2575 | |
2576 | extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); |
2577 | extern struct task_struct *pick_task_idle(struct rq *rq); |
2578 | |
2579 | #define SCA_CHECK 0x01 |
2580 | #define SCA_MIGRATE_DISABLE 0x02 |
2581 | #define SCA_MIGRATE_ENABLE 0x04 |
2582 | #define SCA_USER 0x08 |
2583 | |
2584 | #ifdef CONFIG_SMP |
2585 | |
2586 | extern void update_group_capacity(struct sched_domain *sd, int cpu); |
2587 | |
2588 | extern void sched_balance_trigger(struct rq *rq); |
2589 | |
2590 | extern int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx); |
2591 | extern void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx); |
2592 | |
2593 | static inline bool task_allowed_on_cpu(struct task_struct *p, int cpu) |
2594 | { |
2595 | /* When not in the task's cpumask, no point in looking further. */ |
2596 | if (!cpumask_test_cpu(cpu, p->cpus_ptr)) |
2597 | return false; |
2598 | |
2599 | /* Can @cpu run a user thread? */ |
2600 | if (!(p->flags & PF_KTHREAD) && !task_cpu_possible(cpu, p)) |
2601 | return false; |
2602 | |
2603 | return true; |
2604 | } |
2605 | |
2606 | static inline cpumask_t *alloc_user_cpus_ptr(int node) |
2607 | { |
2608 | /* |
2609 | * See do_set_cpus_allowed() above for the rcu_head usage. |
2610 | */ |
2611 | int size = max_t(int, cpumask_size(), sizeof(struct rcu_head)); |
2612 | |
2613 | return kmalloc_node(size, GFP_KERNEL, node); |
2614 | } |
2615 | |
2616 | static inline struct task_struct *get_push_task(struct rq *rq) |
2617 | { |
2618 | struct task_struct *p = rq->donor; |
2619 | |
2620 | lockdep_assert_rq_held(rq); |
2621 | |
2622 | if (rq->push_busy) |
2623 | return NULL; |
2624 | |
2625 | if (p->nr_cpus_allowed == 1) |
2626 | return NULL; |
2627 | |
2628 | if (p->migration_disabled) |
2629 | return NULL; |
2630 | |
2631 | rq->push_busy = true; |
2632 | return get_task_struct(p); |
2633 | } |
2634 | |
2635 | extern int push_cpu_stop(void *arg); |
2636 | |
2637 | #else /* !CONFIG_SMP: */ |
2638 | |
2639 | static inline bool task_allowed_on_cpu(struct task_struct *p, int cpu) |
2640 | { |
2641 | return true; |
2642 | } |
2643 | |
2644 | static inline int __set_cpus_allowed_ptr(struct task_struct *p, |
2645 | struct affinity_context *ctx) |
2646 | { |
2647 | return set_cpus_allowed_ptr(p, ctx->new_mask); |
2648 | } |
2649 | |
2650 | static inline cpumask_t *alloc_user_cpus_ptr(int node) |
2651 | { |
2652 | return NULL; |
2653 | } |
2654 | |
2655 | #endif /* !CONFIG_SMP */ |
2656 | |
2657 | #ifdef CONFIG_CPU_IDLE |
2658 | |
2659 | static inline void idle_set_state(struct rq *rq, |
2660 | struct cpuidle_state *idle_state) |
2661 | { |
2662 | rq->idle_state = idle_state; |
2663 | } |
2664 | |
2665 | static inline struct cpuidle_state *idle_get_state(struct rq *rq) |
2666 | { |
2667 | WARN_ON_ONCE(!rcu_read_lock_held()); |
2668 | |
2669 | return rq->idle_state; |
2670 | } |
2671 | |
2672 | #else /* !CONFIG_CPU_IDLE: */ |
2673 | |
2674 | static inline void idle_set_state(struct rq *rq, |
2675 | struct cpuidle_state *idle_state) |
2676 | { |
2677 | } |
2678 | |
2679 | static inline struct cpuidle_state *idle_get_state(struct rq *rq) |
2680 | { |
2681 | return NULL; |
2682 | } |
2683 | |
2684 | #endif /* !CONFIG_CPU_IDLE */ |
2685 | |
2686 | extern void schedule_idle(void); |
2687 | asmlinkage void schedule_user(void); |
2688 | |
2689 | extern void sysrq_sched_debug_show(void); |
2690 | extern void sched_init_granularity(void); |
2691 | extern void update_max_interval(void); |
2692 | |
2693 | extern void init_sched_dl_class(void); |
2694 | extern void init_sched_rt_class(void); |
2695 | extern void init_sched_fair_class(void); |
2696 | |
2697 | extern void resched_curr(struct rq *rq); |
2698 | extern void resched_curr_lazy(struct rq *rq); |
2699 | extern void resched_cpu(int cpu); |
2700 | |
2701 | extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); |
2702 | extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); |
2703 | |
2704 | extern void init_dl_entity(struct sched_dl_entity *dl_se); |
2705 | |
2706 | #define BW_SHIFT 20 |
2707 | #define BW_UNIT (1 << BW_SHIFT) |
2708 | #define RATIO_SHIFT 8 |
2709 | #define MAX_BW_BITS (64 - BW_SHIFT) |
2710 | #define MAX_BW ((1ULL << MAX_BW_BITS) - 1) |
2711 | |
2712 | extern unsigned long to_ratio(u64 period, u64 runtime); |
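
/*
 * Informational example: to_ratio() returns runtime/period in BW_SHIFT fixed
 * point. A reservation of 10ms every 100ms gives
 * (10000000 << BW_SHIFT) / 100000000 = 104857, i.e. roughly 0.1 * BW_UNIT.
 */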
2713 | |
2714 | extern void init_entity_runnable_average(struct sched_entity *se); |
2715 | extern void post_init_entity_util_avg(struct task_struct *p); |
2716 | |
2717 | #ifdef CONFIG_NO_HZ_FULL |
2718 | extern bool sched_can_stop_tick(struct rq *rq); |
2719 | extern int __init sched_tick_offload_init(void); |
2720 | |
2721 | /* |
2722 | * The tick may be needed by tasks in the runqueue depending on their policy |
2723 | * and requirements. If the tick is needed, send the target CPU an IPI to |
2724 | * kick it out of NOHZ mode if necessary. |
2725 | */ |
2726 | static inline void sched_update_tick_dependency(struct rq *rq) |
2727 | { |
2728 | int cpu = cpu_of(rq); |
2729 | |
2730 | if (!tick_nohz_full_cpu(cpu)) |
2731 | return; |
2732 | |
2733 | if (sched_can_stop_tick(rq)) |
2734 | tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); |
2735 | else |
2736 | tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); |
2737 | } |
2738 | #else /* !CONFIG_NO_HZ_FULL: */ |
2739 | static inline int sched_tick_offload_init(void) { return 0; } |
2740 | static inline void sched_update_tick_dependency(struct rq *rq) { } |
2741 | #endif /* !CONFIG_NO_HZ_FULL */ |
2742 | |
2743 | static inline void add_nr_running(struct rq *rq, unsigned count) |
2744 | { |
2745 | unsigned prev_nr = rq->nr_running; |
2746 | |
2747 | rq->nr_running = prev_nr + count; |
2748 | if (trace_sched_update_nr_running_tp_enabled()) { |
2749 | call_trace_sched_update_nr_running(rq, count); |
2750 | } |
2751 | |
2752 | #ifdef CONFIG_SMP |
2753 | if (prev_nr < 2 && rq->nr_running >= 2) |
2754 | set_rd_overloaded(rq->rd, 1); |
2755 | #endif |
2756 | |
2757 | sched_update_tick_dependency(rq); |
2758 | } |
2759 | |
2760 | static inline void sub_nr_running(struct rq *rq, unsigned count) |
2761 | { |
2762 | rq->nr_running -= count; |
2763 | if (trace_sched_update_nr_running_tp_enabled()) { |
2764 | call_trace_sched_update_nr_running(rq, -count); |
2765 | } |
2766 | |
2767 | /* Check if we still need preemption */ |
2768 | sched_update_tick_dependency(rq); |
2769 | } |
2770 | |
2771 | static inline void __block_task(struct rq *rq, struct task_struct *p) |
2772 | { |
2773 | if (p->sched_contributes_to_load) |
2774 | rq->nr_uninterruptible++; |
2775 | |
2776 | if (p->in_iowait) { |
2777 | atomic_inc(&rq->nr_iowait); |
2778 | delayacct_blkio_start(); |
2779 | } |
2780 | |
2781 | ASSERT_EXCLUSIVE_WRITER(p->on_rq); |
2782 | |
2783 | /* |
2784 | * The moment this write goes through, ttwu() can swoop in and migrate |
2785 | * this task, rendering our rq->__lock ineffective. |
2786 | * |
2787 | * __schedule() try_to_wake_up() |
2788 | * LOCK rq->__lock LOCK p->pi_lock |
2789 | * pick_next_task() |
2790 | * pick_next_task_fair() |
2791 | * pick_next_entity() |
2792 | * dequeue_entities() |
2793 | * __block_task() |
2794 | * RELEASE p->on_rq = 0 if (p->on_rq && ...) |
2795 | * break; |
2796 | * |
2797 | * ACQUIRE (after ctrl-dep) |
2798 | * |
2799 | * cpu = select_task_rq(); |
2800 | * set_task_cpu(p, cpu); |
2801 | * ttwu_queue() |
2802 | * ttwu_do_activate() |
2803 | * LOCK rq->__lock |
2804 | * activate_task() |
2805 | * STORE p->on_rq = 1 |
2806 | * UNLOCK rq->__lock |
2807 | * |
2808 | * Callers must ensure to not reference @p after this -- we no longer |
2809 | * own it. |
2810 | */ |
2811 | smp_store_release(&p->on_rq, 0); |
2812 | } |
2813 | |
2814 | extern void activate_task(struct rq *rq, struct task_struct *p, int flags); |
2815 | extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); |
2816 | |
2817 | extern void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags); |
2818 | |
2819 | #ifdef CONFIG_PREEMPT_RT |
2820 | # define SCHED_NR_MIGRATE_BREAK 8 |
2821 | #else |
2822 | # define SCHED_NR_MIGRATE_BREAK 32 |
2823 | #endif |
2824 | |
2825 | extern __read_mostly unsigned int sysctl_sched_nr_migrate; |
2826 | extern __read_mostly unsigned int sysctl_sched_migration_cost; |
2827 | |
2828 | extern unsigned int sysctl_sched_base_slice; |
2829 | |
2830 | extern int sysctl_resched_latency_warn_ms; |
2831 | extern int sysctl_resched_latency_warn_once; |
2832 | |
2833 | extern unsigned int sysctl_sched_tunable_scaling; |
2834 | |
2835 | extern unsigned int sysctl_numa_balancing_scan_delay; |
2836 | extern unsigned int sysctl_numa_balancing_scan_period_min; |
2837 | extern unsigned int sysctl_numa_balancing_scan_period_max; |
2838 | extern unsigned int sysctl_numa_balancing_scan_size; |
2839 | extern unsigned int sysctl_numa_balancing_hot_threshold; |
2840 | |
2841 | #ifdef CONFIG_SCHED_HRTICK |
2842 | |
2843 | /* |
2844 | * Use hrtick when: |
2845 | * - enabled by features |
2846 | * - hrtimer is actually high res |
2847 | */ |
2848 | static inline int hrtick_enabled(struct rq *rq) |
2849 | { |
2850 | if (!cpu_active(cpu_of(rq))) |
2851 | return 0; |
2852 | return hrtimer_is_hres_active(&rq->hrtick_timer); |
2853 | } |
2854 | |
2855 | static inline int hrtick_enabled_fair(struct rq *rq) |
2856 | { |
2857 | if (!sched_feat(HRTICK)) |
2858 | return 0; |
2859 | return hrtick_enabled(rq); |
2860 | } |
2861 | |
2862 | static inline int hrtick_enabled_dl(struct rq *rq) |
2863 | { |
2864 | if (!sched_feat(HRTICK_DL)) |
2865 | return 0; |
2866 | return hrtick_enabled(rq); |
2867 | } |
2868 | |
2869 | extern void hrtick_start(struct rq *rq, u64 delay); |
2870 | |
2871 | #else /* !CONFIG_SCHED_HRTICK: */ |
2872 | |
2873 | static inline int hrtick_enabled_fair(struct rq *rq) |
2874 | { |
2875 | return 0; |
2876 | } |
2877 | |
2878 | static inline int hrtick_enabled_dl(struct rq *rq) |
2879 | { |
2880 | return 0; |
2881 | } |
2882 | |
2883 | static inline int hrtick_enabled(struct rq *rq) |
2884 | { |
2885 | return 0; |
2886 | } |
2887 | |
2888 | #endif /* !CONFIG_SCHED_HRTICK */ |
2889 | |
2890 | #ifndef arch_scale_freq_tick |
2891 | static __always_inline void arch_scale_freq_tick(void) { } |
2892 | #endif |
2893 | |
2894 | #ifndef arch_scale_freq_capacity |
2895 | /** |
2896 | * arch_scale_freq_capacity - get the frequency scale factor of a given CPU. |
2897 | * @cpu: the CPU in question. |
2898 | * |
2899 | * Return: the frequency scale factor normalized against SCHED_CAPACITY_SCALE, i.e. |
2900 | * |
2901 | * f_curr |
2902 | * ------ * SCHED_CAPACITY_SCALE |
2903 | * f_max |
2904 | */ |
2905 | static __always_inline |
2906 | unsigned long arch_scale_freq_capacity(int cpu) |
2907 | { |
2908 | return SCHED_CAPACITY_SCALE; |
2909 | } |
2910 | #endif |
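
/*
 * Informational example: a CPU currently clocked at 1.2 GHz whose maximum
 * frequency is 2.4 GHz would report (1.2 / 2.4) * SCHED_CAPACITY_SCALE = 512
 * from an architecture-provided arch_scale_freq_capacity().
 */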
2911 | |
2912 | /* |
2913 | * In double_lock_balance()/double_rq_lock(), we use raw_spin_rq_lock() to |
2914 | * acquire rq lock instead of rq_lock(). So at the end of these two functions |
2915 | * we need to call double_rq_clock_clear_update() to clear RQCF_UPDATED of |
2916 | * rq->clock_update_flags to avoid the WARN_DOUBLE_CLOCK warning. |
2917 | */ |
2918 | static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2) |
2919 | { |
2920 | rq1->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); |
2921 | /* rq1 == rq2 for !CONFIG_SMP, so just clear RQCF_UPDATED once. */ |
2922 | #ifdef CONFIG_SMP |
2923 | rq2->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); |
2924 | #endif |
2925 | } |
2926 | |
2927 | #define DEFINE_LOCK_GUARD_2(name, type, _lock, _unlock, ...) \ |
2928 | __DEFINE_UNLOCK_GUARD(name, type, _unlock, type *lock2; __VA_ARGS__) \ |
2929 | static inline class_##name##_t class_##name##_constructor(type *lock, type *lock2) \ |
2930 | { class_##name##_t _t = { .lock = lock, .lock2 = lock2 }, *_T = &_t; \ |
2931 | _lock; return _t; } |
2932 | |
2933 | #ifdef CONFIG_SMP |
2934 | |
2935 | static inline bool rq_order_less(struct rq *rq1, struct rq *rq2) |
2936 | { |
2937 | #ifdef CONFIG_SCHED_CORE |
2938 | /* |
2939 | * In order to not have {0,2},{1,3} turn into an AB-BA, |
2940 | * order by core-id first and cpu-id second. |
2941 | * |
2942 | * Notably: |
2943 | * |
2944 | * double_rq_lock(0,3); will take core-0, core-1 lock |
2945 | * double_rq_lock(1,2); will take core-1, core-0 lock |
2946 | * |
2947 | * when only cpu-id is considered. |
2948 | */ |
2949 | if (rq1->core->cpu < rq2->core->cpu) |
2950 | return true; |
2951 | if (rq1->core->cpu > rq2->core->cpu) |
2952 | return false; |
2953 | |
2954 | /* |
2955 | * __sched_core_flip() relies on SMT having cpu-id lock order. |
2956 | */ |
2957 | #endif |
2958 | return rq1->cpu < rq2->cpu; |
2959 | } |
2960 | |
2961 | extern void double_rq_lock(struct rq *rq1, struct rq *rq2); |
2962 | |
2963 | #ifdef CONFIG_PREEMPTION |
2964 | |
2965 | /* |
2966 | * fair double_lock_balance: Safely acquires both rq->locks in a fair |
2967 | * way at the expense of forcing extra atomic operations in all |
2968 | * invocations. This assures that the double_lock is acquired using the |
2969 | * same underlying policy as the spinlock_t on this architecture, which |
2970 | * reduces latency compared to the unfair variant below. However, it |
2971 | * also adds more overhead and therefore may reduce throughput. |
2972 | */ |
2973 | static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) |
2974 | __releases(this_rq->lock) |
2975 | __acquires(busiest->lock) |
2976 | __acquires(this_rq->lock) |
2977 | { |
2978 | raw_spin_rq_unlock(this_rq); |
2979 | double_rq_lock(this_rq, busiest); |
2980 | |
2981 | return 1; |
2982 | } |
2983 | |
2984 | #else /* !CONFIG_PREEMPTION: */ |
2985 | /* |
2986 | * Unfair double_lock_balance: Optimizes throughput at the expense of |
2987 | * latency by eliminating extra atomic operations when the locks are |
2988 | * already in proper order on entry. This favors lower CPU-ids and will |
2989 | * grant the double lock to lower CPUs over higher ids under contention, |
2990 | * regardless of entry order into the function. |
2991 | */ |
2992 | static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) |
2993 | __releases(this_rq->lock) |
2994 | __acquires(busiest->lock) |
2995 | __acquires(this_rq->lock) |
2996 | { |
2997 | if (__rq_lockp(this_rq) == __rq_lockp(busiest) || |
2998 | likely(raw_spin_rq_trylock(busiest))) { |
2999 | double_rq_clock_clear_update(this_rq, busiest); |
3000 | return 0; |
3001 | } |
3002 | |
3003 | if (rq_order_less(this_rq, busiest)) { |
3004 | raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING); |
3005 | double_rq_clock_clear_update(this_rq, busiest); |
3006 | return 0; |
3007 | } |
3008 | |
3009 | raw_spin_rq_unlock(this_rq); |
3010 | double_rq_lock(this_rq, busiest); |
3011 | |
3012 | return 1; |
3013 | } |
3014 | |
3015 | #endif /* !CONFIG_PREEMPTION */ |
3016 | |
3017 | /* |
3018 | * double_lock_balance - lock the busiest runqueue, this_rq is locked already. |
3019 | */ |
3020 | static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) |
3021 | { |
3022 | lockdep_assert_irqs_disabled(); |
3023 | |
3024 | return _double_lock_balance(this_rq, busiest); |
3025 | } |
3026 | |
3027 | static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) |
3028 | __releases(busiest->lock) |
3029 | { |
3030 | if (__rq_lockp(this_rq) != __rq_lockp(busiest)) |
3031 | raw_spin_rq_unlock(busiest); |
3032 | lock_set_subclass(&__rq_lockp(this_rq)->dep_map, 0, _RET_IP_); |
3033 | } |
3034 | |
3035 | static inline void double_lock(spinlock_t *l1, spinlock_t *l2) |
3036 | { |
3037 | if (l1 > l2) |
3038 | swap(l1, l2); |
3039 | |
3040 | spin_lock(l1); |
3041 | spin_lock_nested(l2, SINGLE_DEPTH_NESTING); |
3042 | } |
3043 | |
3044 | static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) |
3045 | { |
3046 | if (l1 > l2) |
3047 | swap(l1, l2); |
3048 | |
3049 | spin_lock_irq(l1); |
3050 | spin_lock_nested(l2, SINGLE_DEPTH_NESTING); |
3051 | } |
3052 | |
3053 | static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) |
3054 | { |
3055 | if (l1 > l2) |
3056 | swap(l1, l2); |
3057 | |
3058 | raw_spin_lock(l1); |
3059 | raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); |
3060 | } |
3061 | |
3062 | static inline void double_raw_unlock(raw_spinlock_t *l1, raw_spinlock_t *l2) |
3063 | { |
3064 | raw_spin_unlock(l1); |
3065 | raw_spin_unlock(l2); |
3066 | } |
3067 | |
3068 | DEFINE_LOCK_GUARD_2(double_raw_spinlock, raw_spinlock_t, |
3069 | double_raw_lock(_T->lock, _T->lock2), |
3070 | double_raw_unlock(_T->lock, _T->lock2)) |
3071 | |
3072 | /* |
3073 | * double_rq_unlock - safely unlock two runqueues |
3074 | * |
3075 | * Note this does not restore interrupts like task_rq_unlock, |
3076 | * you need to do so manually after calling. |
3077 | */ |
3078 | static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) |
3079 | __releases(rq1->lock) |
3080 | __releases(rq2->lock) |
3081 | { |
3082 | if (__rq_lockp(rq1) != __rq_lockp(rq2)) |
3083 | raw_spin_rq_unlock(rq2); |
3084 | else |
3085 | __release(rq2->lock); |
3086 | raw_spin_rq_unlock(rq1); |
3087 | } |
3088 | |
3089 | extern void set_rq_online (struct rq *rq); |
3090 | extern void set_rq_offline(struct rq *rq); |
3091 | |
3092 | extern bool sched_smp_initialized; |
3093 | |
3094 | #else /* !CONFIG_SMP: */ |
3095 | |
3096 | /* |
3097 | * double_rq_lock - safely lock two runqueues |
3098 | * |
3099 | * Note this does not disable interrupts like task_rq_lock, |
3100 | * you need to do so manually before calling. |
3101 | */ |
3102 | static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) |
3103 | __acquires(rq1->lock) |
3104 | __acquires(rq2->lock) |
3105 | { |
3106 | WARN_ON_ONCE(!irqs_disabled()); |
3107 | WARN_ON_ONCE(rq1 != rq2); |
3108 | raw_spin_rq_lock(rq1); |
3109 | __acquire(rq2->lock); /* Fake it out ;) */ |
3110 | double_rq_clock_clear_update(rq1, rq2); |
3111 | } |
3112 | |
3113 | /* |
3114 | * double_rq_unlock - safely unlock two runqueues |
3115 | * |
3116 | * Note this does not restore interrupts like task_rq_unlock, |
3117 | * you need to do so manually after calling. |
3118 | */ |
3119 | static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) |
3120 | __releases(rq1->lock) |
3121 | __releases(rq2->lock) |
3122 | { |
3123 | WARN_ON_ONCE(rq1 != rq2); |
3124 | raw_spin_rq_unlock(rq1); |
3125 | __release(rq2->lock); |
3126 | } |
3127 | |
3128 | #endif /* !CONFIG_SMP */ |
3129 | |
3130 | DEFINE_LOCK_GUARD_2(double_rq_lock, struct rq, |
3131 | double_rq_lock(_T->lock, _T->lock2), |
3132 | double_rq_unlock(_T->lock, _T->lock2)) |
3133 | |
3134 | extern struct sched_entity *__pick_root_entity(struct cfs_rq *cfs_rq); |
3135 | extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq); |
3136 | extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq); |
3137 | |
3138 | extern bool sched_debug_verbose; |
3139 | |
3140 | extern void print_cfs_stats(struct seq_file *m, int cpu); |
3141 | extern void print_rt_stats(struct seq_file *m, int cpu); |
3142 | extern void print_dl_stats(struct seq_file *m, int cpu); |
3143 | extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); |
3144 | extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); |
3145 | extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq); |
3146 | |
3147 | extern void resched_latency_warn(int cpu, u64 latency); |
3148 | #ifdef CONFIG_NUMA_BALANCING |
3149 | extern void show_numa_stats(struct task_struct *p, struct seq_file *m); |
3150 | extern void |
3151 | print_numa_stats(struct seq_file *m, int node, unsigned long tsf, |
3152 | unsigned long tpf, unsigned long gsf, unsigned long gpf); |
3153 | #endif /* CONFIG_NUMA_BALANCING */ |
3154 | |
3155 | extern void init_cfs_rq(struct cfs_rq *cfs_rq); |
3156 | extern void init_rt_rq(struct rt_rq *rt_rq); |
3157 | extern void init_dl_rq(struct dl_rq *dl_rq); |
3158 | |
3159 | extern void cfs_bandwidth_usage_inc(void); |
3160 | extern void cfs_bandwidth_usage_dec(void); |
3161 | |
3162 | #ifdef CONFIG_NO_HZ_COMMON |
3163 | |
3164 | #define NOHZ_BALANCE_KICK_BIT 0 |
3165 | #define NOHZ_STATS_KICK_BIT 1 |
3166 | #define NOHZ_NEWILB_KICK_BIT 2 |
3167 | #define NOHZ_NEXT_KICK_BIT 3 |
3168 | |
3169 | /* Run sched_balance_domains() */ |
3170 | #define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT) |
3171 | /* Update blocked load */ |
3172 | #define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT) |
3173 | /* Update blocked load when entering idle */ |
3174 | #define NOHZ_NEWILB_KICK BIT(NOHZ_NEWILB_KICK_BIT) |
3175 | /* Update nohz.next_balance */ |
3176 | #define NOHZ_NEXT_KICK BIT(NOHZ_NEXT_KICK_BIT) |
3177 | |
3178 | #define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK | NOHZ_NEXT_KICK) |
3179 | |
3180 | #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) |
3181 | |
3182 | extern void nohz_balance_exit_idle(struct rq *rq); |
3183 | #else /* !CONFIG_NO_HZ_COMMON: */ |
3184 | static inline void nohz_balance_exit_idle(struct rq *rq) { } |
3185 | #endif /* !CONFIG_NO_HZ_COMMON */ |
3186 | |
3187 | #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) |
3188 | extern void nohz_run_idle_balance(int cpu); |
3189 | #else |
3190 | static inline void nohz_run_idle_balance(int cpu) { } |
3191 | #endif |
3192 | |
3193 | #include "stats.h" |
3194 | |
3195 | #if defined(CONFIG_SCHED_CORE) && defined(CONFIG_SCHEDSTATS) |
3196 | |
3197 | extern void __sched_core_account_forceidle(struct rq *rq); |
3198 | |
3199 | static inline void sched_core_account_forceidle(struct rq *rq) |
3200 | { |
3201 | if (schedstat_enabled()) |
3202 | __sched_core_account_forceidle(rq); |
3203 | } |
3204 | |
3205 | extern void __sched_core_tick(struct rq *rq); |
3206 | |
3207 | static inline void sched_core_tick(struct rq *rq) |
3208 | { |
3209 | if (sched_core_enabled(rq) && schedstat_enabled()) |
3210 | __sched_core_tick(rq); |
3211 | } |
3212 | |
3213 | #else /* !(CONFIG_SCHED_CORE && CONFIG_SCHEDSTATS): */ |
3214 | |
3215 | static inline void sched_core_account_forceidle(struct rq *rq) { } |
3216 | |
3217 | static inline void sched_core_tick(struct rq *rq) { } |
3218 | |
3219 | #endif /* !(CONFIG_SCHED_CORE && CONFIG_SCHEDSTATS) */ |
3220 | |
3221 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING |
3222 | |
3223 | struct irqtime { |
3224 | u64 total; |
3225 | u64 tick_delta; |
3226 | u64 irq_start_time; |
3227 | struct u64_stats_sync sync; |
3228 | }; |
3229 | |
3230 | DECLARE_PER_CPU(struct irqtime, cpu_irqtime); |
3231 | extern int sched_clock_irqtime; |
3232 | |
3233 | static inline int irqtime_enabled(void) |
3234 | { |
3235 | return sched_clock_irqtime; |
3236 | } |
3237 | |
3238 | /* |
3239 | * Returns the irqtime minus the softirq time computed by ksoftirqd. |
3240 | * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime |
3241 | * subtracted and would never move forward. |
3242 | */ |
3243 | static inline u64 irq_time_read(int cpu) |
3244 | { |
3245 | struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); |
3246 | unsigned int seq; |
3247 | u64 total; |
3248 | |
3249 | do { |
3250 | seq = __u64_stats_fetch_begin(&irqtime->sync); |
3251 | total = irqtime->total; |
3252 | } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); |
3253 | |
3254 | return total; |
3255 | } |
3256 | |
3257 | #else |
3258 | |
3259 | static inline int irqtime_enabled(void) |
3260 | { |
3261 | return 0; |
3262 | } |
3263 | |
3264 | #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ |
3265 | |
3266 | #ifdef CONFIG_CPU_FREQ |
3267 | |
3268 | DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data); |
3269 | |
3270 | /** |
3271 | * cpufreq_update_util - Take a note about CPU utilization changes. |
3272 | * @rq: Runqueue to carry out the update for. |
3273 | * @flags: Update reason flags. |
3274 | * |
3275 | * This function is called by the scheduler on the CPU whose utilization is |
3276 | * being updated. |
3277 | * |
3278 | * It can only be called from RCU-sched read-side critical sections. |
3279 | * |
3280 | * The way cpufreq is currently arranged requires it to evaluate the CPU |
3281 | * performance state (frequency/voltage) on a regular basis to prevent it from |
3282 | * being stuck in a completely inadequate performance level for too long. |
3283 | * That is not guaranteed to happen if the updates are only triggered from CFS |
3284 | * and DL, though, because they may not be coming in if only RT tasks are |
3285 | * active all the time (or there are RT tasks only). |
3286 | * |
3287 | * As a workaround for that issue, this function is called periodically by the |
3288 | * RT sched class to trigger extra cpufreq updates to prevent it from stalling, |
3289 | * but that really is a band-aid. Going forward it should be replaced with |
3290 | * solutions targeted more specifically at RT tasks. |
3291 | */ |
3292 | static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) |
3293 | { |
3294 | struct update_util_data *data; |
3295 | |
3296 | data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, |
3297 | cpu_of(rq))); |
3298 | if (data) |
3299 | data->func(data, rq_clock(rq), flags); |
3300 | } |
3301 | #else /* !CONFIG_CPU_FREQ: */ |
3302 | static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) { } |
3303 | #endif /* !CONFIG_CPU_FREQ */ |
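/*
 * Illustrative sketch (kept in a comment, not part of this header): the
 * callback invoked by cpufreq_update_util() above is provided by a cpufreq
 * governor registering a per-CPU struct update_util_data through
 * cpufreq_add_update_util_hook(). The governor and callback names below are
 * made up for the example; only the hook API itself is the real interface.
 *
 *	static DEFINE_PER_CPU(struct update_util_data, example_gov_data);
 *
 *	static void example_gov_update(struct update_util_data *data, u64 time,
 *				       unsigned int flags)
 *	{
 *		// re-evaluate the CPU's frequency for the new utilization
 *	}
 *
 *	// on governor start, for each CPU of the policy:
 *	cpufreq_add_update_util_hook(cpu, &per_cpu(example_gov_data, cpu),
 *				     example_gov_update);
 *	// on governor stop:
 *	cpufreq_remove_update_util_hook(cpu);
 */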
3304 | |
3305 | #ifdef arch_scale_freq_capacity |
3306 | # ifndef arch_scale_freq_invariant |
3307 | # define arch_scale_freq_invariant() true |
3308 | # endif |
3309 | #else |
3310 | # define arch_scale_freq_invariant() false |
3311 | #endif |
3312 | |
3313 | #ifdef CONFIG_SMP |
3314 | |
3315 | unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, |
3316 | unsigned long *min, |
3317 | unsigned long *max); |
3318 | |
3319 | unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual, |
3320 | unsigned long min, |
3321 | unsigned long max); |
3322 | |
3323 | |
3324 | /* |
3325 | * Verify the fitness of task @p to run on @cpu taking into account the |
3326 | * CPU original capacity and the runtime/deadline ratio of the task. |
3327 | * |
3328 | * The function will return true if the original capacity of @cpu is |
3329 | * greater than or equal to task's deadline density right shifted by |
3330 | * (BW_SHIFT - SCHED_CAPACITY_SHIFT) and false otherwise. |
3331 | */ |
3332 | static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu) |
3333 | { |
3334 | unsigned long cap = arch_scale_cpu_capacity(cpu); |
3335 | |
3336 | return cap >= p->dl.dl_density >> (BW_SHIFT - SCHED_CAPACITY_SHIFT); |
3337 | } |
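/*
 * Worked example (illustrative numbers): with BW_SHIFT == 20 and
 * SCHED_CAPACITY_SHIFT == 10, the check above reduces to
 * cap / SCHED_CAPACITY_SCALE >= dl_runtime / dl_deadline. A task with a 3ms
 * runtime out of a 10ms deadline has dl_density ~= 0.3 << 20 == 314572, and
 * 314572 >> 10 == 307, so a CPU of capacity 512 fits while one of capacity
 * 256 does not.
 */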
3338 | |
3339 | static inline unsigned long cpu_bw_dl(struct rq *rq) |
3340 | { |
3341 | return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; |
3342 | } |
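/*
 * Worked example: running_bw is kept in BW_SHIFT (20-bit) fixed point, so a
 * deadline bandwidth of one quarter of a CPU is 262144, and
 * (262144 * SCHED_CAPACITY_SCALE) >> BW_SHIFT == 256, i.e. a quarter of
 * SCHED_CAPACITY_SCALE.
 */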
3343 | |
3344 | static inline unsigned long cpu_util_dl(struct rq *rq) |
3345 | { |
3346 | return READ_ONCE(rq->avg_dl.util_avg); |
3347 | } |
3348 | |
3349 | |
3350 | extern unsigned long cpu_util_cfs(int cpu); |
3351 | extern unsigned long cpu_util_cfs_boost(int cpu); |
3352 | |
3353 | static inline unsigned long cpu_util_rt(struct rq *rq) |
3354 | { |
3355 | return READ_ONCE(rq->avg_rt.util_avg); |
3356 | } |
3357 | |
3358 | #else /* !CONFIG_SMP */ |
3359 | static inline bool update_other_load_avgs(struct rq *rq) { return false; } |
3360 | #endif /* CONFIG_SMP */ |
3361 | |
3362 | #ifdef CONFIG_UCLAMP_TASK |
3363 | |
3364 | unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id); |
3365 | |
3366 | /* |
3367 | * When uclamp is compiled in, the aggregation at rq level is 'turned off' |
3368 | * by default in the fast path and only gets turned on once userspace performs |
3369 | * an operation that requires it. |
3370 | * |
3371 |  * Returns true if userspace has opted in to using uclamp, and hence |
3372 |  * aggregation at rq level is active. |
3373 | */ |
3374 | static inline bool uclamp_is_used(void) |
3375 | { |
3376 | return static_branch_likely(&sched_uclamp_used); |
3377 | } |
3378 | |
3379 | /* |
3380 |  * Enabling a static branch takes cpus_read_lock(). Check |
3381 |  * uclamp_is_used() before enabling so that cpus_read_lock() is not |
3382 |  * taken on every call, because this static key is never disabled |
3383 |  * once it has been enabled. |
3384 | */ |
3385 | static inline void sched_uclamp_enable(void) |
3386 | { |
3387 | if (!uclamp_is_used()) |
3388 | static_branch_enable(&sched_uclamp_used); |
3389 | } |
3390 | |
3391 | static inline unsigned long uclamp_rq_get(struct rq *rq, |
3392 | enum uclamp_id clamp_id) |
3393 | { |
3394 | return READ_ONCE(rq->uclamp[clamp_id].value); |
3395 | } |
3396 | |
3397 | static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, |
3398 | unsigned int value) |
3399 | { |
3400 | WRITE_ONCE(rq->uclamp[clamp_id].value, value); |
3401 | } |
3402 | |
3403 | static inline bool uclamp_rq_is_idle(struct rq *rq) |
3404 | { |
3405 | return rq->uclamp_flags & UCLAMP_FLAG_IDLE; |
3406 | } |
3407 | |
3408 | /* Is the rq being capped/throttled by uclamp_max? */ |
3409 | static inline bool uclamp_rq_is_capped(struct rq *rq) |
3410 | { |
3411 | unsigned long rq_util; |
3412 | unsigned long max_util; |
3413 | |
3414 | if (!uclamp_is_used()) |
3415 | return false; |
3416 | |
3417 | 	rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq); |
3418 | max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value); |
3419 | |
3420 | return max_util != SCHED_CAPACITY_SCALE && rq_util >= max_util; |
3421 | } |
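/*
 * Example (assuming uclamp is in use): with this rq's UCLAMP_MAX clamp at 512
 * and a combined CFS+RT utilization of 700, the function returns true, since
 * 512 != SCHED_CAPACITY_SCALE and 700 >= 512.
 */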
3422 | |
3423 | #define for_each_clamp_id(clamp_id) \ |
3424 | for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++) |
3425 | |
3426 | extern unsigned int sysctl_sched_uclamp_util_min_rt_default; |
3427 | |
3428 | |
3429 | static inline unsigned int uclamp_none(enum uclamp_id clamp_id) |
3430 | { |
3431 | if (clamp_id == UCLAMP_MIN) |
3432 | return 0; |
3433 | return SCHED_CAPACITY_SCALE; |
3434 | } |
3435 | |
3436 | /* Integer rounded range for each bucket */ |
3437 | #define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS) |
3438 | |
3439 | static inline unsigned int uclamp_bucket_id(unsigned int clamp_value) |
3440 | { |
3441 | return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1); |
3442 | } |
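/*
 * Worked example (assuming the default CONFIG_UCLAMP_BUCKETS_COUNT of 5):
 * UCLAMP_BUCKET_DELTA == DIV_ROUND_CLOSEST(1024, 5) == 205, so a clamp value
 * of 300 maps to bucket 300 / 205 == 1, and the maximum value 1024 maps to
 * min(1024 / 205, UCLAMP_BUCKETS - 1) == 4, the last bucket.
 */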
3443 | |
3444 | static inline void |
3445 | uclamp_se_set(struct uclamp_se *uc_se, unsigned int value, bool user_defined) |
3446 | { |
3447 | uc_se->value = value; |
3448 | 	uc_se->bucket_id = uclamp_bucket_id(value); |
3449 | uc_se->user_defined = user_defined; |
3450 | } |
3451 | |
3452 | #else /* !CONFIG_UCLAMP_TASK: */ |
3453 | |
3454 | static inline unsigned long |
3455 | uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id) |
3456 | { |
3457 | if (clamp_id == UCLAMP_MIN) |
3458 | return 0; |
3459 | |
3460 | return SCHED_CAPACITY_SCALE; |
3461 | } |
3462 | |
3463 | static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; } |
3464 | |
3465 | static inline bool uclamp_is_used(void) |
3466 | { |
3467 | return false; |
3468 | } |
3469 | |
3470 | static inline void sched_uclamp_enable(void) {} |
3471 | |
3472 | static inline unsigned long |
3473 | uclamp_rq_get(struct rq *rq, enum uclamp_id clamp_id) |
3474 | { |
3475 | if (clamp_id == UCLAMP_MIN) |
3476 | return 0; |
3477 | |
3478 | return SCHED_CAPACITY_SCALE; |
3479 | } |
3480 | |
3481 | static inline void |
3482 | uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, unsigned int value) |
3483 | { |
3484 | } |
3485 | |
3486 | static inline bool uclamp_rq_is_idle(struct rq *rq) |
3487 | { |
3488 | return false; |
3489 | } |
3490 | |
3491 | #endif /* !CONFIG_UCLAMP_TASK */ |
3492 | |
3493 | #ifdef CONFIG_HAVE_SCHED_AVG_IRQ |
3494 | |
3495 | static inline unsigned long cpu_util_irq(struct rq *rq) |
3496 | { |
3497 | return READ_ONCE(rq->avg_irq.util_avg); |
3498 | } |
3499 | |
3500 | static inline |
3501 | unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) |
3502 | { |
3503 | util *= (max - irq); |
3504 | util /= max; |
3505 | |
3506 | return util; |
3508 | } |
3509 | |
3510 | #else /* !CONFIG_HAVE_SCHED_AVG_IRQ: */ |
3511 | |
3512 | static inline unsigned long cpu_util_irq(struct rq *rq) |
3513 | { |
3514 | return 0; |
3515 | } |
3516 | |
3517 | static inline |
3518 | unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) |
3519 | { |
3520 | return util; |
3521 | } |
3522 | |
3523 | #endif /* !CONFIG_HAVE_SCHED_AVG_IRQ */ |
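/*
 * Worked example for scale_irq_capacity(): with max == 1024, an IRQ
 * utilization of 128 leaves a factor of (1024 - 128) / 1024 == 7/8 for the
 * other classes, so a raw utilization of 600 scales to 600 * 896 / 1024 == 525.
 */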
3524 | |
3525 | extern void __setparam_fair(struct task_struct *p, const struct sched_attr *attr); |
3526 | |
3527 | #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) |
3528 | |
3529 | #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus))) |
3530 | |
3531 | DECLARE_STATIC_KEY_FALSE(sched_energy_present); |
3532 | |
3533 | static inline bool sched_energy_enabled(void) |
3534 | { |
3535 | return static_branch_unlikely(&sched_energy_present); |
3536 | } |
3537 | |
3538 | #else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */ |
3539 | |
3540 | #define perf_domain_span(pd) NULL |
3541 | |
3542 | static inline bool sched_energy_enabled(void) { return false; } |
3543 | |
3544 | #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ |
3545 | |
3546 | #ifdef CONFIG_MEMBARRIER |
3547 | |
3548 | /* |
3549 | * The scheduler provides memory barriers required by membarrier between: |
3550 | * - prior user-space memory accesses and store to rq->membarrier_state, |
3551 | * - store to rq->membarrier_state and following user-space memory accesses. |
3552 | * In the same way it provides those guarantees around store to rq->curr. |
3553 | */ |
3554 | static inline void membarrier_switch_mm(struct rq *rq, |
3555 | struct mm_struct *prev_mm, |
3556 | struct mm_struct *next_mm) |
3557 | { |
3558 | int membarrier_state; |
3559 | |
3560 | if (prev_mm == next_mm) |
3561 | return; |
3562 | |
3563 | 	membarrier_state = atomic_read(&next_mm->membarrier_state); |
3564 | if (READ_ONCE(rq->membarrier_state) == membarrier_state) |
3565 | return; |
3566 | |
3567 | WRITE_ONCE(rq->membarrier_state, membarrier_state); |
3568 | } |
3569 | |
3570 | #else /* !CONFIG_MEMBARRIER: */ |
3571 | |
3572 | static inline void membarrier_switch_mm(struct rq *rq, |
3573 | struct mm_struct *prev_mm, |
3574 | struct mm_struct *next_mm) |
3575 | { |
3576 | } |
3577 | |
3578 | #endif /* !CONFIG_MEMBARRIER */ |
3579 | |
3580 | #ifdef CONFIG_SMP |
3581 | static inline bool is_per_cpu_kthread(struct task_struct *p) |
3582 | { |
3583 | if (!(p->flags & PF_KTHREAD)) |
3584 | return false; |
3585 | |
3586 | if (p->nr_cpus_allowed != 1) |
3587 | return false; |
3588 | |
3589 | return true; |
3590 | } |
3591 | #endif |
3592 | |
3593 | extern void swake_up_all_locked(struct swait_queue_head *q); |
3594 | extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); |
3595 | |
3596 | extern int try_to_wake_up(struct task_struct *tsk, unsigned int state, int wake_flags); |
3597 | |
3598 | #ifdef CONFIG_PREEMPT_DYNAMIC |
3599 | extern int preempt_dynamic_mode; |
3600 | extern int sched_dynamic_mode(const char *str); |
3601 | extern void sched_dynamic_update(int mode); |
3602 | #endif |
3603 | extern const char *preempt_modes[]; |
3604 | |
3605 | #ifdef CONFIG_SCHED_MM_CID |
3606 | |
3607 | #define SCHED_MM_CID_PERIOD_NS (100ULL * 1000000) /* 100ms */ |
3608 | #define MM_CID_SCAN_DELAY 100 /* 100ms */ |
3609 | |
3610 | extern raw_spinlock_t cid_lock; |
3611 | extern int use_cid_lock; |
3612 | |
3613 | extern void sched_mm_cid_migrate_from(struct task_struct *t); |
3614 | extern void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t); |
3615 | extern void task_tick_mm_cid(struct rq *rq, struct task_struct *curr); |
3616 | extern void init_sched_mm_cid(struct task_struct *t); |
3617 | |
3618 | static inline void __mm_cid_put(struct mm_struct *mm, int cid) |
3619 | { |
3620 | if (cid < 0) |
3621 | return; |
3622 | 	cpumask_clear_cpu(cid, mm_cidmask(mm)); |
3623 | } |
3624 | |
3625 | /* |
3626 | * The per-mm/cpu cid can have the MM_CID_LAZY_PUT flag set or transition to |
3627 | * the MM_CID_UNSET state without holding the rq lock, but the rq lock needs to |
3628 | * be held to transition to other states. |
3629 | * |
3630 | * State transitions synchronized with cmpxchg or try_cmpxchg need to be |
3631 | * consistent across CPUs, which prevents use of this_cpu_cmpxchg. |
3632 | */ |
3633 | static inline void mm_cid_put_lazy(struct task_struct *t) |
3634 | { |
3635 | struct mm_struct *mm = t->mm; |
3636 | struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; |
3637 | int cid; |
3638 | |
3639 | lockdep_assert_irqs_disabled(); |
3640 | cid = __this_cpu_read(pcpu_cid->cid); |
3641 | if (!mm_cid_is_lazy_put(cid) || |
3642 | !try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET)) |
3643 | return; |
3644 | 	__mm_cid_put(mm, mm_cid_clear_lazy_put(cid)); |
3645 | } |
3646 | |
3647 | static inline int mm_cid_pcpu_unset(struct mm_struct *mm) |
3648 | { |
3649 | struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; |
3650 | int cid, res; |
3651 | |
3652 | lockdep_assert_irqs_disabled(); |
3653 | cid = __this_cpu_read(pcpu_cid->cid); |
3654 | for (;;) { |
3655 | if (mm_cid_is_unset(cid)) |
3656 | return MM_CID_UNSET; |
3657 | /* |
3658 | * Attempt transition from valid or lazy-put to unset. |
3659 | */ |
3660 | res = cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, cid, MM_CID_UNSET); |
3661 | if (res == cid) |
3662 | break; |
3663 | cid = res; |
3664 | } |
3665 | return cid; |
3666 | } |
3667 | |
3668 | static inline void mm_cid_put(struct mm_struct *mm) |
3669 | { |
3670 | int cid; |
3671 | |
3672 | lockdep_assert_irqs_disabled(); |
3673 | cid = mm_cid_pcpu_unset(mm); |
3674 | if (cid == MM_CID_UNSET) |
3675 | return; |
3676 | 	__mm_cid_put(mm, mm_cid_clear_lazy_put(cid)); |
3677 | } |
3678 | |
3679 | static inline int __mm_cid_try_get(struct task_struct *t, struct mm_struct *mm) |
3680 | { |
3681 | struct cpumask *cidmask = mm_cidmask(mm); |
3682 | struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; |
3683 | int cid, max_nr_cid, allowed_max_nr_cid; |
3684 | |
3685 | /* |
3686 | * After shrinking the number of threads or reducing the number |
3687 | * of allowed cpus, reduce the value of max_nr_cid so expansion |
3688 | * of cid allocation will preserve cache locality if the number |
3689 | * of threads or allowed cpus increase again. |
3690 | */ |
3691 | 	max_nr_cid = atomic_read(&mm->max_nr_cid); |
3692 | while ((allowed_max_nr_cid = min_t(int, READ_ONCE(mm->nr_cpus_allowed), |
3693 | atomic_read(&mm->mm_users))), |
3694 | max_nr_cid > allowed_max_nr_cid) { |
3695 | /* atomic_try_cmpxchg loads previous mm->max_nr_cid into max_nr_cid. */ |
3696 | 		if (atomic_try_cmpxchg(&mm->max_nr_cid, &max_nr_cid, allowed_max_nr_cid)) { |
3697 | max_nr_cid = allowed_max_nr_cid; |
3698 | break; |
3699 | } |
3700 | } |
3701 | /* Try to re-use recent cid. This improves cache locality. */ |
3702 | cid = __this_cpu_read(pcpu_cid->recent_cid); |
3703 | if (!mm_cid_is_unset(cid) && cid < max_nr_cid && |
3704 | 	    !cpumask_test_and_set_cpu(cid, cidmask)) |
3705 | return cid; |
3706 | /* |
3707 | * Expand cid allocation if the maximum number of concurrency |
3708 | * IDs allocated (max_nr_cid) is below the number cpus allowed |
3709 | * and number of threads. Expanding cid allocation as much as |
3710 | * possible improves cache locality. |
3711 | */ |
3712 | cid = max_nr_cid; |
3713 | 	while (cid < READ_ONCE(mm->nr_cpus_allowed) && cid < atomic_read(&mm->mm_users)) { |
3714 | /* atomic_try_cmpxchg loads previous mm->max_nr_cid into cid. */ |
3715 | 		if (!atomic_try_cmpxchg(&mm->max_nr_cid, &cid, cid + 1)) |
3716 | continue; |
3717 | 		if (!cpumask_test_and_set_cpu(cid, cidmask)) |
3718 | return cid; |
3719 | } |
3720 | /* |
3721 | * Find the first available concurrency id. |
3722 | * Retry finding first zero bit if the mask is temporarily |
3723 | * filled. This only happens during concurrent remote-clear |
3724 | * which owns a cid without holding a rq lock. |
3725 | */ |
3726 | for (;;) { |
3727 | 		cid = cpumask_first_zero(cidmask); |
3728 | if (cid < READ_ONCE(mm->nr_cpus_allowed)) |
3729 | break; |
3730 | cpu_relax(); |
3731 | } |
3732 | 	if (cpumask_test_and_set_cpu(cid, cidmask)) |
3733 | return -1; |
3734 | |
3735 | return cid; |
3736 | } |
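/*
 * Worked example of the clamp above (illustrative numbers): for an mm with
 * mm_users == 3 that is allowed to run on 8 CPUs, allowed_max_nr_cid ==
 * min_t(int, 8, 3) == 3, so the reuse and expansion paths prefer handing out
 * concurrency IDs from the compact range [0, 3) instead of spreading them
 * across all 8 CPUs.
 */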
3737 | |
3738 | /* |
3739 | * Save a snapshot of the current runqueue time of this cpu |
3740 | * with the per-cpu cid value, allowing to estimate how recently it was used. |
3741 | */ |
3742 | static inline void mm_cid_snapshot_time(struct rq *rq, struct mm_struct *mm) |
3743 | { |
3744 | struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(rq)); |
3745 | |
3746 | lockdep_assert_rq_held(rq); |
3747 | WRITE_ONCE(pcpu_cid->time, rq->clock); |
3748 | } |
3749 | |
3750 | static inline int __mm_cid_get(struct rq *rq, struct task_struct *t, |
3751 | struct mm_struct *mm) |
3752 | { |
3753 | int cid; |
3754 | |
3755 | /* |
3756 | * All allocations (even those using the cid_lock) are lock-free. If |
3757 | * use_cid_lock is set, hold the cid_lock to perform cid allocation to |
3758 | * guarantee forward progress. |
3759 | */ |
3760 | if (!READ_ONCE(use_cid_lock)) { |
3761 | cid = __mm_cid_try_get(t, mm); |
3762 | if (cid >= 0) |
3763 | goto end; |
3764 | raw_spin_lock(&cid_lock); |
3765 | } else { |
3766 | raw_spin_lock(&cid_lock); |
3767 | cid = __mm_cid_try_get(t, mm); |
3768 | if (cid >= 0) |
3769 | goto unlock; |
3770 | } |
3771 | |
3772 | /* |
3773 | * cid concurrently allocated. Retry while forcing following |
3774 | * allocations to use the cid_lock to ensure forward progress. |
3775 | */ |
3776 | WRITE_ONCE(use_cid_lock, 1); |
3777 | /* |
3778 | * Set use_cid_lock before allocation. Only care about program order |
3779 | * because this is only required for forward progress. |
3780 | */ |
3781 | barrier(); |
3782 | /* |
3783 | * Retry until it succeeds. It is guaranteed to eventually succeed once |
3784 | * all newcoming allocations observe the use_cid_lock flag set. |
3785 | */ |
3786 | do { |
3787 | cid = __mm_cid_try_get(t, mm); |
3788 | cpu_relax(); |
3789 | } while (cid < 0); |
3790 | /* |
3791 | * Allocate before clearing use_cid_lock. Only care about |
3792 | * program order because this is for forward progress. |
3793 | */ |
3794 | barrier(); |
3795 | WRITE_ONCE(use_cid_lock, 0); |
3796 | unlock: |
3797 | raw_spin_unlock(&cid_lock); |
3798 | end: |
3799 | mm_cid_snapshot_time(rq, mm); |
3800 | |
3801 | return cid; |
3802 | } |
3803 | |
3804 | static inline int mm_cid_get(struct rq *rq, struct task_struct *t, |
3805 | struct mm_struct *mm) |
3806 | { |
3807 | struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; |
3808 | struct cpumask *cpumask; |
3809 | int cid; |
3810 | |
3811 | lockdep_assert_rq_held(rq); |
3812 | cpumask = mm_cidmask(mm); |
3813 | cid = __this_cpu_read(pcpu_cid->cid); |
3814 | if (mm_cid_is_valid(cid)) { |
3815 | mm_cid_snapshot_time(rq, mm); |
3816 | return cid; |
3817 | } |
3818 | if (mm_cid_is_lazy_put(cid)) { |
3819 | if (try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET)) |
3820 | 			__mm_cid_put(mm, mm_cid_clear_lazy_put(cid)); |
3821 | } |
3822 | cid = __mm_cid_get(rq, t, mm); |
3823 | __this_cpu_write(pcpu_cid->cid, cid); |
3824 | __this_cpu_write(pcpu_cid->recent_cid, cid); |
3825 | |
3826 | return cid; |
3827 | } |
3828 | |
3829 | static inline void switch_mm_cid(struct rq *rq, |
3830 | struct task_struct *prev, |
3831 | struct task_struct *next) |
3832 | { |
3833 | /* |
3834 | * Provide a memory barrier between rq->curr store and load of |
3835 | * {prev,next}->mm->pcpu_cid[cpu] on rq->curr->mm transition. |
3836 | * |
3837 | * Should be adapted if context_switch() is modified. |
3838 | */ |
3839 | if (!next->mm) { // to kernel |
3840 | /* |
3841 | * user -> kernel transition does not guarantee a barrier, but |
3842 | * we can use the fact that it performs an atomic operation in |
3843 | * mmgrab(). |
3844 | */ |
3845 | if (prev->mm) // from user |
3846 | smp_mb__after_mmgrab(); |
3847 | /* |
3848 | * kernel -> kernel transition does not change rq->curr->mm |
3849 | * state. It stays NULL. |
3850 | */ |
3851 | } else { // to user |
3852 | /* |
3853 | * kernel -> user transition does not provide a barrier |
3854 | * between rq->curr store and load of {prev,next}->mm->pcpu_cid[cpu]. |
3855 | * Provide it here. |
3856 | */ |
3857 | if (!prev->mm) { // from kernel |
3858 | smp_mb(); |
3859 | } else { // from user |
3860 | /* |
3861 | * user->user transition relies on an implicit |
3862 | * memory barrier in switch_mm() when |
3863 | * current->mm changes. If the architecture |
3864 | * switch_mm() does not have an implicit memory |
3865 | * barrier, it is emitted here. If current->mm |
3866 | * is unchanged, no barrier is needed. |
3867 | */ |
3868 | smp_mb__after_switch_mm(); |
3869 | } |
3870 | } |
3871 | if (prev->mm_cid_active) { |
3872 | 		mm_cid_snapshot_time(rq, prev->mm); |
3873 | 		mm_cid_put_lazy(prev); |
3874 | prev->mm_cid = -1; |
3875 | } |
3876 | if (next->mm_cid_active) |
3877 | 		next->last_mm_cid = next->mm_cid = mm_cid_get(rq, next, next->mm); |
3878 | } |
3879 | |
3880 | #else /* !CONFIG_SCHED_MM_CID: */ |
3881 | static inline void switch_mm_cid(struct rq *rq, struct task_struct *prev, struct task_struct *next) { } |
3882 | static inline void sched_mm_cid_migrate_from(struct task_struct *t) { } |
3883 | static inline void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t) { } |
3884 | static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) { } |
3885 | static inline void init_sched_mm_cid(struct task_struct *t) { } |
3886 | #endif /* !CONFIG_SCHED_MM_CID */ |
3887 | |
3888 | extern u64 avg_vruntime(struct cfs_rq *cfs_rq); |
3889 | extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se); |
3890 | #ifdef CONFIG_SMP |
3891 | static inline |
3892 | void move_queued_task_locked(struct rq *src_rq, struct rq *dst_rq, struct task_struct *task) |
3893 | { |
3894 | 	lockdep_assert_rq_held(src_rq); |
3895 | 	lockdep_assert_rq_held(dst_rq); |
3896 |  |
3897 | 	deactivate_task(src_rq, task, 0); |
3898 | 	set_task_cpu(task, dst_rq->cpu); |
3899 | 	activate_task(dst_rq, task, 0); |
3900 | } |
3901 | |
3902 | static inline |
3903 | bool task_is_pushable(struct rq *rq, struct task_struct *p, int cpu) |
3904 | { |
3905 | if (!task_on_cpu(rq, p) && |
3906 | 	    cpumask_test_cpu(cpu, &p->cpus_mask)) |
3907 | return true; |
3908 | |
3909 | return false; |
3910 | } |
3911 | #endif |
3912 | |
3913 | #ifdef CONFIG_RT_MUTEXES |
3914 | |
3915 | static inline int __rt_effective_prio(struct task_struct *pi_task, int prio) |
3916 | { |
3917 | if (pi_task) |
3918 | prio = min(prio, pi_task->prio); |
3919 | |
3920 | return prio; |
3921 | } |
3922 | |
3923 | static inline int rt_effective_prio(struct task_struct *p, int prio) |
3924 | { |
3925 | struct task_struct *pi_task = rt_mutex_get_top_task(p); |
3926 | |
3927 | return __rt_effective_prio(pi_task, prio); |
3928 | } |
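/*
 * Illustrative example: kernel priority values are inverted (lower means
 * higher priority; 0-99 for RT, 100-139 for normal tasks). A normal task at
 * prio 120 holding an rt_mutex whose top waiter is an RT task at prio 98 is
 * boosted to rt_effective_prio() == min(120, 98) == 98.
 */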
3929 | |
3930 | #else /* !CONFIG_RT_MUTEXES: */ |
3931 | |
3932 | static inline int rt_effective_prio(struct task_struct *p, int prio) |
3933 | { |
3934 | return prio; |
3935 | } |
3936 | |
3937 | #endif /* !CONFIG_RT_MUTEXES */ |
3938 | |
3939 | extern int __sched_setscheduler(struct task_struct *p, const struct sched_attr *attr, bool user, bool pi); |
3940 | extern int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx); |
3941 | extern const struct sched_class *__setscheduler_class(int policy, int prio); |
3942 | extern void set_load_weight(struct task_struct *p, bool update_load); |
3943 | extern void enqueue_task(struct rq *rq, struct task_struct *p, int flags); |
3944 | extern bool dequeue_task(struct rq *rq, struct task_struct *p, int flags); |
3945 | |
3946 | extern void check_class_changing(struct rq *rq, struct task_struct *p, |
3947 | const struct sched_class *prev_class); |
3948 | extern void check_class_changed(struct rq *rq, struct task_struct *p, |
3949 | const struct sched_class *prev_class, |
3950 | int oldprio); |
3951 | |
3952 | #ifdef CONFIG_SMP |
3953 | extern struct balance_callback *splice_balance_callbacks(struct rq *rq); |
3954 | extern void balance_callbacks(struct rq *rq, struct balance_callback *head); |
3955 | #else |
3956 | |
3957 | static inline struct balance_callback *splice_balance_callbacks(struct rq *rq) |
3958 | { |
3959 | return NULL; |
3960 | } |
3961 | |
3962 | static inline void balance_callbacks(struct rq *rq, struct balance_callback *head) |
3963 | { |
3964 | } |
3965 | |
3966 | #endif |
3967 | |
3968 | #ifdef CONFIG_SCHED_CLASS_EXT |
3969 | /* |
3970 | * Used by SCX in the enable/disable paths to move tasks between sched_classes |
3971 | * and establish invariants. |
3972 | */ |
3973 | struct sched_enq_and_set_ctx { |
3974 | struct task_struct *p; |
3975 | int queue_flags; |
3976 | bool queued; |
3977 | bool running; |
3978 | }; |
3979 | |
3980 | void sched_deq_and_put_task(struct task_struct *p, int queue_flags, |
3981 | struct sched_enq_and_set_ctx *ctx); |
3982 | void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx); |
3983 | |
3984 | #endif /* CONFIG_SCHED_CLASS_EXT */ |
3985 | |
3986 | #include "ext.h" |
3987 | |
3988 | #endif /* _KERNEL_SCHED_SCHED_H */ |
3989 |