1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * stop-task scheduling class. |
4 | * |
5 | * The stop task is the highest priority task in the system, it preempts |
6 | * everything and will be preempted by nothing. |
7 | * |
8 | * See kernel/stop_machine.c |
9 | */ |
10 | |
11 | #ifdef CONFIG_SMP |
/*
 * The per-CPU stop task is pinned: always return the task's current CPU
 * regardless of @cpu and @flags.
 */
static int
select_task_rq_stop(struct task_struct *p, int cpu, int flags)
{
	return task_cpu(p); /* stop tasks never migrate */
}
17 | |
/*
 * Balance callback: report whether this class has work on @rq, i.e.
 * whether the per-CPU stop task is runnable here. Nothing to pull.
 */
static int
balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	int runnable = sched_stop_runnable(rq);

	return runnable;
}
23 | #endif /* CONFIG_SMP */ |
24 | |
/*
 * Wakeup-preemption check for this class. The stop task is the highest
 * priority task in the system, so nothing can preempt it; intentionally
 * a no-op.
 */
static void
wakeup_preempt_stop(struct rq *rq, struct task_struct *p, int flags)
{
	/* we're never preempted */
}
30 | |
31 | static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first) |
32 | { |
33 | stop->se.exec_start = rq_clock_task(rq); |
34 | } |
35 | |
36 | static struct task_struct *pick_task_stop(struct rq *rq) |
37 | { |
38 | if (!sched_stop_runnable(rq)) |
39 | return NULL; |
40 | |
41 | return rq->stop; |
42 | } |
43 | |
44 | static struct task_struct *pick_next_task_stop(struct rq *rq) |
45 | { |
46 | struct task_struct *p = pick_task_stop(rq); |
47 | |
48 | if (p) |
49 | set_next_task_stop(rq, stop: p, first: true); |
50 | |
51 | return p; |
52 | } |
53 | |
/*
 * Enqueue the stop task: it has no run queue of its own, only bump the
 * rq's runnable-task count.
 *
 * Fix: removed leaked inlay-hint label ("count: 1") — invalid C.
 */
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	add_nr_running(rq, 1);
}
59 | |
/*
 * Dequeue the stop task: mirror of enqueue_task_stop(), only drop the
 * rq's runnable-task count.
 *
 * Fix: removed leaked inlay-hint label ("count: 1") — invalid C.
 */
static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	sub_nr_running(rq, 1);
}
65 | |
/*
 * Yield is meaningless for the stop task: it is the only task of its
 * class on the CPU and nothing can take its place.
 */
static void yield_task_stop(struct rq *rq)
{
	BUG(); /* the stop task should never yield, it's pointless. */
}
70 | |
/*
 * The stop task is being switched out: fold its elapsed runtime into
 * the common runtime-accounting path.
 */
static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
	update_curr_common(rq);
}
75 | |
76 | /* |
77 | * scheduler tick hitting a task of our scheduling class. |
78 | * |
79 | * NOTE: This function can be called remotely by the tick offload that |
80 | * goes along full dynticks. Therefore no local assumption can be made |
81 | * and everything must be accessed through the @rq and @curr passed in |
82 | * parameters. |
83 | */ |
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
	/* No time slicing for the stop task; nothing to do on a tick. */
}
87 | |
/*
 * Tasks can never be moved into the stop class at runtime; reaching
 * this callback indicates scheduler-core breakage.
 */
static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
	BUG(); /* it's impossible to change to this class */
}
92 | |
/*
 * The stop task has no adjustable priority; a priority change on it
 * indicates scheduler-core breakage.
 */
static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG(); /* how!?, what priority? */
}
98 | |
static void update_curr_stop(struct rq *rq)
{
	/*
	 * Intentionally empty: runtime is accounted when the stop task is
	 * switched out (see put_prev_task_stop()).
	 */
}
102 | |
103 | /* |
104 | * Simple, special scheduling class for the per-CPU stop tasks: |
105 | */ |
DEFINE_SCHED_CLASS(stop) = {

	/* queue management */
	.enqueue_task		= enqueue_task_stop,
	.dequeue_task		= dequeue_task_stop,
	.yield_task		= yield_task_stop,

	.wakeup_preempt		= wakeup_preempt_stop,

	/* task selection */
	.pick_next_task		= pick_next_task_stop,
	.put_prev_task		= put_prev_task_stop,
	.set_next_task		= set_next_task_stop,

#ifdef CONFIG_SMP
	/* SMP hooks; stop tasks never migrate, but the callbacks must exist */
	.balance		= balance_stop,
	.pick_task		= pick_task_stop,
	.select_task_rq		= select_task_rq_stop,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.task_tick		= task_tick_stop,

	/* class/priority transitions are impossible for the stop task */
	.prio_changed		= prio_changed_stop,
	.switched_to		= switched_to_stop,
	.update_curr		= update_curr_stop,
};
131 | |