// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/cpu.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/irq_work.h>

#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>

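/*
 * Logical IPI numbers. Each one is backed by a dedicated per-cpu virq in
 * the linear range handed over via riscv_ipi_set_virq_range().
 */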
enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_CPU_CRASH_STOP,
	IPI_IRQ_WORK,
	IPI_TIMER,
	IPI_MAX
};

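/*
 * Logical CPU id to hart id mapping. Entries start out as INVALID_HARTID
 * and are filled in as harts are discovered during boot.
 */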
unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = {
	[0 ... NR_CPUS-1] = INVALID_HARTID
};

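/* Register the boot hart as logical CPU 0 before the CPU maps are set up. */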
void __init smp_setup_processor_id(void)
{
	cpuid_to_hartid_map(0) = boot_cpu_hartid;
}

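/*
 * IPI bookkeeping: a dummy per-cpu cookie for request_percpu_irq(), the
 * base and size of the IPI virq range, and cached irq descriptors so that
 * sending an IPI avoids a descriptor lookup in the hot path.
 */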
static DEFINE_PER_CPU_READ_MOSTLY(int, ipi_dummy_dev);
static int ipi_virq_base __ro_after_init;
static int nr_ipi __ro_after_init = IPI_MAX;
static struct irq_desc *ipi_desc[IPI_MAX] __read_mostly;

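/* Reverse lookup: linear scan of the map; -ENOENT if the hart is unknown. */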
int riscv_hartid_to_cpuid(unsigned long hartid)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (cpuid_to_hartid_map(i) == hartid)
			return i;

	return -ENOENT;
}

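/* Mark the calling CPU offline and park it in a wait-for-interrupt loop. */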
static void ipi_stop(void)
{
	set_cpu_online(smp_processor_id(), false);
	while (1)
		wait_for_interrupt();
}

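/*
 * Crash-stop path for kexec: save this CPU's registers for the crash dump,
 * signal the panicking CPU via the waiting_for_crash_ipi counter, then park
 * (through the hotplug stop method where available, so the hart is handed
 * back to the platform).
 */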
#ifdef CONFIG_KEXEC_CORE
static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);

static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
	crash_save_cpu(regs, cpu);

	atomic_dec(&waiting_for_crash_ipi);

	local_irq_disable();

#ifdef CONFIG_HOTPLUG_CPU
	if (cpu_has_hotplug(cpu))
		cpu_ops->cpu_stop();
#endif

	for (;;)
		wait_for_interrupt();
}
#else
static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
	unreachable();
}
#endif

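/*
 * Low-level senders: __ipi_send_mask() injects the IPI through the cached
 * irq descriptor, avoiding a descriptor lookup on every send.
 */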
static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
{
	__ipi_send_mask(ipi_desc[op], mask);
}

static void send_ipi_single(int cpu, enum ipi_message_type op)
{
	__ipi_send_mask(ipi_desc[op], cpumask_of(cpu));
}

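/* irq_work support: raise a self-IPI so pending work runs in irq context. */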
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	send_ipi_single(smp_processor_id(), IPI_IRQ_WORK);
}
#endif

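/*
 * Common IPI handler: recover the IPI type from the virq's offset within
 * the IPI range and dispatch to the matching service routine.
 */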
static irqreturn_t handle_IPI(int irq, void *data)
{
	int ipi = irq - ipi_virq_base;

	switch (ipi) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;
	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;
	case IPI_CPU_STOP:
		ipi_stop();
		break;
	case IPI_CPU_CRASH_STOP:
		ipi_cpu_crash_stop(smp_processor_id(), get_irq_regs());
		break;
	case IPI_IRQ_WORK:
		irq_work_run();
		break;
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		tick_receive_broadcast();
		break;
#endif
	default:
		pr_warn("CPU%d: unhandled IPI%d\n", smp_processor_id(), ipi);
		break;
	}

	return IRQ_HANDLED;
}

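/*
 * Enable/disable the per-cpu IPI interrupts on the calling CPU. Run on each
 * CPU as it is brought online or taken down.
 */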
void riscv_ipi_enable(void)
{
	int i;

	if (WARN_ON_ONCE(!ipi_virq_base))
		return;

	for (i = 0; i < nr_ipi; i++)
		enable_percpu_irq(ipi_virq_base + i, 0);
}

void riscv_ipi_disable(void)
{
	int i;

	if (WARN_ON_ONCE(!ipi_virq_base))
		return;

	for (i = 0; i < nr_ipi; i++)
		disable_percpu_irq(ipi_virq_base + i);
}

bool riscv_ipi_have_virq_range(void)
{
	return (ipi_virq_base) ? true : false;
}

DEFINE_STATIC_KEY_FALSE(riscv_ipi_for_rfence);
EXPORT_SYMBOL_GPL(riscv_ipi_for_rfence);

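/*
 * Called once by the IPI provider (e.g. the SBI or CLINT driver) to hand
 * over a linear range of per-cpu virqs. Requests one handler per IPI type,
 * marks them IRQ_HIDDEN so they stay out of the regular /proc/interrupts
 * listing, and records whether remote fences should use these IPIs rather
 * than firmware calls.
 */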
void riscv_ipi_set_virq_range(int virq, int nr, bool use_for_rfence)
{
	int i, err;

	if (WARN_ON(ipi_virq_base))
		return;

	WARN_ON(nr < IPI_MAX);
	nr_ipi = min(nr, IPI_MAX);
	ipi_virq_base = virq;

	/* Request IPIs */
	for (i = 0; i < nr_ipi; i++) {
		err = request_percpu_irq(ipi_virq_base + i, handle_IPI,
					 "IPI", &ipi_dummy_dev);
		WARN_ON(err);

		ipi_desc[i] = irq_to_desc(ipi_virq_base + i);
		irq_set_status_flags(ipi_virq_base + i, IRQ_HIDDEN);
	}

	/* Enable IPIs for the boot CPU immediately */
	riscv_ipi_enable();

	/* Update RFENCE static key */
	if (use_for_rfence)
		static_branch_enable(&riscv_ipi_for_rfence);
	else
		static_branch_disable(&riscv_ipi_for_rfence);
}

static const char * const ipi_names[] = {
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_CPU_STOP]		= "CPU stop interrupts",
	[IPI_CPU_CRASH_STOP]	= "CPU stop (for crash dump) interrupts",
	[IPI_IRQ_WORK]		= "IRQ work interrupts",
	[IPI_TIMER]		= "Timer broadcast interrupts",
};

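/* Print per-CPU counts for each IPI type; backs /proc/interrupts. */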
void show_ipi_stats(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < IPI_MAX; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
		seq_printf(p, " %s\n", ipi_names[i]);
	}
}

void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_mask(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_single(cpu, IPI_CALL_FUNC);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	send_ipi_mask(mask, IPI_TIMER);
}
#endif

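/*
 * Stop all other CPUs for halt/reboot: IPI them, then give them up to one
 * second to mark themselves offline before complaining.
 */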
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		if (system_state <= SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		send_ipi_mask(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(cpu_online_mask));
}

#ifdef CONFIG_KEXEC_CORE
/*
 * The number of CPUs online, not counting this CPU (which may not be
 * fully online and so not counted in num_online_cpus()).
 */
static inline unsigned int num_other_online_cpus(void)
{
	unsigned int this_cpu_online = cpu_online(smp_processor_id());

	return num_online_cpus() - this_cpu_online;
}

void crash_smp_send_stop(void)
{
	static int cpus_stopped;
	cpumask_t mask;
	unsigned long timeout;

	/*
	 * This function can be called twice in panic path, but obviously
	 * we execute this only once.
	 */
	if (cpus_stopped)
		return;

	cpus_stopped = 1;

	/*
	 * If this cpu is the only one alive at this point in time, online or
	 * not, there are no stop messages to be sent around, so just back out.
	 */
	if (num_other_online_cpus() == 0)
		return;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);

	atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());

	pr_crit("SMP: stopping secondary CPUs\n");
	send_ipi_mask(&mask, IPI_CPU_CRASH_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
		udelay(1);

	if (atomic_read(&waiting_for_crash_ipi) > 0)
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(&mask));
}

bool smp_crash_stop_failed(void)
{
	return (atomic_read(&waiting_for_crash_ipi) > 0);
}
#endif

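/* Scheduler hook: kick @cpu so it re-evaluates its runqueue. */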
void arch_smp_send_reschedule(int cpu)
{
	send_ipi_single(cpu, IPI_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);