// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * RajeshwarR: Dec 11, 2007
 *   -- Added support for Inter Processor Interrupts
 *
 * Vineetg: Nov 1st, 2007
 *   -- Initial Write (Borrowed heavily from ARM)
 */

#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <linux/irqdomain.h>
#include <linux/export.h>
#include <linux/of_fdt.h>

#include <asm/mach_desc.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/processor.h>

#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
EXPORT_SYMBOL_GPL(smp_atomic_ops_lock);
#endif

struct plat_smp_ops __weak plat_smp_ops;

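/*
 * The weak definition above is a placeholder: platform/SoC code (e.g. the
 * ARC MCIP driver) fills in these callbacks during early boot. A minimal
 * sketch of what a platform might wire up (function names hypothetical,
 * fields as used in this file):
 *
 *	static void __init myplat_smp_init(void)
 *	{
 *		plat_smp_ops.info      = "myplat SMP";
 *		plat_smp_ops.cpu_kick  = myplat_cpu_kick;
 *		plat_smp_ops.ipi_send  = myplat_ipi_send;
 *		plat_smp_ops.ipi_clear = myplat_ipi_clear;
 *	}
 */
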
/* XXX: per cpu ? Only needed once in early secondary boot */
struct task_struct *secondary_idle_tsk;

static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask)
{
	unsigned long dt_root = of_get_flat_dt_root();
	const char *buf;

	buf = of_get_flat_dt_prop(dt_root, name, NULL);
	if (!buf)
		return -EINVAL;

	if (cpulist_parse(buf, cpumask))
		return -EINVAL;

	return 0;
}
56 | |
57 | /* |
58 | * Read from DeviceTree and setup cpu possible mask. If there is no |
59 | * "possible-cpus" property in DeviceTree pretend all [0..NR_CPUS-1] exist. |
60 | */ |
61 | static void __init arc_init_cpu_possible(void) |
62 | { |
63 | struct cpumask cpumask; |
64 | |
65 | if (arc_get_cpu_map(name: "possible-cpus" , cpumask: &cpumask)) { |
66 | pr_warn("Failed to get possible-cpus from dtb, pretending all %u cpus exist\n" , |
67 | NR_CPUS); |
68 | |
69 | cpumask_setall(dstp: &cpumask); |
70 | } |
71 | |
72 | if (!cpumask_test_cpu(cpu: 0, cpumask: &cpumask)) |
73 | panic(fmt: "Master cpu (cpu[0]) is missed in cpu possible mask!" ); |
74 | |
75 | init_cpu_possible(src: &cpumask); |
76 | } |
77 | |
78 | /* |
79 | * Called from setup_arch() before calling setup_processor() |
80 | * |
81 | * - Initialise the CPU possible map early - this describes the CPUs |
82 | * which may be present or become present in the system. |
83 | * - Call early smp init hook. This can initialize a specific multi-core |
84 | * IP which is say common to several platforms (hence not part of |
85 | * platform specific int_early() hook) |
86 | */ |
87 | void __init smp_init_cpus(void) |
88 | { |
89 | arc_init_cpu_possible(); |
90 | |
91 | if (plat_smp_ops.init_early_smp) |
92 | plat_smp_ops.init_early_smp(); |
93 | } |

/* called from init() => process 1 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/*
	 * if platform didn't set the present map already, do it now
	 * boot cpu is set to present already by init/main.c
	 */
	if (num_present_cpus() <= 1)
		init_cpu_present(cpu_possible_mask);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * Default smp boot helper for Run-on-reset case where all cores start off
 * together. Non-masters need to wait for Master to start running.
 * This is implemented using a flag in memory, which Non-masters spin-wait on.
 * Master sets it to cpu-id of core to "ungate" it.
 */
static volatile int wake_flag;

#ifdef CONFIG_ISA_ARCOMPACT

#define __boot_read(f)		f
#define __boot_write(f, v)	f = v

#else

#define __boot_read(f)		arc_read_uncached_32(&f)
#define __boot_write(f, v)	arc_write_uncached_32(&f, v)

#endif
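
/*
 * Note on the accessors above: on ARCv2 a secondary may still be running
 * with its caches disabled and not yet coherent with the master, so the
 * wake_flag handshake presumably must bypass the caches via the uncached
 * accessors; ARCompact configs using this helper get by with plain
 * accesses.
 */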

static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
{
	BUG_ON(cpu == 0);

	__boot_write(wake_flag, cpu);
}

void arc_platform_smp_wait_to_boot(int cpu)
{
	/* for halt-on-reset, we've waited already */
	if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
		return;

	while (__boot_read(wake_flag) != cpu)
		;

	__boot_write(wake_flag, 0);
}
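
/*
 * Handshake timeline for the run-on-reset default above, roughly:
 *
 *	master				secondary (cpu N)
 *	------				-----------------
 *	__cpu_up(N)			spins while __boot_read(wake_flag) != N
 *	  __boot_write(wake_flag, N)
 *					sees N, __boot_write(wake_flag, 0)
 *					continues to start_kernel_secondary()
 */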

const char *arc_platform_smp_cpuinfo(void)
{
	return plat_smp_ops.info ? : "";
}

/*
 * The very first "C" code executed by secondary
 * Called from asm stub in head.S
 * "current"/R25 already setup by low level boot code
 */
void start_kernel_secondary(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/* MMU, Caches, Vector Table, Interrupts etc */
	setup_processor();

	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	/* Some SMP H/w setup - for each cpu */
	if (plat_smp_ops.init_per_cpu)
		plat_smp_ops.init_per_cpu(cpu);

	if (machine_desc->init_per_cpu)
		machine_desc->init_per_cpu(cpu);

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);

	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/*
 * Called from kernel_init() -> smp_init() - for each CPU
 *
 * At this point, Secondary Processor is "HALT"ed, either because:
 * - It booted, but was halted in head.S, or
 * - It was configured to halt-on-reset
 * So it needs to be woken up.
 *
 * Essential requirements being where to run from (PC) and stack (SP)
 */
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	unsigned long wait_till;

	secondary_idle_tsk = idle;

	pr_info("Idle Task [%d] %p", cpu, idle);
	pr_info("Trying to bring up CPU%u ...\n", cpu);

	if (plat_smp_ops.cpu_kick)
		plat_smp_ops.cpu_kick(cpu,
				(unsigned long)first_lines_of_secondary);
	else
		arc_default_smp_cpu_kick(cpu, (unsigned long)NULL);

	/* wait for 1 sec after kicking the secondary */
	wait_till = jiffies + HZ;
	while (time_before(jiffies, wait_till)) {
		if (cpu_online(cpu))
			break;
	}

	if (!cpu_online(cpu)) {
		pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu);
		return -1;
	}

	secondary_idle_tsk = NULL;

	return 0;
}

/*****************************************************************************/
/*              Inter Processor Interrupt Handling                           */
/*****************************************************************************/

enum ipi_msg_type {
	IPI_EMPTY = 0,
	IPI_RESCHEDULE = 1,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

/*
 * In arches with IRQ for each msg type (above), receiver can use IRQ-id to
 * figure out what msg was sent. For those which don't (ARC has dedicated IPI
 * IRQ), the msg-type needs to be conveyed via per-cpu data
 */

static DEFINE_PER_CPU(unsigned long, ipi_data);

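/*
 * Pending messages are packed as a bitmask in @ipi_data, indexed by
 * ipi_msg_type. E.g. if IPI_RESCHEDULE (bit 1) and IPI_CALL_FUNC (bit 2)
 * are both posted before the receiver runs, ipi_data reads 0x6 and a
 * single IPI suffices: do_IPI() below drains both bits in one pass.
 */
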
static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
{
	unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu);
	unsigned long old, new;
	unsigned long flags;

	pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu);

	local_irq_save(flags);

	/*
	 * Atomically write new msg bit (in case others are writing too),
	 * and read back old value
	 */
	do {
		new = old = *ipi_data_ptr;
		new |= 1U << msg;
	} while (cmpxchg(ipi_data_ptr, old, new) != old);

	/*
	 * Call the platform specific IPI kick function, but avoid if possible:
	 * Only do so if there's no pending msg from other concurrent sender(s).
	 * Otherwise, receiver will see this msg as well when it takes the
	 * IPI corresponding to that msg. This is true, even if it is already in
	 * IPI handler, because !@old means it has not yet dequeued the msg(s)
	 * so @new msg can be a free-loader
	 */
	if (plat_smp_ops.ipi_send && !old)
		plat_smp_ops.ipi_send(cpu);

	local_irq_restore(flags);
}
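
/*
 * Concretely: if CPU A posts IPI_RESCHEDULE while @old == 0, it raises the
 * h/w IPI; if CPU B then posts IPI_CALL_FUNC to the same target before it
 * has drained ipi_data (@old != 0), B skips the kick and its msg simply
 * piggybacks on A's still-pending interrupt.
 */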

static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
{
	unsigned int cpu;

	for_each_cpu(cpu, callmap)
		ipi_send_msg_one(cpu, msg);
}

void arch_smp_send_reschedule(int cpu)
{
	ipi_send_msg_one(cpu, IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	struct cpumask targets;

	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	ipi_send_msg(&targets, IPI_CPU_STOP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	ipi_send_msg_one(cpu, IPI_CALL_FUNC);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	ipi_send_msg(mask, IPI_CALL_FUNC);
}

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(void)
{
	machine_halt();
}

static inline int __do_IPI(unsigned long msg)
{
	int rc = 0;

	switch (msg) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CPU_STOP:
		ipi_cpu_stop();
		break;

	default:
		rc = 1;
	}

	return rc;
}

/*
 * arch-common ISR to handle inter-processor interrupts
 * Has hooks for platform specific IPI
 */
static irqreturn_t do_IPI(int irq, void *dev_id)
{
	unsigned long pending;
	unsigned long __maybe_unused copy;

	pr_debug("IPI [%ld] received on cpu %d\n",
		 *this_cpu_ptr(&ipi_data), smp_processor_id());

	if (plat_smp_ops.ipi_clear)
		plat_smp_ops.ipi_clear(irq);

	/*
	 * "dequeue" the msg corresponding to this IPI (and possibly other
	 * piggybacked msg from elided IPIs: see ipi_send_msg_one() above)
	 */
	copy = pending = xchg(this_cpu_ptr(&ipi_data), 0);

	do {
		unsigned long msg = __ffs(pending);
		int rc;

		rc = __do_IPI(msg);
		if (rc)
			pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);
		pending &= ~(1U << msg);
	} while (pending);

	return IRQ_HANDLED;
}

/*
 * API called by platform code to hookup arch-common ISR to their IPI IRQ
 *
 * Note: If IPI is provided by platform (vs. say ARC MCIP), their intc setup/map
 * function needs to call irq_set_percpu_devid() for IPI IRQ, otherwise
 * request_percpu_irq() below will fail
 */
static DEFINE_PER_CPU(int, ipi_dev);

int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
{
	int *dev = per_cpu_ptr(&ipi_dev, cpu);
	unsigned int virq = irq_find_mapping(NULL, hwirq);

	if (!virq)
		panic("Cannot find virq for root domain and hwirq=%lu", hwirq);

	/* Boot cpu calls request, all call enable */
	if (!cpu) {
		int rc;

		rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
		if (rc)
			panic("Percpu IRQ request failed for %u\n", virq);
	}

	enable_percpu_irq(virq, 0);

	return 0;
}
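/*
 * Typical usage, as a sketch (variable names hypothetical): a platform's
 * intc code, once per cpu, would do something like
 *
 *	irq_set_percpu_devid(ipi_virq);		// at irq map/setup time
 *	smp_ipi_irq_setup(cpu, ipi_hwirq);	// boot cpu requests, all enable
 *
 * with the boot cpu doing the one-time request_percpu_irq() internally.
 */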