// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/kernel/smp.c
 *
 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>
#include <linux/kernel_stat.h>

#include <linux/atomic.h>
#include <asm/bugs.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/procinfo.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>

#include <trace/events/ipi.h>
/*
 * As of 2.5, kernels no longer have an init_tasks structure,
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack.
 */
struct secondary_data secondary_data;

enum ipi_msg_type {
        IPI_WAKEUP,
        IPI_TIMER,
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CPU_STOP,
        IPI_IRQ_WORK,
        IPI_COMPLETION,
        NR_IPI,
        /*
         * CPU_BACKTRACE is special and not included in NR_IPI
         * or traceable with trace_ipi_*
         */
        IPI_CPU_BACKTRACE = NR_IPI,
        /*
         * SGI8-15 can be reserved by secure firmware, and thus may
         * not be usable by the kernel. Please keep the above limited
         * to at most 8 entries.
         */
        MAX_IPI
};

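/*
 * Linux IRQ range backing the IPIs.  The interrupt controller driver hands
 * this range to us via set_smp_ipi_range(); until that happens ipi_irq_base
 * stays 0 and per-CPU IPI setup/teardown is skipped.
 */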
static int ipi_irq_base __read_mostly;
static int nr_ipi __read_mostly = NR_IPI;
static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly;

static void ipi_setup(int cpu);

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops __ro_after_init;

void __init smp_set_ops(const struct smp_operations *ops)
{
        if (ops)
                smp_ops = *ops;
}

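/*
 * With LPAE, physical addresses can exceed 32 bits, so describe the pgd by
 * its page frame number, which always fits in an unsigned long, rather than
 * by its physical address.
 */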
static unsigned long get_arch_pgd(pgd_t *pgd)
{
#ifdef CONFIG_ARM_LPAE
        return __phys_to_pfn(virt_to_phys(pgd));
#else
        return virt_to_phys(pgd);
#endif
}

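/*
 * On big.LITTLE systems with branch-predictor hardening, the big and LITTLE
 * cores may need different Spectre workarounds, so each secondary CPU gets
 * its own processor vtable instead of sharing the boot CPU's.
 */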
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
static int secondary_biglittle_prepare(unsigned int cpu)
{
        if (!cpu_vtable[cpu])
                cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);

        return cpu_vtable[cpu] ? 0 : -ENOMEM;
}

static void secondary_biglittle_init(void)
{
        init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
}
#else
static int secondary_biglittle_prepare(unsigned int cpu)
{
        return 0;
}

static void secondary_biglittle_init(void)
{
}
#endif

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int ret;

        if (!smp_ops.smp_boot_secondary)
                return -ENOSYS;

        ret = secondary_biglittle_prepare(cpu);
        if (ret)
                return ret;

        /*
         * We need to tell the secondary core where to find
         * its stack and the page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
        secondary_data.mpu_rgn_info = &mpu_rgn_info;
#endif

#ifdef CONFIG_MMU
        secondary_data.pgdir = virt_to_phys(idmap_pgd);
        secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
        secondary_data.task = idle;
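        /*
         * Publish secondary_data to the incoming CPU, which starts out with
         * its MMU and caches off and so must see these values in RAM.
         */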
        sync_cache_w(&secondary_data);

        /*
         * Now bring the CPU into our world.
         */
        ret = smp_ops.smp_boot_secondary(cpu, idle);
        if (ret == 0) {
                /*
                 * CPU was successfully started, wait for it
                 * to come online or time out.
                 */
                wait_for_completion_timeout(&cpu_running,
                                            msecs_to_jiffies(1000));

                if (!cpu_online(cpu)) {
                        pr_crit("CPU%u: failed to come online\n", cpu);
                        ret = -EIO;
                }
        } else {
                pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
        }

        memset(&secondary_data, 0, sizeof(secondary_data));
        return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
        if (smp_ops.smp_init_cpus)
                smp_ops.smp_init_cpus();
}

int platform_can_secondary_boot(void)
{
        return !!smp_ops.smp_boot_secondary;
}

int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
        if (smp_ops.cpu_kill)
                return 1;
#endif

        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int platform_cpu_kill(unsigned int cpu)
{
        if (smp_ops.cpu_kill)
                return smp_ops.cpu_kill(cpu);
        return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
        if (smp_ops.cpu_disable)
                return smp_ops.cpu_disable(cpu);

        return 0;
}

int platform_can_hotplug_cpu(unsigned int cpu)
{
        /* cpu_die must be specified to support hotplug */
        if (!smp_ops.cpu_die)
                return 0;

        if (smp_ops.cpu_can_disable)
                return smp_ops.cpu_can_disable(cpu);

        /*
         * By default, allow disabling all CPUs except the first one,
         * since this is special on a lot of platforms, e.g. because
         * of clock tick interrupts.
         */
        return cpu != 0;
}

static void ipi_teardown(int cpu)
{
        int i;

        if (WARN_ON_ONCE(!ipi_irq_base))
                return;

        for (i = 0; i < nr_ipi; i++)
                disable_percpu_irq(ipi_irq_base + i);
}

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        int ret;

        ret = platform_cpu_disable(cpu);
        if (ret)
                return ret;

#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
        remove_cpu_topology(cpu);
#endif

        /*
         * Take this CPU offline. Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);
        ipi_teardown(cpu);

        /*
         * OK - migrate IRQs away from this CPU
         */
        irq_migrate_all_off_this_cpu();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         *
         * Caches are flushed to the Level of Unification Inner Shareable
         * to write-back dirty lines to unified caches shared by all CPUs.
         */
        flush_cache_louis();
        local_flush_tlb_all();

        return 0;
}

/*
 * Called on the thread which requested that a CPU be shut down, after
 * that shutdown has completed.
 */
void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
{
        pr_debug("CPU%u: shutdown\n", cpu);

        clear_tasks_mm_cpumask(cpu);
        /*
         * platform_cpu_kill() is generally expected to do the powering off
         * and/or cutting of clocks to the dying CPU. Optionally, this may
         * be done by the CPU which is dying in preference to supporting
         * this call, but that means there is _no_ synchronisation between
         * the requesting CPU and the dying CPU actually losing power.
         */
        if (!platform_cpu_kill(cpu))
                pr_err("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __noreturn arch_cpu_idle_dead(void)
{
        unsigned int cpu = smp_processor_id();

        idle_task_exit();

        local_irq_disable();

        /*
         * Flush the data out of the L1 cache for this CPU. This must be
         * before the completion to ensure that data is safely written out
         * before platform_cpu_kill() gets called - which may disable
         * *this* CPU and power down its cache.
         */
        flush_cache_louis();

        /*
         * Tell cpuhp_bp_sync_dead() that this CPU is now safe to dispose
         * of. Once this returns, power and/or clocks can be removed at
         * any point from this CPU and its cache by platform_cpu_kill().
         */
        cpuhp_ap_report_dead();

        /*
         * Ensure that the cache lines associated with that completion are
         * written out. This covers the case where _this_ CPU is doing the
         * powering down, to ensure that the completion is visible to the
         * CPU waiting for this one.
         */
        flush_cache_louis();

        /*
         * The actual CPU shutdown procedure is at least platform (if not
         * CPU) specific. This may remove power, or it may simply spin.
         *
         * Platforms are generally expected *NOT* to return from this call,
         * although there are some which do because they have no way to
         * power down the CPU. These platforms are the _only_ reason we
         * have a return path which uses the fragment of assembly below.
         *
         * The return path should not be used for platforms which can
         * power off the CPU.
         */
        if (smp_ops.cpu_die)
                smp_ops.cpu_die(cpu);

        pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
                cpu);

        /*
         * Do not return to the idle loop - jump back to the secondary
         * cpu initialisation. There's some initialisation which needs
         * to be repeated to undo the effects of taking the CPU offline.
         */
        __asm__("mov    sp, %0\n"
        "       mov     fp, #0\n"
        "       mov     r0, %1\n"
        "       b       secondary_start_kernel"
                :
                : "r" (task_stack_page(current) + THREAD_SIZE - 8),
                  "r" (current)
                : "r0");

        unreachable();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void smp_store_cpu_info(unsigned int cpuid)
{
        struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

        cpu_info->loops_per_jiffy = loops_per_jiffy;
        cpu_info->cpuid = read_cpuid_id();

        store_cpu_topology(cpuid);
        check_cpu_icache_size(cpuid);
}

static void set_current(struct task_struct *cur)
{
        /* Set TPIDRURO */
        asm("mcr p15, 0, %0, c13, c0, 3" :: "r" (cur) : "memory");
}

/*
 * This is the secondary CPU boot entry. We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(struct task_struct *task)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu;

        set_current(task);

        secondary_biglittle_init();

        /*
         * The identity mapping is uncached (strongly ordered), so
         * switch away from it before attempting any exclusive accesses.
         */
        cpu_switch_mm(mm->pgd, mm);
        local_flush_bp_all();
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        cpu = smp_processor_id();
        mmgrab(mm);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));

        cpu_init();

#ifndef CONFIG_MMU
        setup_vectors_base();
#endif
        pr_debug("CPU%u: Booted secondary processor\n", cpu);

        trace_hardirqs_off();

        /*
         * Give the platform a chance to do its own initialisation.
         */
        if (smp_ops.smp_secondary_init)
                smp_ops.smp_secondary_init(cpu);

        notify_cpu_starting(cpu);

        ipi_setup(cpu);

        calibrate_delay();

        smp_store_cpu_info(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue. Wait for
         * the CPU migration code to notice that the CPU is online
         * before we continue - which happens after __cpu_up returns.
         */
        set_cpu_online(cpu, true);

        check_other_bugs();

        complete(&cpu_running);

        local_irq_enable();
        local_fiq_enable();
        local_abt_enable();

        /*
         * OK, it's off to the idle thread for us
         */
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for_each_online_cpu(cpu)
                bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);

        hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
        set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int ncores = num_possible_cpus();

        init_cpu_topology();

        smp_store_cpu_info(smp_processor_id());

        /*
         * are we trying to boot more cores than exist?
         */
        if (max_cpus > ncores)
                max_cpus = ncores;
        if (ncores > 1 && max_cpus) {
                /*
                 * Initialise the present map, which describes the set of CPUs
                 * actually populated at the present time. A platform should
                 * re-initialize the map in the platform's smp_prepare_cpus()
                 * if present != possible (e.g. physical hotplug).
                 */
                init_cpu_present(cpu_possible_mask);

                /*
                 * Initialise the SCU if there is more than one CPU
                 * and let them know where to start.
                 */
                if (smp_ops.smp_prepare_cpus)
                        smp_ops.smp_prepare_cpus(max_cpus);
        }
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
        [IPI_WAKEUP]     = "CPU wakeup interrupts",
        [IPI_TIMER]      = "Timer broadcast interrupts",
        [IPI_RESCHEDULE] = "Rescheduling interrupts",
        [IPI_CALL_FUNC]  = "Function call interrupts",
        [IPI_CPU_STOP]   = "CPU stop interrupts",
        [IPI_IRQ_WORK]   = "IRQ work interrupts",
        [IPI_COMPLETION] = "completion interrupts",
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);

void show_ipi_list(struct seq_file *p, int prec)
{
        unsigned int cpu, i;

        for (i = 0; i < NR_IPI; i++) {
                if (!ipi_desc[i])
                        continue;

                seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

                for_each_online_cpu(cpu)
                        seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));

                seq_printf(p, " %s\n", ipi_types[i]);
        }
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
        if (arch_irq_work_has_interrupt())
                smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_TIMER);
}
#endif

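/*
 * stop_lock only serialises the diagnostic output in ipi_cpu_stop(), so
 * that stack dumps from several stopping CPUs do not interleave.
 */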
static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
        local_fiq_disable();

        if (system_state <= SYSTEM_RUNNING) {
                raw_spin_lock(&stop_lock);
                pr_crit("CPU%u: stopping\n", cpu);
                dump_stack();
                raw_spin_unlock(&stop_lock);
        }

        set_cpu_online(cpu, false);

        while (1) {
                cpu_relax();
                wfe();
        }
}

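/*
 * IPI_COMPLETION support: a caller registers a completion on behalf of a
 * given CPU, and that CPU completes it when it receives IPI_COMPLETION.
 */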
static DEFINE_PER_CPU(struct completion *, cpu_completion);

int register_ipi_completion(struct completion *completion, int cpu)
{
        per_cpu(cpu_completion, cpu) = completion;
        return IPI_COMPLETION;
}

static void ipi_complete(unsigned int cpu)
{
        complete(per_cpu(cpu_completion, cpu));
}

/*
 * Main handler for inter-processor interrupts
 */
static void do_handle_IPI(int ipinr)
{
        unsigned int cpu = smp_processor_id();

        if ((unsigned)ipinr < NR_IPI)
                trace_ipi_entry(ipi_types[ipinr]);

        switch (ipinr) {
        case IPI_WAKEUP:
                break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        case IPI_TIMER:
                tick_receive_broadcast();
                break;
#endif

        case IPI_RESCHEDULE:
                scheduler_ipi();
                break;

        case IPI_CALL_FUNC:
                generic_smp_call_function_interrupt();
                break;

        case IPI_CPU_STOP:
                ipi_cpu_stop(cpu);
                break;

#ifdef CONFIG_IRQ_WORK
        case IPI_IRQ_WORK:
                irq_work_run();
                break;
#endif

        case IPI_COMPLETION:
                ipi_complete(cpu);
                break;

        case IPI_CPU_BACKTRACE:
                printk_deferred_enter();
                nmi_cpu_backtrace(get_irq_regs());
                printk_deferred_exit();
                break;

        default:
                pr_crit("CPU%u: Unknown IPI message 0x%x\n",
                        cpu, ipinr);
                break;
        }

        if ((unsigned)ipinr < NR_IPI)
                trace_ipi_exit(ipi_types[ipinr]);
}

/* Legacy version, should go away once all irqchips have been converted */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        irq_enter();
        do_handle_IPI(ipinr);
        irq_exit();

        set_irq_regs(old_regs);
}

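/*
 * IRQ handler used when the IPIs are backed by genuine per-CPU interrupts;
 * translate the Linux IRQ number back into an IPI message number.
 */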
static irqreturn_t ipi_handler(int irq, void *data)
{
        do_handle_IPI(irq - ipi_irq_base);
        return IRQ_HANDLED;
}

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
        trace_ipi_raise(target, ipi_types[ipinr]);
        __ipi_send_mask(ipi_desc[ipinr], target);
}

static void ipi_setup(int cpu)
{
        int i;

        if (WARN_ON_ONCE(!ipi_irq_base))
                return;

        for (i = 0; i < nr_ipi; i++)
                enable_percpu_irq(ipi_irq_base + i, 0);
}

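/*
 * Called by the interrupt controller driver to hand the kernel a contiguous
 * range of per-CPU IRQs to be used as IPIs.
 */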
void __init set_smp_ipi_range(int ipi_base, int n)
{
        int i;

        WARN_ON(n < MAX_IPI);
        nr_ipi = min(n, MAX_IPI);

        for (i = 0; i < nr_ipi; i++) {
                int err;

                err = request_percpu_irq(ipi_base + i, ipi_handler,
                                         "IPI", &irq_stat);
                WARN_ON(err);

                ipi_desc[i] = irq_to_desc(ipi_base + i);
                irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
        }

        ipi_irq_base = ipi_base;

        /* Setup the boot CPU immediately */
        ipi_setup(smp_processor_id());
}

void arch_smp_send_reschedule(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
        unsigned long timeout;
        struct cpumask mask;

        cpumask_copy(&mask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &mask);
        if (!cpumask_empty(&mask))
                smp_cross_call(&mask, IPI_CPU_STOP);

        /* Wait up to one second for other CPUs to stop */
        timeout = USEC_PER_SEC;
        while (num_online_cpus() > 1 && timeout--)
                udelay(1);

        if (num_online_cpus() > 1)
                pr_warn("SMP: failed to stop secondary CPUs\n");
}

/*
 * If two CPUs panic at the same time (say CPU1 and CPU2) and CPU1 calls
 * panic_smp_self_stop() before CPU2 issues crash_smp_send_stop(), CPU1 can
 * no longer receive the IPI from CPU2 and stays marked online forever, so
 * kdump fails. Hence this arch-specific panic_smp_self_stop(), which also
 * marks the CPU offline with set_cpu_online(smp_processor_id(), false).
 */
void __noreturn panic_smp_self_stop(void)
{
        pr_debug("CPU %u will stop doing anything useful since another CPU has panicked\n",
                 smp_processor_id());
        set_cpu_online(smp_processor_id(), false);
        while (1)
                cpu_relax();
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

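/*
 * Rescale loops_per_jiffy (and hence udelay()) when the CPU frequency
 * changes: scale up before a frequency increase and down after a decrease,
 * so that delays never end up shorter than requested while the transition
 * is in flight.  The *_ref values remember the calibration done at boot.
 */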
static int cpufreq_callback(struct notifier_block *nb,
                            unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cpumask *cpus = freq->policy->cpus;
        int cpu, first = cpumask_first(cpus);
        unsigned int lpj;

        if (freq->flags & CPUFREQ_CONST_LOOPS)
                return NOTIFY_OK;

        if (!per_cpu(l_p_j_ref, first)) {
                for_each_cpu(cpu, cpus) {
                        per_cpu(l_p_j_ref, cpu) =
                                per_cpu(cpu_data, cpu).loops_per_jiffy;
                        per_cpu(l_p_j_ref_freq, cpu) = freq->old;
                }

                if (!global_l_p_j_ref) {
                        global_l_p_j_ref = loops_per_jiffy;
                        global_l_p_j_ref_freq = freq->old;
                }
        }

        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
                loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
                                                global_l_p_j_ref_freq,
                                                freq->new);

                lpj = cpufreq_scale(per_cpu(l_p_j_ref, first),
                                    per_cpu(l_p_j_ref_freq, first), freq->new);
                for_each_cpu(cpu, cpus)
                        per_cpu(cpu_data, cpu).loops_per_jiffy = lpj;
        }
        return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
        .notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
        return cpufreq_register_notifier(&cpufreq_notifier,
                                         CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif

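/*
 * Raise the backtrace IPI directly through its irq_desc: IPI_CPU_BACKTRACE
 * sits outside NR_IPI, so it is deliberately kept out of the normal IPI
 * accounting and tracepoints.
 */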
static void raise_nmi(cpumask_t *mask)
{
        __ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
{
        nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_nmi);
}