// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2002 ARM Ltd.
 * All Rights Reserved
 *
 * This code is specific to the hardware found on ARM Realview and
 * Versatile Express platforms where the CPUs are unable to be individually
 * woken, and where there is no way to hot-unplug CPUs. Real platforms
 * should not copy this code.
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/smp_plat.h>

#include "platsmp.h"

/*
 * versatile_cpu_release controls the release of CPUs from the holding
 * pen in headsmp.S, which exists because we are not always able to
 * control the release of individual CPUs from the board firmware.
 * Production platforms do not need this.
 */
volatile int versatile_cpu_release = -1;
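
/*
 * For reference, the pen loop in headsmp.S that consumes this value
 * behaves roughly like the following C sketch (illustrative only, not
 * the actual assembly; "hw_cpu_id" stands in for the id the secondary
 * derives from its MPIDR):
 *
 *	while (versatile_cpu_release != hw_cpu_id)
 *		;			// spin in the pen
 *	secondary_startup();		// enter the kernel proper
 */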

/*
 * Write versatile_cpu_release in a way that is guaranteed to be visible to
 * all observers, irrespective of whether they're taking part in coherency
 * or not. This is necessary for the hotplug code to work reliably.
 */
static void versatile_write_cpu_release(int val)
{
	versatile_cpu_release = val;
	/* Order the store for coherent observers... */
	smp_wmb();
	/* ...and push it out to memory for CPUs not (yet) coherent. */
	sync_cache_w(&versatile_cpu_release);
}

/*
 * versatile_lock exists to avoid running the loops_per_jiffy delay loop
 * calibrations on the secondary CPU while the requesting CPU is using
 * the limited-bandwidth bus - which affects the calibration value.
 * Production platforms do not need this.
 */
static DEFINE_RAW_SPINLOCK(versatile_lock);

void versatile_secondary_init(unsigned int cpu)
{
	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	versatile_write_cpu_release(-1);

	/*
	 * Synchronise with the boot thread.
	 */
	raw_spin_lock(&versatile_lock);
	raw_spin_unlock(&versatile_lock);
}
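
/*
 * Bringing a secondary CPU online is a short handshake driven from the
 * boot CPU (a summary of the code below): take versatile_lock, publish
 * the secondary's hardware id via versatile_write_cpu_release(), kick
 * the CPU out of the pen with a wakeup IPI, wait up to one second for
 * it to write -1 back as an acknowledgement, and finally drop the lock
 * so the secondary can run its delay-loop calibration undisturbed.
 */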
int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	raw_spin_lock(&versatile_lock);

	/*
	 * This is really belt and braces; we hold unintended secondary
	 * CPUs in the holding pen until we're ready for them. However,
	 * since we haven't sent them a soft interrupt, they shouldn't
	 * be there.
	 */
	versatile_write_cpu_release(cpu_logical_map(cpu));

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

	/*
	 * Wait for the secondary to acknowledge the release by writing
	 * -1 back, but don't wait forever.
	 */
	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (versatile_cpu_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up, let it run its
	 * calibrations, then wait for it to finish
	 */
	raw_spin_unlock(&versatile_lock);

	/*
	 * If the secondary never reset versatile_cpu_release, it never
	 * left the pen; report the failure.
	 */
	return versatile_cpu_release != -1 ? -ENOSYS : 0;
}
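
/*
 * These hooks are not called directly; a machine wires them into its
 * struct smp_operations, along the lines of the sketch below (the
 * instance name is made up here, the field names come from
 * <asm/smp.h>):
 *
 *	static const struct smp_operations foo_smp_ops __initconst = {
 *		.smp_secondary_init	= versatile_secondary_init,
 *		.smp_boot_secondary	= versatile_boot_secondary,
 *	};
 */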