1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * Copyright (c) 2013 MundoReader S.L. |
4 | * Author: Heiko Stuebner <heiko@sntech.de> |
5 | */ |
6 | |
7 | #include <linux/delay.h> |
8 | #include <linux/init.h> |
9 | #include <linux/smp.h> |
10 | #include <linux/io.h> |
11 | #include <linux/of.h> |
12 | #include <linux/of_address.h> |
13 | #include <linux/regmap.h> |
14 | #include <linux/mfd/syscon.h> |
15 | |
16 | #include <linux/reset.h> |
17 | #include <linux/cpu.h> |
18 | #include <asm/cacheflush.h> |
19 | #include <asm/cp15.h> |
20 | #include <asm/smp_scu.h> |
21 | #include <asm/smp_plat.h> |
22 | #include <asm/mach/map.h> |
23 | |
24 | #include "core.h" |
25 | |
26 | static void __iomem *scu_base_addr; |
27 | static void __iomem *sram_base_addr; |
28 | static int ncores; |
29 | |
30 | #define PMU_PWRDN_CON 0x08 |
31 | #define PMU_PWRDN_ST 0x0c |
32 | |
33 | #define PMU_PWRDN_SCU 4 |
34 | |
35 | static struct regmap *pmu; |
36 | static int has_pmu = true; |
37 | |
38 | static int pmu_power_domain_is_on(int pd) |
39 | { |
40 | u32 val; |
41 | int ret; |
42 | |
43 | ret = regmap_read(map: pmu, PMU_PWRDN_ST, val: &val); |
44 | if (ret < 0) |
45 | return ret; |
46 | |
47 | return !(val & BIT(pd)); |
48 | } |
49 | |
50 | static struct reset_control *rockchip_get_core_reset(int cpu) |
51 | { |
52 | struct device *dev = get_cpu_device(cpu); |
53 | struct device_node *np; |
54 | |
55 | /* The cpu device is only available after the initial core bringup */ |
56 | if (dev) |
57 | np = dev->of_node; |
58 | else |
59 | np = of_get_cpu_node(cpu, NULL); |
60 | |
61 | return of_reset_control_get_exclusive(node: np, NULL); |
62 | } |
63 | |
64 | static int pmu_set_power_domain(int pd, bool on) |
65 | { |
66 | u32 val = (on) ? 0 : BIT(pd); |
67 | struct reset_control *rstc = rockchip_get_core_reset(cpu: pd); |
68 | int ret; |
69 | |
70 | if (IS_ERR(ptr: rstc) && read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) { |
71 | pr_err("%s: could not get reset control for core %d\n" , |
72 | __func__, pd); |
73 | return PTR_ERR(ptr: rstc); |
74 | } |
75 | |
76 | /* |
77 | * We need to soft reset the cpu when we turn off the cpu power domain, |
78 | * or else the active processors might be stalled when the individual |
79 | * processor is powered down. |
80 | */ |
81 | if (!IS_ERR(ptr: rstc) && !on) |
82 | reset_control_assert(rstc); |
83 | |
84 | if (has_pmu) { |
85 | ret = regmap_update_bits(map: pmu, PMU_PWRDN_CON, BIT(pd), val); |
86 | if (ret < 0) { |
87 | pr_err("%s: could not update power domain\n" , |
88 | __func__); |
89 | return ret; |
90 | } |
91 | |
92 | ret = -1; |
93 | while (ret != on) { |
94 | ret = pmu_power_domain_is_on(pd); |
95 | if (ret < 0) { |
96 | pr_err("%s: could not read power domain state\n" , |
97 | __func__); |
98 | return ret; |
99 | } |
100 | } |
101 | } |
102 | |
103 | if (!IS_ERR(ptr: rstc)) { |
104 | if (on) |
105 | reset_control_deassert(rstc); |
106 | reset_control_put(rstc); |
107 | } |
108 | |
109 | return 0; |
110 | } |
111 | |
112 | /* |
113 | * Handling of CPU cores |
114 | */ |
115 | |
116 | static int rockchip_boot_secondary(unsigned int cpu, struct task_struct *idle) |
117 | { |
118 | int ret; |
119 | |
120 | if (!sram_base_addr || (has_pmu && !pmu)) { |
121 | pr_err("%s: sram or pmu missing for cpu boot\n" , __func__); |
122 | return -ENXIO; |
123 | } |
124 | |
125 | if (cpu >= ncores) { |
126 | pr_err("%s: cpu %d outside maximum number of cpus %d\n" , |
127 | __func__, cpu, ncores); |
128 | return -ENXIO; |
129 | } |
130 | |
131 | /* start the core */ |
132 | ret = pmu_set_power_domain(pd: 0 + cpu, on: true); |
133 | if (ret < 0) |
134 | return ret; |
135 | |
136 | if (read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) { |
137 | /* |
138 | * We communicate with the bootrom to active the cpus other |
139 | * than cpu0, after a blob of initialize code, they will |
140 | * stay at wfe state, once they are activated, they will check |
141 | * the mailbox: |
142 | * sram_base_addr + 4: 0xdeadbeaf |
143 | * sram_base_addr + 8: start address for pc |
144 | * The cpu0 need to wait the other cpus other than cpu0 entering |
145 | * the wfe state.The wait time is affected by many aspects. |
146 | * (e.g: cpu frequency, bootrom frequency, sram frequency, ...) |
147 | */ |
148 | mdelay(1); /* ensure the cpus other than cpu0 to startup */ |
149 | |
150 | writel(__pa_symbol(secondary_startup), sram_base_addr + 8); |
151 | writel(val: 0xDEADBEAF, addr: sram_base_addr + 4); |
152 | dsb_sev(); |
153 | } |
154 | |
155 | return 0; |
156 | } |
157 | |
158 | /** |
159 | * rockchip_smp_prepare_sram - populate necessary sram block |
160 | * Starting cores execute the code residing at the start of the on-chip sram |
161 | * after power-on. Therefore make sure, this sram region is reserved and |
162 | * big enough. After this check, copy the trampoline code that directs the |
163 | * core to the real startup code in ram into the sram-region. |
164 | * @node: mmio-sram device node |
165 | */ |
166 | static int __init rockchip_smp_prepare_sram(struct device_node *node) |
167 | { |
168 | unsigned int trampoline_sz = &rockchip_secondary_trampoline_end - |
169 | &rockchip_secondary_trampoline; |
170 | struct resource res; |
171 | unsigned int rsize; |
172 | int ret; |
173 | |
174 | ret = of_address_to_resource(dev: node, index: 0, r: &res); |
175 | if (ret < 0) { |
176 | pr_err("%s: could not get address for node %pOF\n" , |
177 | __func__, node); |
178 | return ret; |
179 | } |
180 | |
181 | rsize = resource_size(res: &res); |
182 | if (rsize < trampoline_sz) { |
183 | pr_err("%s: reserved block with size 0x%x is too small for trampoline size 0x%x\n" , |
184 | __func__, rsize, trampoline_sz); |
185 | return -EINVAL; |
186 | } |
187 | |
188 | /* set the boot function for the sram code */ |
189 | rockchip_boot_fn = __pa_symbol(secondary_startup); |
190 | |
191 | /* copy the trampoline to sram, that runs during startup of the core */ |
192 | memcpy_toio(sram_base_addr, &rockchip_secondary_trampoline, trampoline_sz); |
193 | flush_cache_all(); |
194 | outer_clean_range(0, trampoline_sz); |
195 | |
196 | dsb_sev(); |
197 | |
198 | return 0; |
199 | } |
200 | |
/* Minimal MMIO regmap description for the fallback PMU mapping below. */
static const struct regmap_config rockchip_pmu_regmap_config = {
	.name = "rockchip-pmu" ,
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,	/* 32-bit registers at 4-byte spacing */
};
207 | |
208 | static int __init rockchip_smp_prepare_pmu(void) |
209 | { |
210 | struct device_node *node; |
211 | void __iomem *pmu_base; |
212 | |
213 | /* |
214 | * This function is only called via smp_ops->smp_prepare_cpu(). |
215 | * That only happens if a "/cpus" device tree node exists |
216 | * and has an "enable-method" property that selects the SMP |
217 | * operations defined herein. |
218 | */ |
219 | node = of_find_node_by_path(path: "/cpus" ); |
220 | |
221 | pmu = syscon_regmap_lookup_by_phandle(np: node, property: "rockchip,pmu" ); |
222 | of_node_put(node); |
223 | if (!IS_ERR(ptr: pmu)) |
224 | return 0; |
225 | |
226 | pmu = syscon_regmap_lookup_by_compatible(s: "rockchip,rk3066-pmu" ); |
227 | if (!IS_ERR(ptr: pmu)) |
228 | return 0; |
229 | |
230 | /* fallback, create our own regmap for the pmu area */ |
231 | pmu = NULL; |
232 | node = of_find_compatible_node(NULL, NULL, compat: "rockchip,rk3066-pmu" ); |
233 | if (!node) { |
234 | pr_err("%s: could not find pmu dt node\n" , __func__); |
235 | return -ENODEV; |
236 | } |
237 | |
238 | pmu_base = of_iomap(node, index: 0); |
239 | of_node_put(node); |
240 | if (!pmu_base) { |
241 | pr_err("%s: could not map pmu registers\n" , __func__); |
242 | return -ENOMEM; |
243 | } |
244 | |
245 | pmu = regmap_init_mmio(NULL, pmu_base, &rockchip_pmu_regmap_config); |
246 | if (IS_ERR(ptr: pmu)) { |
247 | int ret = PTR_ERR(ptr: pmu); |
248 | |
249 | iounmap(addr: pmu_base); |
250 | pmu = NULL; |
251 | pr_err("%s: regmap init failed\n" , __func__); |
252 | return ret; |
253 | } |
254 | |
255 | return 0; |
256 | } |
257 | |
258 | static void __init rockchip_smp_prepare_cpus(unsigned int max_cpus) |
259 | { |
260 | struct device_node *node; |
261 | unsigned int i; |
262 | |
263 | node = of_find_compatible_node(NULL, NULL, compat: "rockchip,rk3066-smp-sram" ); |
264 | if (!node) { |
265 | pr_err("%s: could not find sram dt node\n" , __func__); |
266 | return; |
267 | } |
268 | |
269 | sram_base_addr = of_iomap(node, index: 0); |
270 | if (!sram_base_addr) { |
271 | pr_err("%s: could not map sram registers\n" , __func__); |
272 | of_node_put(node); |
273 | return; |
274 | } |
275 | |
276 | if (has_pmu && rockchip_smp_prepare_pmu()) { |
277 | of_node_put(node); |
278 | return; |
279 | } |
280 | |
281 | if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) { |
282 | if (rockchip_smp_prepare_sram(node)) { |
283 | of_node_put(node); |
284 | return; |
285 | } |
286 | |
287 | /* enable the SCU power domain */ |
288 | pmu_set_power_domain(PMU_PWRDN_SCU, on: true); |
289 | |
290 | of_node_put(node); |
291 | node = of_find_compatible_node(NULL, NULL, compat: "arm,cortex-a9-scu" ); |
292 | if (!node) { |
293 | pr_err("%s: missing scu\n" , __func__); |
294 | return; |
295 | } |
296 | |
297 | scu_base_addr = of_iomap(node, index: 0); |
298 | if (!scu_base_addr) { |
299 | pr_err("%s: could not map scu registers\n" , __func__); |
300 | of_node_put(node); |
301 | return; |
302 | } |
303 | |
304 | /* |
305 | * While the number of cpus is gathered from dt, also get the |
306 | * number of cores from the scu to verify this value when |
307 | * booting the cores. |
308 | */ |
309 | ncores = scu_get_core_count(scu_base_addr); |
310 | pr_err("%s: ncores %d\n" , __func__, ncores); |
311 | |
312 | scu_enable(scu_base_addr); |
313 | } else { |
314 | unsigned int l2ctlr; |
315 | |
316 | asm ("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr)); |
317 | ncores = ((l2ctlr >> 24) & 0x3) + 1; |
318 | } |
319 | of_node_put(node); |
320 | |
321 | /* Make sure that all cores except the first are really off */ |
322 | for (i = 1; i < ncores; i++) |
323 | pmu_set_power_domain(pd: 0 + i, on: false); |
324 | } |
325 | |
/*
 * rk3036_smp_prepare_cpus - rk3036 variant of the cpu preparation hook
 * @max_cpus: maximum number of cpus to bring up
 *
 * The rk3036 has no usable PMU power domains, so clear has_pmu before
 * running the common preparation path.
 */
static void __init rk3036_smp_prepare_cpus(unsigned int max_cpus)
{
	has_pmu = false;

	rockchip_smp_prepare_cpus(max_cpus);
}
332 | |
333 | #ifdef CONFIG_HOTPLUG_CPU |
334 | static int rockchip_cpu_kill(unsigned int cpu) |
335 | { |
336 | /* |
337 | * We need a delay here to ensure that the dying CPU can finish |
338 | * executing v7_coherency_exit() and reach the WFI/WFE state |
339 | * prior to having the power domain disabled. |
340 | */ |
341 | mdelay(1); |
342 | |
343 | pmu_set_power_domain(pd: 0 + cpu, on: false); |
344 | return 1; |
345 | } |
346 | |
347 | static void rockchip_cpu_die(unsigned int cpu) |
348 | { |
349 | v7_exit_coherency_flush(louis); |
350 | while (1) |
351 | cpu_do_idle(); |
352 | } |
353 | #endif |
354 | |
/* SMP operations for rk3036-style SoCs (no PMU power domains). */
static const struct smp_operations rk3036_smp_ops __initconst = {
	.smp_prepare_cpus	= rk3036_smp_prepare_cpus,
	.smp_boot_secondary	= rockchip_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_kill		= rockchip_cpu_kill,
	.cpu_die		= rockchip_cpu_die,
#endif
};
363 | |
/* SMP operations for rk3066-style SoCs (PMU-controlled power domains). */
static const struct smp_operations rockchip_smp_ops __initconst = {
	.smp_prepare_cpus	= rockchip_smp_prepare_cpus,
	.smp_boot_secondary	= rockchip_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_kill		= rockchip_cpu_kill,
	.cpu_die		= rockchip_cpu_die,
#endif
};
372 | |
/* Matched against the "enable-method" property under the /cpus DT node. */
CPU_METHOD_OF_DECLARE(rk3036_smp, "rockchip,rk3036-smp" , &rk3036_smp_ops);
CPU_METHOD_OF_DECLARE(rk3066_smp, "rockchip,rk3066-smp" , &rockchip_smp_ops);
375 | |