1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * PSCI CPU idle driver. |
4 | * |
5 | * Copyright (C) 2019 ARM Ltd. |
6 | * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> |
7 | */ |
8 | |
9 | #define pr_fmt(fmt) "CPUidle PSCI: " fmt |
10 | |
11 | #include <linux/cpuhotplug.h> |
12 | #include <linux/cpu_cooling.h> |
13 | #include <linux/cpuidle.h> |
14 | #include <linux/cpumask.h> |
15 | #include <linux/cpu_pm.h> |
16 | #include <linux/kernel.h> |
17 | #include <linux/module.h> |
18 | #include <linux/of.h> |
19 | #include <linux/platform_device.h> |
20 | #include <linux/psci.h> |
21 | #include <linux/pm_domain.h> |
22 | #include <linux/pm_runtime.h> |
23 | #include <linux/slab.h> |
24 | #include <linux/string.h> |
25 | #include <linux/syscore_ops.h> |
26 | |
27 | #include <asm/cpuidle.h> |
28 | |
29 | #include "cpuidle-psci.h" |
30 | #include "dt_idle_states.h" |
31 | |
/* Per-CPU idle data for the PSCI idle driver. */
struct psci_cpuidle_data {
	u32 *psci_states;	/* PSCI suspend parameters, indexed by cpuidle state index */
	struct device *dev;	/* genpd-attached device; NULL without a hierarchical topology */
};
36 | |
/* Per-CPU idle data, written at init and read in the idle path. */
static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
/* Domain idle state selected by the genpd governor; 0 means none selected. */
static DEFINE_PER_CPU(u32, domain_state);
/* Set when the hierarchical (OSI-mode) topology requires cpuhp/syscore hooks. */
static bool psci_cpuidle_use_cpuhp;
40 | |
/* Record the domain idle state selected for the current CPU (0 = none). */
void psci_set_domain_state(u32 state)
{
	__this_cpu_write(domain_state, state);
}
45 | |
/* Return the domain idle state recorded for the current CPU (0 = none). */
static inline u32 psci_get_domain_state(void)
{
	return __this_cpu_read(domain_state);
}
50 | |
51 | static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev, |
52 | struct cpuidle_driver *drv, int idx, |
53 | bool s2idle) |
54 | { |
55 | struct psci_cpuidle_data *data = this_cpu_ptr(&psci_cpuidle_data); |
56 | u32 *states = data->psci_states; |
57 | struct device *pd_dev = data->dev; |
58 | u32 state; |
59 | int ret; |
60 | |
61 | ret = cpu_pm_enter(); |
62 | if (ret) |
63 | return -1; |
64 | |
65 | /* Do runtime PM to manage a hierarchical CPU toplogy. */ |
66 | if (s2idle) |
67 | dev_pm_genpd_suspend(dev: pd_dev); |
68 | else |
69 | pm_runtime_put_sync_suspend(dev: pd_dev); |
70 | |
71 | state = psci_get_domain_state(); |
72 | if (!state) |
73 | state = states[idx]; |
74 | |
75 | ret = psci_cpu_suspend_enter(state) ? -1 : idx; |
76 | |
77 | if (s2idle) |
78 | dev_pm_genpd_resume(dev: pd_dev); |
79 | else |
80 | pm_runtime_get_sync(dev: pd_dev); |
81 | |
82 | cpu_pm_exit(); |
83 | |
84 | /* Clear the domain state to start fresh when back from idle. */ |
85 | psci_set_domain_state(state: 0); |
86 | return ret; |
87 | } |
88 | |
89 | static int psci_enter_domain_idle_state(struct cpuidle_device *dev, |
90 | struct cpuidle_driver *drv, int idx) |
91 | { |
92 | return __psci_enter_domain_idle_state(dev, drv, idx, s2idle: false); |
93 | } |
94 | |
95 | static int psci_enter_s2idle_domain_idle_state(struct cpuidle_device *dev, |
96 | struct cpuidle_driver *drv, |
97 | int idx) |
98 | { |
99 | return __psci_enter_domain_idle_state(dev, drv, idx, s2idle: true); |
100 | } |
101 | |
102 | static int psci_idle_cpuhp_up(unsigned int cpu) |
103 | { |
104 | struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev); |
105 | |
106 | if (pd_dev) |
107 | pm_runtime_get_sync(dev: pd_dev); |
108 | |
109 | return 0; |
110 | } |
111 | |
112 | static int psci_idle_cpuhp_down(unsigned int cpu) |
113 | { |
114 | struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev); |
115 | |
116 | if (pd_dev) { |
117 | pm_runtime_put_sync(dev: pd_dev); |
118 | /* Clear domain state to start fresh at next online. */ |
119 | psci_set_domain_state(state: 0); |
120 | } |
121 | |
122 | return 0; |
123 | } |
124 | |
125 | static void psci_idle_syscore_switch(bool suspend) |
126 | { |
127 | bool cleared = false; |
128 | struct device *dev; |
129 | int cpu; |
130 | |
131 | for_each_possible_cpu(cpu) { |
132 | dev = per_cpu_ptr(&psci_cpuidle_data, cpu)->dev; |
133 | |
134 | if (dev && suspend) { |
135 | dev_pm_genpd_suspend(dev); |
136 | } else if (dev) { |
137 | dev_pm_genpd_resume(dev); |
138 | |
139 | /* Account for userspace having offlined a CPU. */ |
140 | if (pm_runtime_status_suspended(dev)) |
141 | pm_runtime_set_active(dev); |
142 | |
143 | /* Clear domain state to re-start fresh. */ |
144 | if (!cleared) { |
145 | psci_set_domain_state(state: 0); |
146 | cleared = true; |
147 | } |
148 | } |
149 | } |
150 | } |
151 | |
152 | static int psci_idle_syscore_suspend(void) |
153 | { |
154 | psci_idle_syscore_switch(suspend: true); |
155 | return 0; |
156 | } |
157 | |
158 | static void psci_idle_syscore_resume(void) |
159 | { |
160 | psci_idle_syscore_switch(suspend: false); |
161 | } |
162 | |
/* Syscore hooks used to manage CPU PM domains over system-wide suspend. */
static struct syscore_ops psci_idle_syscore_ops = {
	.suspend = psci_idle_syscore_suspend,
	.resume = psci_idle_syscore_resume,
};
167 | |
168 | static void psci_idle_init_cpuhp(void) |
169 | { |
170 | int err; |
171 | |
172 | if (!psci_cpuidle_use_cpuhp) |
173 | return; |
174 | |
175 | register_syscore_ops(ops: &psci_idle_syscore_ops); |
176 | |
177 | err = cpuhp_setup_state_nocalls(state: CPUHP_AP_CPU_PM_STARTING, |
178 | name: "cpuidle/psci:online" , |
179 | startup: psci_idle_cpuhp_up, |
180 | teardown: psci_idle_cpuhp_down); |
181 | if (err) |
182 | pr_warn("Failed %d while setup cpuhp state\n" , err); |
183 | } |
184 | |
/* cpuidle ->enter() callback for flat (non-hierarchical) PSCI idle states. */
static __cpuidle int psci_enter_idle_state(struct cpuidle_device *dev,
					   struct cpuidle_driver *drv, int idx)
{
	u32 *state = __this_cpu_read(psci_cpuidle_data.psci_states);

	/* Wraps psci_cpu_suspend_enter() with CPU PM notifiers and RCU handling. */
	return CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(psci_cpu_suspend_enter, idx, state[idx]);
}
192 | |
/* DT match table: each "arm,idle-state" node uses psci_enter_idle_state. */
static const struct of_device_id psci_idle_state_match[] = {
	{ .compatible = "arm,idle-state",
	  .data = psci_enter_idle_state },
	{ },
};
198 | |
199 | int psci_dt_parse_state_node(struct device_node *np, u32 *state) |
200 | { |
201 | int err = of_property_read_u32(np, propname: "arm,psci-suspend-param" , out_value: state); |
202 | |
203 | if (err) { |
204 | pr_warn("%pOF missing arm,psci-suspend-param property\n" , np); |
205 | return err; |
206 | } |
207 | |
208 | if (!psci_power_state_is_valid(state: *state)) { |
209 | pr_warn("Invalid PSCI power state %#x\n" , *state); |
210 | return -EINVAL; |
211 | } |
212 | |
213 | return 0; |
214 | } |
215 | |
216 | static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv, |
217 | struct psci_cpuidle_data *data, |
218 | unsigned int state_count, int cpu) |
219 | { |
220 | /* Currently limit the hierarchical topology to be used in OSI mode. */ |
221 | if (!psci_has_osi_support()) |
222 | return 0; |
223 | |
224 | if (IS_ENABLED(CONFIG_PREEMPT_RT)) |
225 | return 0; |
226 | |
227 | data->dev = psci_dt_attach_cpu(cpu); |
228 | if (IS_ERR_OR_NULL(ptr: data->dev)) |
229 | return PTR_ERR_OR_ZERO(ptr: data->dev); |
230 | |
231 | /* |
232 | * Using the deepest state for the CPU to trigger a potential selection |
233 | * of a shared state for the domain, assumes the domain states are all |
234 | * deeper states. |
235 | */ |
236 | drv->states[state_count - 1].flags |= CPUIDLE_FLAG_RCU_IDLE; |
237 | drv->states[state_count - 1].enter = psci_enter_domain_idle_state; |
238 | drv->states[state_count - 1].enter_s2idle = psci_enter_s2idle_domain_idle_state; |
239 | psci_cpuidle_use_cpuhp = true; |
240 | |
241 | return 0; |
242 | } |
243 | |
244 | static int psci_dt_cpu_init_idle(struct device *dev, struct cpuidle_driver *drv, |
245 | struct device_node *cpu_node, |
246 | unsigned int state_count, int cpu) |
247 | { |
248 | int i, ret = 0; |
249 | u32 *psci_states; |
250 | struct device_node *state_node; |
251 | struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu); |
252 | |
253 | state_count++; /* Add WFI state too */ |
254 | psci_states = devm_kcalloc(dev, n: state_count, size: sizeof(*psci_states), |
255 | GFP_KERNEL); |
256 | if (!psci_states) |
257 | return -ENOMEM; |
258 | |
259 | for (i = 1; i < state_count; i++) { |
260 | state_node = of_get_cpu_state_node(cpu_node, index: i - 1); |
261 | if (!state_node) |
262 | break; |
263 | |
264 | ret = psci_dt_parse_state_node(np: state_node, state: &psci_states[i]); |
265 | of_node_put(node: state_node); |
266 | |
267 | if (ret) |
268 | return ret; |
269 | |
270 | pr_debug("psci-power-state %#x index %d\n" , psci_states[i], i); |
271 | } |
272 | |
273 | if (i != state_count) |
274 | return -ENODEV; |
275 | |
276 | /* Initialize optional data, used for the hierarchical topology. */ |
277 | ret = psci_dt_cpu_init_topology(drv, data, state_count, cpu); |
278 | if (ret < 0) |
279 | return ret; |
280 | |
281 | /* Idle states parsed correctly, store them in the per-cpu struct. */ |
282 | data->psci_states = psci_states; |
283 | return 0; |
284 | } |
285 | |
286 | static int psci_cpu_init_idle(struct device *dev, struct cpuidle_driver *drv, |
287 | unsigned int cpu, unsigned int state_count) |
288 | { |
289 | struct device_node *cpu_node; |
290 | int ret; |
291 | |
292 | /* |
293 | * If the PSCI cpu_suspend function hook has not been initialized |
294 | * idle states must not be enabled, so bail out |
295 | */ |
296 | if (!psci_ops.cpu_suspend) |
297 | return -EOPNOTSUPP; |
298 | |
299 | cpu_node = of_cpu_device_node_get(cpu); |
300 | if (!cpu_node) |
301 | return -ENODEV; |
302 | |
303 | ret = psci_dt_cpu_init_idle(dev, drv, cpu_node, state_count, cpu); |
304 | |
305 | of_node_put(node: cpu_node); |
306 | |
307 | return ret; |
308 | } |
309 | |
310 | static void psci_cpu_deinit_idle(int cpu) |
311 | { |
312 | struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu); |
313 | |
314 | psci_dt_detach_cpu(dev: data->dev); |
315 | psci_cpuidle_use_cpuhp = false; |
316 | } |
317 | |
318 | static int psci_idle_init_cpu(struct device *dev, int cpu) |
319 | { |
320 | struct cpuidle_driver *drv; |
321 | struct device_node *cpu_node; |
322 | const char *enable_method; |
323 | int ret = 0; |
324 | |
325 | cpu_node = of_cpu_device_node_get(cpu); |
326 | if (!cpu_node) |
327 | return -ENODEV; |
328 | |
329 | /* |
330 | * Check whether the enable-method for the cpu is PSCI, fail |
331 | * if it is not. |
332 | */ |
333 | enable_method = of_get_property(node: cpu_node, name: "enable-method" , NULL); |
334 | if (!enable_method || (strcmp(enable_method, "psci" ))) |
335 | ret = -ENODEV; |
336 | |
337 | of_node_put(node: cpu_node); |
338 | if (ret) |
339 | return ret; |
340 | |
341 | drv = devm_kzalloc(dev, size: sizeof(*drv), GFP_KERNEL); |
342 | if (!drv) |
343 | return -ENOMEM; |
344 | |
345 | drv->name = "psci_idle" ; |
346 | drv->owner = THIS_MODULE; |
347 | drv->cpumask = (struct cpumask *)cpumask_of(cpu); |
348 | |
349 | /* |
350 | * PSCI idle states relies on architectural WFI to be represented as |
351 | * state index 0. |
352 | */ |
353 | drv->states[0].enter = psci_enter_idle_state; |
354 | drv->states[0].exit_latency = 1; |
355 | drv->states[0].target_residency = 1; |
356 | drv->states[0].power_usage = UINT_MAX; |
357 | strcpy(p: drv->states[0].name, q: "WFI" ); |
358 | strcpy(p: drv->states[0].desc, q: "ARM WFI" ); |
359 | |
360 | /* |
361 | * If no DT idle states are detected (ret == 0) let the driver |
362 | * initialization fail accordingly since there is no reason to |
363 | * initialize the idle driver if only wfi is supported, the |
364 | * default archictectural back-end already executes wfi |
365 | * on idle entry. |
366 | */ |
367 | ret = dt_init_idle_driver(drv, matches: psci_idle_state_match, start_idx: 1); |
368 | if (ret <= 0) |
369 | return ret ? : -ENODEV; |
370 | |
371 | /* |
372 | * Initialize PSCI idle states. |
373 | */ |
374 | ret = psci_cpu_init_idle(dev, drv, cpu, state_count: ret); |
375 | if (ret) { |
376 | pr_err("CPU %d failed to PSCI idle\n" , cpu); |
377 | return ret; |
378 | } |
379 | |
380 | ret = cpuidle_register(drv, NULL); |
381 | if (ret) |
382 | goto deinit; |
383 | |
384 | cpuidle_cooling_register(drv); |
385 | |
386 | return 0; |
387 | deinit: |
388 | psci_cpu_deinit_idle(cpu); |
389 | return ret; |
390 | } |
391 | |
392 | /* |
393 | * psci_idle_probe - Initializes PSCI cpuidle driver |
394 | * |
395 | * Initializes PSCI cpuidle driver for all CPUs, if any CPU fails |
396 | * to register cpuidle driver then rollback to cancel all CPUs |
397 | * registration. |
398 | */ |
399 | static int psci_cpuidle_probe(struct platform_device *pdev) |
400 | { |
401 | int cpu, ret; |
402 | struct cpuidle_driver *drv; |
403 | struct cpuidle_device *dev; |
404 | |
405 | for_each_possible_cpu(cpu) { |
406 | ret = psci_idle_init_cpu(dev: &pdev->dev, cpu); |
407 | if (ret) |
408 | goto out_fail; |
409 | } |
410 | |
411 | psci_idle_init_cpuhp(); |
412 | return 0; |
413 | |
414 | out_fail: |
415 | while (--cpu >= 0) { |
416 | dev = per_cpu(cpuidle_devices, cpu); |
417 | drv = cpuidle_get_cpu_driver(dev); |
418 | cpuidle_unregister(drv); |
419 | psci_cpu_deinit_idle(cpu); |
420 | } |
421 | |
422 | return ret; |
423 | } |
424 | |
/* Platform driver matched by the device created in psci_idle_init(). */
static struct platform_driver psci_cpuidle_driver = {
	.probe = psci_cpuidle_probe,
	.driver = {
		.name = "psci-cpuidle",
	},
};
431 | |
432 | static int __init psci_idle_init(void) |
433 | { |
434 | struct platform_device *pdev; |
435 | int ret; |
436 | |
437 | ret = platform_driver_register(&psci_cpuidle_driver); |
438 | if (ret) |
439 | return ret; |
440 | |
441 | pdev = platform_device_register_simple(name: "psci-cpuidle" , id: -1, NULL, num: 0); |
442 | if (IS_ERR(ptr: pdev)) { |
443 | platform_driver_unregister(&psci_cpuidle_driver); |
444 | return PTR_ERR(ptr: pdev); |
445 | } |
446 | |
447 | return 0; |
448 | } |
449 | device_initcall(psci_idle_init); |
450 | |