1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * RISC-V SBI CPU idle driver. |
4 | * |
5 | * Copyright (c) 2021 Western Digital Corporation or its affiliates. |
6 | * Copyright (c) 2022 Ventana Micro Systems Inc. |
7 | */ |
8 | |
9 | #define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt |
10 | |
11 | #include <linux/cpuhotplug.h> |
12 | #include <linux/cpuidle.h> |
13 | #include <linux/cpumask.h> |
14 | #include <linux/cpu_pm.h> |
15 | #include <linux/cpu_cooling.h> |
16 | #include <linux/kernel.h> |
17 | #include <linux/module.h> |
18 | #include <linux/of.h> |
19 | #include <linux/slab.h> |
20 | #include <linux/platform_device.h> |
21 | #include <linux/pm_domain.h> |
22 | #include <linux/pm_runtime.h> |
23 | #include <asm/cpuidle.h> |
24 | #include <asm/sbi.h> |
25 | #include <asm/smp.h> |
26 | #include <asm/suspend.h> |
27 | |
28 | #include "dt_idle_states.h" |
29 | #include "dt_idle_genpd.h" |
30 | |
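/*
 * Per-CPU driver data: the SBI suspend parameter for each DT idle state
 * (indexed by cpuidle state index, with index 0 reserved for WFI) and the
 * power-domain device attached when the hierarchical topology is in use.
 */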
31 | struct sbi_cpuidle_data { |
32 | u32 *states; |
33 | struct device *dev; |
34 | }; |
35 | |
36 | struct sbi_domain_state { |
37 | bool available; |
38 | u32 state; |
39 | }; |
40 | |
41 | static DEFINE_PER_CPU_READ_MOSTLY(struct sbi_cpuidle_data, sbi_cpuidle_data); |
42 | static DEFINE_PER_CPU(struct sbi_domain_state, domain_state); |
43 | static bool sbi_cpuidle_use_osi; |
44 | static bool sbi_cpuidle_use_cpuhp; |
45 | static bool sbi_cpuidle_pd_allow_domain_state; |
46 | |
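/*
 * The genpd ->power_off() callback records the selected domain idle state
 * here, so that the CPU entering idle can pass it to SBI HSM suspend in
 * place of its own per-CPU state.
 */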
47 | static inline void sbi_set_domain_state(u32 state) |
48 | { |
49 | struct sbi_domain_state *data = this_cpu_ptr(&domain_state); |
50 | |
51 | data->available = true; |
52 | data->state = state; |
53 | } |
54 | |
55 | static inline u32 sbi_get_domain_state(void) |
56 | { |
57 | struct sbi_domain_state *data = this_cpu_ptr(&domain_state); |
58 | |
59 | return data->state; |
60 | } |
61 | |
62 | static inline void sbi_clear_domain_state(void) |
63 | { |
64 | struct sbi_domain_state *data = this_cpu_ptr(&domain_state); |
65 | |
66 | data->available = false; |
67 | } |
68 | |
69 | static inline bool sbi_is_domain_state_available(void) |
70 | { |
71 | struct sbi_domain_state *data = this_cpu_ptr(&domain_state); |
72 | |
73 | return data->available; |
74 | } |
75 | |
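/*
 * Idle entry for the plain (non-hierarchical) path. States with
 * SBI_HSM_SUSP_NON_RET_BIT set lose context and go through the full
 * save/restore wrappers; retentive states take the cheaper path.
 */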
76 | static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev, |
77 | struct cpuidle_driver *drv, int idx) |
78 | { |
79 | u32 *states = __this_cpu_read(sbi_cpuidle_data.states); |
80 | u32 state = states[idx]; |
81 | |
82 | if (state & SBI_HSM_SUSP_NON_RET_BIT) |
83 | return CPU_PM_CPU_IDLE_ENTER_PARAM(riscv_sbi_hart_suspend, idx, state); |
84 | else |
85 | return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(riscv_sbi_hart_suspend, |
86 | idx, state); |
87 | } |
88 | |
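/*
 * Hierarchical (OSI) idle entry: notify CPU PM, drop the runtime PM
 * reference so genpd can select a domain state, suspend via SBI HSM
 * (using the domain state if one was set), then unwind in reverse order.
 */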
89 | static __cpuidle int __sbi_enter_domain_idle_state(struct cpuidle_device *dev, |
90 | struct cpuidle_driver *drv, int idx, |
91 | bool s2idle) |
92 | { |
93 | struct sbi_cpuidle_data *data = this_cpu_ptr(&sbi_cpuidle_data); |
94 | u32 *states = data->states; |
95 | struct device *pd_dev = data->dev; |
96 | u32 state; |
97 | int ret; |
98 | |
99 | ret = cpu_pm_enter(); |
100 | if (ret) |
101 | return -1; |
102 | |
	/* Do runtime PM to manage a hierarchical CPU topology. */
	if (s2idle)
		dev_pm_genpd_suspend(pd_dev);
	else
		pm_runtime_put_sync_suspend(pd_dev);
108 | |
109 | ct_cpuidle_enter(); |
110 | |
111 | if (sbi_is_domain_state_available()) |
112 | state = sbi_get_domain_state(); |
113 | else |
114 | state = states[idx]; |
115 | |
116 | ret = riscv_sbi_hart_suspend(state) ? -1 : idx; |
117 | |
118 | ct_cpuidle_exit(); |
119 | |
120 | if (s2idle) |
		dev_pm_genpd_resume(pd_dev);
	else
		pm_runtime_get_sync(pd_dev);
124 | |
125 | cpu_pm_exit(); |
126 | |
127 | /* Clear the domain state to start fresh when back from idle. */ |
128 | sbi_clear_domain_state(); |
129 | return ret; |
130 | } |
131 | |
132 | static int sbi_enter_domain_idle_state(struct cpuidle_device *dev, |
133 | struct cpuidle_driver *drv, int idx) |
134 | { |
	return __sbi_enter_domain_idle_state(dev, drv, idx, false);
136 | } |
137 | |
138 | static int sbi_enter_s2idle_domain_idle_state(struct cpuidle_device *dev, |
139 | struct cpuidle_driver *drv, |
140 | int idx) |
141 | { |
	return __sbi_enter_domain_idle_state(dev, drv, idx, true);
143 | } |
144 | |
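/*
 * CPU hotplug callbacks keep the genpd runtime PM reference count
 * balanced across offline/online transitions when the hierarchical
 * topology is in use.
 */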
145 | static int sbi_cpuidle_cpuhp_up(unsigned int cpu) |
146 | { |
147 | struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev); |
148 | |
149 | if (pd_dev) |
		pm_runtime_get_sync(pd_dev);
151 | |
152 | return 0; |
153 | } |
154 | |
155 | static int sbi_cpuidle_cpuhp_down(unsigned int cpu) |
156 | { |
157 | struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev); |
158 | |
159 | if (pd_dev) { |
		pm_runtime_put_sync(pd_dev);
161 | /* Clear domain state to start fresh at next online. */ |
162 | sbi_clear_domain_state(); |
163 | } |
164 | |
165 | return 0; |
166 | } |
167 | |
168 | static void sbi_idle_init_cpuhp(void) |
169 | { |
170 | int err; |
171 | |
172 | if (!sbi_cpuidle_use_cpuhp) |
173 | return; |
174 | |
	err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
					"cpuidle/sbi:online",
					sbi_cpuidle_cpuhp_up,
					sbi_cpuidle_cpuhp_down);
179 | if (err) |
		pr_warn("Failed %d while setting up cpuhp state\n", err);
181 | } |
182 | |
183 | static const struct of_device_id sbi_cpuidle_state_match[] = { |
	{ .compatible = "riscv,idle-state",
185 | .data = sbi_cpuidle_enter_state }, |
186 | { }, |
187 | }; |
188 | |
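/*
 * Illustrative idle-state node matched by the table above; the property
 * values are examples only (0x80000000 being a non-retentive suspend
 * type), see the riscv,idle-states DT binding for the authoritative
 * format:
 *
 *	cpu-nonret-0 {
 *		compatible = "riscv,idle-state";
 *		riscv,sbi-suspend-param = <0x80000000>;
 *		entry-latency-us = <...>;
 *		exit-latency-us = <...>;
 *		min-residency-us = <...>;
 *	};
 */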
189 | static int sbi_dt_parse_state_node(struct device_node *np, u32 *state) |
190 | { |
	int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state);
192 | |
193 | if (err) { |
		pr_warn("%pOF missing riscv,sbi-suspend-param property\n", np);
195 | return err; |
196 | } |
197 | |
198 | if (!riscv_sbi_suspend_state_is_valid(*state)) { |
		pr_warn("Invalid SBI suspend state %#x\n", *state);
200 | return -EINVAL; |
201 | } |
202 | |
203 | return 0; |
204 | } |
205 | |
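/*
 * When OSI is in use, reroute the deepest CPU idle state through the
 * genpd-aware enter callbacks so that runtime PM can propagate the idle
 * request up the domain hierarchy.
 */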
206 | static int sbi_dt_cpu_init_topology(struct cpuidle_driver *drv, |
207 | struct sbi_cpuidle_data *data, |
208 | unsigned int state_count, int cpu) |
209 | { |
210 | /* Currently limit the hierarchical topology to be used in OSI mode. */ |
211 | if (!sbi_cpuidle_use_osi) |
212 | return 0; |
213 | |
	data->dev = dt_idle_attach_cpu(cpu, "sbi");
	if (IS_ERR_OR_NULL(data->dev))
		return PTR_ERR_OR_ZERO(data->dev);
217 | |
218 | /* |
219 | * Using the deepest state for the CPU to trigger a potential selection |
220 | * of a shared state for the domain, assumes the domain states are all |
221 | * deeper states. |
222 | */ |
223 | drv->states[state_count - 1].flags |= CPUIDLE_FLAG_RCU_IDLE; |
224 | drv->states[state_count - 1].enter = sbi_enter_domain_idle_state; |
225 | drv->states[state_count - 1].enter_s2idle = |
226 | sbi_enter_s2idle_domain_idle_state; |
227 | sbi_cpuidle_use_cpuhp = true; |
228 | |
229 | return 0; |
230 | } |
231 | |
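/*
 * Parse the DT idle-state nodes of @cpu into a per-CPU array of SBI
 * suspend parameters; index 0 stays zero for the architectural WFI state.
 */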
232 | static int sbi_cpuidle_dt_init_states(struct device *dev, |
233 | struct cpuidle_driver *drv, |
234 | unsigned int cpu, |
235 | unsigned int state_count) |
236 | { |
237 | struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu); |
238 | struct device_node *state_node; |
239 | struct device_node *cpu_node; |
240 | u32 *states; |
241 | int i, ret; |
242 | |
243 | cpu_node = of_cpu_device_node_get(cpu); |
244 | if (!cpu_node) |
245 | return -ENODEV; |
246 | |
	states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL);
248 | if (!states) { |
249 | ret = -ENOMEM; |
250 | goto fail; |
251 | } |
252 | |
253 | /* Parse SBI specific details from state DT nodes */ |
254 | for (i = 1; i < state_count; i++) { |
		state_node = of_get_cpu_state_node(cpu_node, i - 1);
256 | if (!state_node) |
257 | break; |
258 | |
		ret = sbi_dt_parse_state_node(state_node, &states[i]);
		of_node_put(state_node);

		if (ret)
			goto fail;
264 | |
		pr_debug("sbi-state %#x index %d\n", states[i], i);
266 | } |
267 | if (i != state_count) { |
268 | ret = -ENODEV; |
269 | goto fail; |
270 | } |
271 | |
272 | /* Initialize optional data, used for the hierarchical topology. */ |
273 | ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu); |
274 | if (ret < 0) |
		goto fail;
276 | |
277 | /* Store states in the per-cpu struct. */ |
278 | data->states = states; |
279 | |
280 | fail: |
	of_node_put(cpu_node);
282 | |
283 | return ret; |
284 | } |
285 | |
286 | static void sbi_cpuidle_deinit_cpu(int cpu) |
287 | { |
288 | struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu); |
289 | |
	dt_idle_detach_cpu(data->dev);
291 | sbi_cpuidle_use_cpuhp = false; |
292 | } |
293 | |
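/*
 * Build and register a cpuidle driver for one CPU: architectural WFI at
 * index 0, the DT-described SBI states above it, plus an optional cooling
 * device.
 */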
294 | static int sbi_cpuidle_init_cpu(struct device *dev, int cpu) |
295 | { |
296 | struct cpuidle_driver *drv; |
297 | unsigned int state_count = 0; |
298 | int ret = 0; |
299 | |
	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
301 | if (!drv) |
302 | return -ENOMEM; |
303 | |
	drv->name = "sbi_cpuidle";
305 | drv->owner = THIS_MODULE; |
306 | drv->cpumask = (struct cpumask *)cpumask_of(cpu); |
307 | |
308 | /* RISC-V architectural WFI to be represented as state index 0. */ |
309 | drv->states[0].enter = sbi_cpuidle_enter_state; |
310 | drv->states[0].exit_latency = 1; |
311 | drv->states[0].target_residency = 1; |
312 | drv->states[0].power_usage = UINT_MAX; |
	strcpy(drv->states[0].name, "WFI");
	strcpy(drv->states[0].desc, "RISC-V WFI");
315 | |
316 | /* |
317 | * If no DT idle states are detected (ret == 0) let the driver |
318 | * initialization fail accordingly since there is no reason to |
319 | * initialize the idle driver if only wfi is supported, the |
320 | * default archictectural back-end already executes wfi |
321 | * on idle entry. |
322 | */ |
	ret = dt_init_idle_driver(drv, sbi_cpuidle_state_match, 1);
324 | if (ret <= 0) { |
		pr_debug("HART%ld: failed to parse DT idle states\n",
326 | cpuid_to_hartid_map(cpu)); |
327 | return ret ? : -ENODEV; |
328 | } |
329 | state_count = ret + 1; /* Include WFI state as well */ |
330 | |
331 | /* Initialize idle states from DT. */ |
332 | ret = sbi_cpuidle_dt_init_states(dev, drv, cpu, state_count); |
333 | if (ret) { |
		pr_err("HART%ld: failed to init idle states\n",
335 | cpuid_to_hartid_map(cpu)); |
336 | return ret; |
337 | } |
338 | |
339 | ret = cpuidle_register(drv, NULL); |
340 | if (ret) |
341 | goto deinit; |
342 | |
343 | cpuidle_cooling_register(drv); |
344 | |
345 | return 0; |
346 | deinit: |
347 | sbi_cpuidle_deinit_cpu(cpu); |
348 | return ret; |
349 | } |
350 | |
351 | static void sbi_cpuidle_domain_sync_state(struct device *dev) |
352 | { |
353 | /* |
354 | * All devices have now been attached/probed to the PM domain |
355 | * topology, hence it's fine to allow domain states to be picked. |
356 | */ |
357 | sbi_cpuidle_pd_allow_domain_state = true; |
358 | } |
359 | |
360 | #ifdef CONFIG_DT_IDLE_GENPD |
361 | |
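/*
 * genpd ->power_off() callback: nothing is suspended here; only record the
 * chosen domain state so the last CPU entering idle passes it to SBI HSM
 * suspend.
 */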
362 | static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd) |
363 | { |
364 | struct genpd_power_state *state = &pd->states[pd->state_idx]; |
365 | u32 *pd_state; |
366 | |
367 | if (!state->data) |
368 | return 0; |
369 | |
370 | if (!sbi_cpuidle_pd_allow_domain_state) |
371 | return -EBUSY; |
372 | |
373 | /* OSI mode is enabled, set the corresponding domain state. */ |
374 | pd_state = state->data; |
375 | sbi_set_domain_state(*pd_state); |
376 | |
377 | return 0; |
378 | } |
379 | |
380 | struct sbi_pd_provider { |
381 | struct list_head link; |
382 | struct device_node *node; |
383 | }; |
384 | |
385 | static LIST_HEAD(sbi_pd_providers); |
386 | |
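/*
 * Create a generic PM domain for one DT power-domain node and register it
 * as an of_genpd provider. Powering off is only permitted when OSI mode
 * is in use; otherwise the domain is kept always-on.
 */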
387 | static int sbi_pd_init(struct device_node *np) |
388 | { |
389 | struct generic_pm_domain *pd; |
390 | struct sbi_pd_provider *pd_provider; |
391 | struct dev_power_governor *pd_gov; |
392 | int ret = -ENOMEM; |
393 | |
394 | pd = dt_idle_pd_alloc(np, sbi_dt_parse_state_node); |
395 | if (!pd) |
396 | goto out; |
397 | |
398 | pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL); |
399 | if (!pd_provider) |
400 | goto free_pd; |
401 | |
402 | pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN; |
403 | |
404 | /* Allow power off when OSI is available. */ |
405 | if (sbi_cpuidle_use_osi) |
406 | pd->power_off = sbi_cpuidle_pd_power_off; |
407 | else |
408 | pd->flags |= GENPD_FLAG_ALWAYS_ON; |
409 | |
410 | /* Use governor for CPU PM domains if it has some states to manage. */ |
411 | pd_gov = pd->states ? &pm_domain_cpu_gov : NULL; |
412 | |
413 | ret = pm_genpd_init(pd, pd_gov, false); |
414 | if (ret) |
415 | goto free_pd_prov; |
416 | |
417 | ret = of_genpd_add_provider_simple(np, pd); |
418 | if (ret) |
419 | goto remove_pd; |
420 | |
421 | pd_provider->node = of_node_get(np); |
422 | list_add(&pd_provider->link, &sbi_pd_providers); |
423 | |
	pr_debug("init PM domain %s\n", pd->name);
425 | return 0; |
426 | |
427 | remove_pd: |
428 | pm_genpd_remove(pd); |
429 | free_pd_prov: |
430 | kfree(pd_provider); |
431 | free_pd: |
432 | dt_idle_pd_free(pd); |
433 | out: |
	pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
435 | return ret; |
436 | } |
437 | |
438 | static void sbi_pd_remove(void) |
439 | { |
440 | struct sbi_pd_provider *pd_provider, *it; |
441 | struct generic_pm_domain *genpd; |
442 | |
443 | list_for_each_entry_safe(pd_provider, it, &sbi_pd_providers, link) { |
444 | of_genpd_del_provider(pd_provider->node); |
445 | |
446 | genpd = of_genpd_remove_last(pd_provider->node); |
447 | if (!IS_ERR(genpd)) |
448 | kfree(genpd); |
449 | |
450 | of_node_put(pd_provider->node); |
451 | list_del(&pd_provider->link); |
452 | kfree(pd_provider); |
453 | } |
454 | } |
455 | |
456 | static int sbi_genpd_probe(struct device_node *np) |
457 | { |
458 | struct device_node *node; |
459 | int ret = 0, pd_count = 0; |
460 | |
461 | if (!np) |
462 | return -ENODEV; |
463 | |
464 | /* |
465 | * Parse child nodes for the "#power-domain-cells" property and |
466 | * initialize a genpd/genpd-of-provider pair when it's found. |
467 | */ |
468 | for_each_child_of_node(np, node) { |
		if (!of_property_present(node, "#power-domain-cells"))
470 | continue; |
471 | |
472 | ret = sbi_pd_init(node); |
473 | if (ret) |
474 | goto put_node; |
475 | |
476 | pd_count++; |
477 | } |
478 | |
479 | /* Bail out if not using the hierarchical CPU topology. */ |
480 | if (!pd_count) |
481 | goto no_pd; |
482 | |
483 | /* Link genpd masters/subdomains to model the CPU topology. */ |
484 | ret = dt_idle_pd_init_topology(np); |
485 | if (ret) |
486 | goto remove_pd; |
487 | |
488 | return 0; |
489 | |
490 | put_node: |
491 | of_node_put(node); |
492 | remove_pd: |
493 | sbi_pd_remove(); |
	pr_err("failed to create CPU PM domains ret=%d\n", ret);
495 | no_pd: |
496 | return ret; |
497 | } |
498 | |
499 | #else |
500 | |
501 | static inline int sbi_genpd_probe(struct device_node *np) |
502 | { |
503 | return 0; |
504 | } |
505 | |
506 | #endif |
507 | |
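/*
 * OSI mode is used only if every possible CPU has both "power-domains"
 * and "power-domain-names" properties; otherwise the driver falls back
 * to the flat per-CPU topology.
 */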
508 | static int sbi_cpuidle_probe(struct platform_device *pdev) |
509 | { |
510 | int cpu, ret; |
511 | struct cpuidle_driver *drv; |
512 | struct cpuidle_device *dev; |
513 | struct device_node *np, *pds_node; |
514 | |
515 | /* Detect OSI support based on CPU DT nodes */ |
516 | sbi_cpuidle_use_osi = true; |
	for_each_possible_cpu(cpu) {
		np = of_cpu_device_node_get(cpu);
		if (!np ||
		    !of_property_present(np, "power-domains") ||
		    !of_property_present(np, "power-domain-names")) {
			sbi_cpuidle_use_osi = false;
			of_node_put(np);
			break;
		}
		of_node_put(np);
	}
528 | |
529 | /* Populate generic power domains from DT nodes */ |
	pds_node = of_find_node_by_path("/cpus/power-domains");
	if (pds_node) {
		ret = sbi_genpd_probe(pds_node);
		of_node_put(pds_node);
534 | if (ret) |
535 | return ret; |
536 | } |
537 | |
538 | /* Initialize CPU idle driver for each CPU */ |
539 | for_each_possible_cpu(cpu) { |
		ret = sbi_cpuidle_init_cpu(&pdev->dev, cpu);
541 | if (ret) { |
			pr_debug("HART%ld: idle driver init failed\n",
543 | cpuid_to_hartid_map(cpu)); |
544 | goto out_fail; |
545 | } |
546 | } |
547 | |
	/* Set up CPU hotplug notifiers */
549 | sbi_idle_init_cpuhp(); |
550 | |
	pr_info("idle driver registered for all CPUs\n");
552 | |
553 | return 0; |
554 | |
555 | out_fail: |
556 | while (--cpu >= 0) { |
557 | dev = per_cpu(cpuidle_devices, cpu); |
558 | drv = cpuidle_get_cpu_driver(dev); |
559 | cpuidle_unregister(drv); |
560 | sbi_cpuidle_deinit_cpu(cpu); |
561 | } |
562 | |
563 | return ret; |
564 | } |
565 | |
566 | static struct platform_driver sbi_cpuidle_driver = { |
567 | .probe = sbi_cpuidle_probe, |
568 | .driver = { |
		.name = "sbi-cpuidle",
570 | .sync_state = sbi_cpuidle_domain_sync_state, |
571 | }, |
572 | }; |
573 | |
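/*
 * The driver binds to a platform device registered right here, so the
 * idle states are set up once at boot whenever SBI HSM is available.
 */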
574 | static int __init sbi_cpuidle_init(void) |
575 | { |
576 | int ret; |
577 | struct platform_device *pdev; |
578 | |
579 | if (!riscv_sbi_hsm_is_supported()) |
580 | return 0; |
581 | |
582 | ret = platform_driver_register(&sbi_cpuidle_driver); |
583 | if (ret) |
584 | return ret; |
585 | |
	pdev = platform_device_register_simple("sbi-cpuidle",
					       -1, NULL, 0);
	if (IS_ERR(pdev)) {
		platform_driver_unregister(&sbi_cpuidle_driver);
		return PTR_ERR(pdev);
591 | } |
592 | |
593 | return 0; |
594 | } |
595 | device_initcall(sbi_cpuidle_init); |
596 | |