1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * Idle daemon for PowerPC. Idle daemon will handle any action |
4 | * that needs to be taken when the system becomes idle. |
5 | * |
6 | * Originally written by Cort Dougan (cort@cs.nmt.edu). |
7 | * Subsequent 32-bit hacking by Tom Rini, Armin Kuster, |
8 | * Paul Mackerras and others. |
9 | * |
10 | * iSeries supported added by Mike Corrigan <mikejc@us.ibm.com> |
11 | * |
12 | * Additional shared processor, SMT, and firmware support |
13 | * Copyright (c) 2003 Dave Engebretsen <engebret@us.ibm.com> |
14 | * |
15 | * 32-bit and 64-bit versions merged by Paul Mackerras <paulus@samba.org> |
16 | */ |
17 | |
18 | #include <linux/sched.h> |
19 | #include <linux/kernel.h> |
20 | #include <linux/smp.h> |
21 | #include <linux/cpu.h> |
22 | #include <linux/sysctl.h> |
23 | #include <linux/tick.h> |
24 | |
25 | #include <asm/processor.h> |
26 | #include <asm/cputable.h> |
27 | #include <asm/time.h> |
28 | #include <asm/machdep.h> |
29 | #include <asm/runlatch.h> |
30 | #include <asm/smp.h> |
31 | |
32 | |
/*
 * Idle-override state. Left at IDLE_NO_OVERRIDE normally; set to
 * IDLE_POWERSAVE_OFF by the "powersave=off" boot argument handler.
 */
unsigned long cpuidle_disable = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(cpuidle_disable);
35 | |
36 | static int __init powersave_off(char *arg) |
37 | { |
38 | ppc_md.power_save = NULL; |
39 | cpuidle_disable = IDLE_POWERSAVE_OFF; |
40 | return 1; |
41 | } |
42 | __setup("powersave=off" , powersave_off); |
43 | |
/*
 * PowerPC idle loop body. Enters the platform power_save hook when one
 * is registered, otherwise just drops SMT thread priority. Interrupts
 * are re-disabled after power_save, so this returns with them in the
 * same (disabled) state either way.
 */
void arch_cpu_idle(void)
{
	ppc64_runlatch_off();

	if (ppc_md.power_save) {
		ppc_md.power_save();
		/*
		 * Some power_save functions return with
		 * interrupts enabled, some don't. Normalise to
		 * disabled before falling through.
		 */
		if (!irqs_disabled())
			raw_local_irq_disable();
	} else {
		/*
		 * Go into low thread priority and possibly
		 * low power mode.
		 */
		HMT_low();
		HMT_very_low();
	}

	/* Restore normal thread priority and the runlatch before returning. */
	HMT_medium();
	ppc64_runlatch_on();
}
68 | |
69 | int powersave_nap; |
70 | |
#ifdef CONFIG_PPC_970_NAP
/*
 * Enter nap on PPC970-class CPUs, but only when the CPU supports it
 * (CPU_FTR_CAN_NAP), the powersave_nap sysctl enables it, and
 * interrupts can be prepared for idle.
 */
void power4_idle(void)
{
	if (!cpu_has_feature(CPU_FTR_CAN_NAP))
		return;

	if (!powersave_nap)
		return;

	if (!prep_irq_for_idle())
		return;

	/* Stop all Altivec data streams (dssall) and sync before napping. */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile(PPC_DSSALL " ; sync" ::: "memory" );

	power4_idle_nap();

	/*
	 * power4_idle_nap returns to our caller with interrupts enabled
	 * (soft and hard). Our caller can cope with either interrupts
	 * disabled or enabled upon return.
	 */
}
#endif
95 | |
#ifdef CONFIG_SYSCTL
/*
 * Expose /proc/sys/kernel/powersave-nap so userspace can read and
 * write powersave_nap at run time.
 */
static struct ctl_table powersave_nap_ctl_table[] = {
	{
		.procname	= "powersave-nap",
		.data		= &powersave_nap,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
};

/* Hook the table under "kernel" at boot. */
static int __init register_powersave_nap_sysctl(void)
{
	register_sysctl("kernel", powersave_nap_ctl_table);
	return 0;
}
__initcall(register_powersave_nap_sysctl);
#endif
119 | |