1 | /* |
2 | * cpuidle.h - a generic framework for CPU idle power management |
3 | * |
4 | * (C) 2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> |
5 | * Shaohua Li <shaohua.li@intel.com> |
6 | * Adam Belay <abelay@novell.com> |
7 | * |
8 | * This code is licenced under the GPL. |
9 | */ |
10 | |
11 | #ifndef _LINUX_CPUIDLE_H |
12 | #define _LINUX_CPUIDLE_H |
13 | |
14 | #include <linux/percpu.h> |
15 | #include <linux/list.h> |
16 | #include <linux/hrtimer.h> |
17 | #include <linux/context_tracking.h> |
18 | |
/*
 * Framework-wide limits: the maximum number of idle states a driver may
 * declare, and the buffer sizes for a state's name and description.
 */
#define CPUIDLE_STATE_MAX	10
#define CPUIDLE_NAME_LEN	16
#define CPUIDLE_DESC_LEN	32

struct module;

/* Forward declarations; full definitions appear later in this header. */
struct cpuidle_device;
struct cpuidle_driver;


/****************************
 * CPUIDLE DEVICE INTERFACE *
 ****************************/

/*
 * Reason bits recorded in cpuidle_state_usage.disable when an idle state
 * has been disabled (by the user or by the driver itself).
 */
#define CPUIDLE_STATE_DISABLED_BY_USER		BIT(0)
#define CPUIDLE_STATE_DISABLED_BY_DRIVER	BIT(1)
35 | |
/*
 * Per-CPU, per-state runtime statistics and disable control.
 */
struct cpuidle_state_usage {
	unsigned long long disable;	/* CPUIDLE_STATE_DISABLED_BY_* bits */
	unsigned long long usage;	/* number of times the state was entered */
	u64 time_ns;			/* accumulated residency, in ns */
	unsigned long long above; /* Number of times it's been too deep */
	unsigned long long below; /* Number of times it's been too shallow */
	unsigned long long rejected; /* Number of times idle entry was rejected */
#ifdef CONFIG_SUSPEND
	unsigned long long s2idle_usage;	/* entries via suspend-to-idle */
	unsigned long long s2idle_time; /* in US */
#endif
};
48 | |
/*
 * Static description of a single idle state, provided by the cpuidle
 * driver. Latency/residency exist in two units: the _ns fields and the
 * legacy microsecond fields below them.
 */
struct cpuidle_state {
	char name[CPUIDLE_NAME_LEN];
	char desc[CPUIDLE_DESC_LEN];

	s64 exit_latency_ns;		/* worst-case exit latency, in ns */
	s64 target_residency_ns;	/* minimum residency to break even, in ns */
	unsigned int flags;		/* CPUIDLE_FLAG_* */
	unsigned int exit_latency; /* in US */
	int power_usage; /* in mW */
	unsigned int target_residency; /* in US */

	/*
	 * Enter this state on @dev. By the convention used in
	 * __CPU_PM_CPU_IDLE_ENTER below, returns the index of the state
	 * actually entered on success, or a negative value on failure.
	 */
	int (*enter)	(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index);

	/* Enter the state on a CPU going offline; does not return on success. */
	int (*enter_dead) (struct cpuidle_device *dev, int index);

	/*
	 * CPUs execute ->enter_s2idle with the local tick or entire timekeeping
	 * suspended, so it must not re-enable interrupts at any point (even
	 * temporarily) or attempt to change states of clock event devices.
	 *
	 * This callback may point to the same function as ->enter if all of
	 * the above requirements are met by it.
	 */
	int (*enter_s2idle)(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv,
			    int index);
};
78 | |
/* Idle State Flags (cpuidle_state.flags) */
#define CPUIDLE_FLAG_NONE       	(0x00)
#define CPUIDLE_FLAG_POLLING		BIT(0) /* polling state */
#define CPUIDLE_FLAG_COUPLED		BIT(1) /* state applies to multiple cpus */
#define CPUIDLE_FLAG_TIMER_STOP 	BIT(2) /* timer is stopped on this state */
#define CPUIDLE_FLAG_UNUSABLE		BIT(3) /* avoid using this state */
#define CPUIDLE_FLAG_OFF		BIT(4) /* disable this state by default */
#define CPUIDLE_FLAG_TLB_FLUSHED	BIT(5) /* idle-state flushes TLBs */
#define CPUIDLE_FLAG_RCU_IDLE		BIT(6) /* idle-state takes care of RCU */

/* Opaque sysfs kobject wrappers, defined in the cpuidle sysfs code. */
struct cpuidle_device_kobj;
struct cpuidle_state_kobj;
struct cpuidle_driver_kobj;
92 | |
/*
 * Per-CPU cpuidle bookkeeping: registration state, last-entry results,
 * per-state usage statistics and the associated sysfs objects.
 */
struct cpuidle_device {
	unsigned int		registered:1;	/* registered with the core */
	unsigned int		enabled:1;	/* idle state entry enabled */
	unsigned int		poll_time_limit:1; /* polling ended on time limit */
	unsigned int		cpu;		/* logical CPU this device serves */
	ktime_t			next_hrtimer;	/* next hrtimer expiry on this CPU */

	int			last_state_idx;	/* index of the state entered last */
	u64			last_residency_ns; /* measured residency of last entry */
	u64			poll_limit_ns;	/* time limit for the polling state */
	u64			forced_idle_latency_limit_ns; /* latency cap for forced idle */
	struct cpuidle_state_usage	states_usage[CPUIDLE_STATE_MAX];
	struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; /* per-state sysfs */
	struct cpuidle_driver_kobj *kobj_driver;
	struct cpuidle_device_kobj *kobj_dev;
	struct list_head 	device_list;	/* entry in the framework's device list */

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
	cpumask_t		coupled_cpus;	/* CPUs sharing coupled idle states */
	struct cpuidle_coupled	*coupled;
#endif
};

/* Per-CPU device handles; see cpuidle_get_device() for the accessor. */
DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
118 | |
/*
 * Switch context tracking (RCU, lockdep IRQ state, instrumentation) into
 * idle mode before a low-level idle entry. Must be called with IRQs
 * disabled; the statement ordering below is deliberate and load-bearing.
 * Paired with ct_cpuidle_exit().
 */
static __always_inline void ct_cpuidle_enter(void)
{
	lockdep_assert_irqs_disabled();
	/*
	 * Idle is allowed to (temporary) enable IRQs. It
	 * will return with IRQs disabled.
	 *
	 * Trace IRQs enable here, then switch off RCU, and have
	 * arch_cpu_idle() use raw_local_irq_enable(). Note that
	 * ct_idle_enter() relies on lockdep IRQ state, so switch that
	 * last -- this is very similar to the entry code.
	 */
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	instrumentation_end();
	ct_idle_enter();
	lockdep_hardirqs_on(_RET_IP_);
}
137 | |
/*
 * Undo ct_cpuidle_enter() after the low-level idle routine returns:
 * lockdep IRQ state first, then RCU, then re-enable instrumentation —
 * the exact reverse of the enter path.
 */
static __always_inline void ct_cpuidle_exit(void)
{
	/*
	 * Carefully undo the above.
	 */
	lockdep_hardirqs_off(_RET_IP_);
	ct_idle_exit();
	instrumentation_begin();
}
147 | |
148 | /**************************** |
149 | * CPUIDLE DRIVER INTERFACE * |
150 | ****************************/ |
151 | |
/*
 * A cpuidle driver declares the idle states available on the CPUs it
 * manages and the callbacks used to enter them.
 */
struct cpuidle_driver {
	const char		*name;		/* driver name */
	struct module 		*owner;		/* owning module, for refcounting */

        /* used by the cpuidle framework to setup the broadcast timer */
	unsigned int            bctimer:1;
	/* states array must be ordered in decreasing power consumption */
	struct cpuidle_state	states[CPUIDLE_STATE_MAX];
	int			state_count;	/* number of valid states[] entries */
	int			safe_state_index; /* fallback state index — NOTE(review): semantics defined by the coupled-idle core; confirm there */

	/* the driver handles the cpus in cpumask */
	struct cpumask		*cpumask;

	/* preferred governor to switch at register time */
	const char		*governor;
};
169 | |
#ifdef CONFIG_CPU_IDLE
/* Core framework entry points, implemented in drivers/cpuidle/. */
extern void disable_cpuidle(void);
extern bool cpuidle_not_available(struct cpuidle_driver *drv,
				  struct cpuidle_device *dev);

/* Governor-driven state selection / entry / feedback cycle. */
extern int cpuidle_select(struct cpuidle_driver *drv,
			  struct cpuidle_device *dev,
			  bool *stop_tick);
extern int cpuidle_enter(struct cpuidle_driver *drv,
			 struct cpuidle_device *dev, int index);
extern void cpuidle_reflect(struct cpuidle_device *dev, int index);
extern u64 cpuidle_poll_time(struct cpuidle_driver *drv,
			     struct cpuidle_device *dev);

/* Driver and device registration management. */
extern int cpuidle_register_driver(struct cpuidle_driver *drv);
extern struct cpuidle_driver *cpuidle_get_driver(void);
extern void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
					bool disable);
extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
extern int cpuidle_register_device(struct cpuidle_device *dev);
extern void cpuidle_unregister_device(struct cpuidle_device *dev);
extern int cpuidle_register(struct cpuidle_driver *drv,
			    const struct cpumask *const coupled_cpus);
extern void cpuidle_unregister(struct cpuidle_driver *drv);
extern void cpuidle_pause_and_lock(void);
extern void cpuidle_resume_and_unlock(void);
extern void cpuidle_pause(void);
extern void cpuidle_resume(void);
extern int cpuidle_enable_device(struct cpuidle_device *dev);
extern void cpuidle_disable_device(struct cpuidle_device *dev);
extern int cpuidle_play_dead(void);

extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
/* Return the current CPU's cpuidle device (may be NULL if unregistered). */
static inline struct cpuidle_device *cpuidle_get_device(void)
{return __this_cpu_read(cpuidle_devices); }
#else
/*
 * Stubs for !CONFIG_CPU_IDLE: lookups return NULL, operations that must
 * succeed become no-ops, and everything else reports -ENODEV.
 */
static inline void disable_cpuidle(void) { }
static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
					 struct cpuidle_device *dev)
{return true; }
static inline int cpuidle_select(struct cpuidle_driver *drv,
				 struct cpuidle_device *dev, bool *stop_tick)
{return -ENODEV; }
static inline int cpuidle_enter(struct cpuidle_driver *drv,
				struct cpuidle_device *dev, int index)
{return -ENODEV; }
static inline void cpuidle_reflect(struct cpuidle_device *dev, int index) { }
static inline u64 cpuidle_poll_time(struct cpuidle_driver *drv,
			     struct cpuidle_device *dev)
{return 0; }
static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
{return -ENODEV; }
static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
static inline void cpuidle_driver_state_disabled(struct cpuidle_driver *drv,
					       int idx, bool disable) { }
static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
static inline int cpuidle_register_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { }
static inline int cpuidle_register(struct cpuidle_driver *drv,
				   const struct cpumask *const coupled_cpus)
{return -ENODEV; }
static inline void cpuidle_unregister(struct cpuidle_driver *drv) { }
static inline void cpuidle_pause_and_lock(void) { }
static inline void cpuidle_resume_and_unlock(void) { }
static inline void cpuidle_pause(void) { }
static inline void cpuidle_resume(void) { }
static inline int cpuidle_enable_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
static inline int cpuidle_play_dead(void) {return -ENODEV; }
static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
	struct cpuidle_device *dev) {return NULL; }
static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
#endif
245 | |
/*
 * Helpers for forcing the deepest available idle state within a latency
 * limit, and for entering a state during suspend-to-idle.
 */
#ifdef CONFIG_CPU_IDLE
extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
				      struct cpuidle_device *dev,
				      u64 latency_limit_ns);
extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
				struct cpuidle_device *dev);
extern void cpuidle_use_deepest_state(u64 latency_limit_ns);
#else
/* !CONFIG_CPU_IDLE stubs: report -ENODEV or do nothing. */
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
					     struct cpuidle_device *dev,
					     u64 latency_limit_ns)
{return -ENODEV; }
static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
				       struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_use_deepest_state(u64 latency_limit_ns)
{
}
#endif
265 | |
/* kernel/sched/idle.c — scheduler-side idle hooks. */
extern void sched_idle_set_state(struct cpuidle_state *idle_state);
extern void default_idle_call(void);

/* Barrier used by coupled-idle CPUs; a no-op without coupled support. */
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
#else
static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
{
}
#endif

/* The polling state only exists when the arch provides cpu_relax(). */
#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_ARCH_HAS_CPU_RELAX)
void cpuidle_poll_state_init(struct cpuidle_driver *drv);
#else
static inline void cpuidle_poll_state_init(struct cpuidle_driver *drv) {}
#endif
283 | |
284 | /****************************** |
285 | * CPUIDLE GOVERNOR INTERFACE * |
286 | ******************************/ |
287 | |
/*
 * A governor implements the idle-state selection policy. ->select picks a
 * state index (and may ask to keep the tick via @stop_tick); ->reflect is
 * called afterwards with the outcome so the governor can adapt.
 */
struct cpuidle_governor {
	char			name[CPUIDLE_NAME_LEN];
	struct list_head 	governor_list;	/* entry in the governors list */
	unsigned int		rating;		/* ranking among governors — NOTE(review): selection rule lives in the governor core; confirm there */

	int  (*enable)		(struct cpuidle_driver *drv,
					struct cpuidle_device *dev);
	void (*disable)		(struct cpuidle_driver *drv,
					struct cpuidle_device *dev);

	int  (*select)		(struct cpuidle_driver *drv,
					struct cpuidle_device *dev,
					bool *stop_tick);
	void (*reflect)		(struct cpuidle_device *dev, int index);
};

extern int cpuidle_register_governor(struct cpuidle_governor *gov);
extern s64 cpuidle_governor_latency_req(unsigned int cpu);
306 | |
/*
 * Common skeleton for platform ->enter callbacks built on a low-level
 * idle-entry function.
 *
 * @low_level_idle_enter: function that actually enters the idle state
 * @idx:	cpuidle state index; evaluated to on success
 * @state:	argument forwarded to @low_level_idle_enter
 * @is_retention: non-zero to skip the CPU PM notifiers
 *		(cpu_pm_enter()/cpu_pm_exit())
 * @is_rcu:	non-zero when the callee handles context tracking itself,
 *		skipping ct_cpuidle_enter()/ct_cpuidle_exit()
 *
 * This is a GNU statement expression. Note that for state 0 the early
 * "return idx" returns from the *calling function* (the ->enter
 * callback the macro is expanded in), not just from the macro.
 * Evaluates to @idx on success, or -1 if cpu_pm_enter() or the
 * low-level enter function failed.
 */
#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter,			\
				idx,					\
				state,					\
				is_retention, is_rcu)			\
({									\
	int __ret = 0;							\
									\
	if (!idx) {							\
		cpu_do_idle();						\
		return idx;						\
	}								\
									\
	if (!is_retention)						\
		__ret = cpu_pm_enter();					\
	if (!__ret) {							\
		if (!is_rcu)						\
			ct_cpuidle_enter();				\
		__ret = low_level_idle_enter(state);			\
		if (!is_rcu)						\
			ct_cpuidle_exit();				\
		if (!is_retention)					\
			cpu_pm_exit();					\
	}								\
									\
	__ret ? -1 : idx;						\
})
333 | |
/*
 * Convenience wrappers around __CPU_PM_CPU_IDLE_ENTER:
 *  - plain variants pass @idx itself as the low-level @state argument;
 *  - _PARAM variants pass a separate @state value;
 *  - _RETENTION variants skip the CPU PM notifier calls;
 *  - _RCU variants skip ct_cpuidle_enter()/ct_cpuidle_exit().
 */
#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 0, 0)

#define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 1, 0)

#define CPU_PM_CPU_IDLE_ENTER_PARAM(low_level_idle_enter, idx, state)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0, 0)

#define CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(low_level_idle_enter, idx, state)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0, 1)

#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(low_level_idle_enter, idx, state)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1, 0)

#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM_RCU(low_level_idle_enter, idx, state)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1, 1)
351 | |
352 | #endif /* _LINUX_CPUIDLE_H */ |
353 | |