/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/include/linux/cpufreq.h
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 */
#ifndef _LINUX_CPUFREQ_H
#define _LINUX_CPUFREQ_H

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
#include <linux/minmax.h>

/*********************************************************************
 *                        CPUFREQ INTERFACE                          *
 *********************************************************************/
/*
 * Frequency values here are CPU kHz
 *
 * Maximum transition latency is in nanoseconds - if it's unknown,
 * CPUFREQ_ETERNAL shall be used.
 */

#define CPUFREQ_ETERNAL			(-1)
#define CPUFREQ_NAME_LEN		16
/* Print length for names. Extra 1 space for accommodating '\n' in prints */
#define CPUFREQ_NAME_PLEN		(CPUFREQ_NAME_LEN + 1)

struct cpufreq_governor;

enum cpufreq_table_sorting {
	CPUFREQ_TABLE_UNSORTED,
	CPUFREQ_TABLE_SORTED_ASCENDING,
	CPUFREQ_TABLE_SORTED_DESCENDING
};

struct cpufreq_cpuinfo {
	unsigned int		max_freq;
	unsigned int		min_freq;

	/* in 10^(-9) s = nanoseconds */
	unsigned int		transition_latency;
};

struct cpufreq_policy {
	/* CPUs sharing clock, require sw coordination */
	cpumask_var_t		cpus;	/* Online CPUs only */
	cpumask_var_t		related_cpus; /* Online + Offline CPUs */
	cpumask_var_t		real_cpus; /* Related and present */

	unsigned int		shared_type; /* ACPI: ANY or ALL affected CPUs
						should set cpufreq */
	unsigned int		cpu;    /* cpu managing this policy, must be online */

	struct clk		*clk;
	struct cpufreq_cpuinfo	cpuinfo;/* see above */

	unsigned int		min;    /* in kHz */
	unsigned int		max;    /* in kHz */
	unsigned int		cur;    /* in kHz, only needed if cpufreq
					 * governors are used */
	unsigned int		suspend_freq; /* freq to set during suspend */

	unsigned int		policy; /* see above */
	unsigned int		last_policy; /* policy before unplug */
	struct cpufreq_governor	*governor; /* see below */
	void			*governor_data;
	char			last_governor[CPUFREQ_NAME_LEN]; /* last governor used */

	struct work_struct	update; /* if update_policy() needs to be
					 * called, but you're in IRQ context */

	struct freq_constraints	constraints;
	struct freq_qos_request	*min_freq_req;
	struct freq_qos_request	*max_freq_req;

	struct cpufreq_frequency_table	*freq_table;
	enum cpufreq_table_sorting freq_table_sorted;

	struct list_head	policy_list;
	struct kobject		kobj;
	struct completion	kobj_unregister;

	/*
	 * The rules for this semaphore:
	 * - Any routine that wants to read from the policy structure will
	 *   do a down_read on this semaphore.
	 * - Any routine that will write to the policy structure and/or may take away
	 *   the policy altogether (e.g. CPU hotplug), will hold this lock in write
	 *   mode before doing so.
	 *
	 * (A short read-side usage sketch follows this structure definition.)
	 */
	struct rw_semaphore	rwsem;

	/*
	 * Fast switch flags:
	 * - fast_switch_possible should be set by the driver if it can
	 *   guarantee that frequency can be changed on any CPU sharing the
	 *   policy and that the change will affect all of the policy CPUs then.
	 * - fast_switch_enabled is to be set by governors that support fast
	 *   frequency switching with the help of cpufreq_enable_fast_switch().
	 */
	bool			fast_switch_possible;
	bool			fast_switch_enabled;

	/*
	 * Set if the CPUFREQ_GOV_STRICT_TARGET flag is set for the current
	 * governor.
	 */
	bool			strict_target;

	/*
	 * Set if inefficient frequencies were found in the frequency table.
	 * This indicates if the relation flag CPUFREQ_RELATION_E can be
	 * honored.
	 */
	bool			efficiencies_available;

	/*
	 * Preferred average time interval between consecutive invocations of
	 * the driver to set the frequency for this policy. To be set by the
	 * scaling driver (0, which is the default, means no preference).
	 */
	unsigned int		transition_delay_us;

	/*
	 * Remote DVFS flag (Not added to the driver structure as we don't want
	 * to access another structure from scheduler hotpath).
	 *
	 * Should be set if CPUs can do DVFS on behalf of other CPUs from
	 * different cpufreq policies.
	 */
	bool			dvfs_possible_from_any_cpu;

	/* Per policy boost enabled flag. */
	bool			boost_enabled;

	/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
	unsigned int		cached_target_freq;
	unsigned int		cached_resolved_idx;

	/* Synchronization for frequency transitions */
	bool			transition_ongoing; /* Tracks transition status */
	spinlock_t		transition_lock;
	wait_queue_head_t	transition_wait;
	struct task_struct	*transition_task; /* Task which is doing the transition */

	/* cpufreq-stats */
	struct cpufreq_stats	*stats;

	/* For cpufreq driver's internal use */
	void			*driver_data;

	/* Pointer to the cooling device if used for thermal mitigation */
	struct thermal_cooling_device *cdev;

	struct notifier_block nb_min;
	struct notifier_block nb_max;
};
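
/*
 * Illustrative sketch (not part of the original header): a reader taking the
 * policy rwsem described above before looking at the policy limits.  The
 * function name is hypothetical; down_read()/up_read() are the usual rwsem
 * primitives.
 *
 *	static unsigned int example_read_limit_span(struct cpufreq_policy *policy)
 *	{
 *		unsigned int span;
 *
 *		down_read(&policy->rwsem);
 *		span = policy->max - policy->min;
 *		up_read(&policy->rwsem);
 *
 *		return span;
 *	}
 */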

/*
 * Used for passing new cpufreq policy data to the cpufreq driver's ->verify()
 * callback for sanitization. That callback is only expected to modify the min
 * and max values, if necessary, and specifically it must not update the
 * frequency table.
 */
struct cpufreq_policy_data {
	struct cpufreq_cpuinfo		cpuinfo;
	struct cpufreq_frequency_table	*freq_table;
	unsigned int			cpu;
	unsigned int			min;    /* in kHz */
	unsigned int			max;    /* in kHz */
};

struct cpufreq_freqs {
	struct cpufreq_policy *policy;
	unsigned int old;
	unsigned int new;
	u8 flags;		/* flags of cpufreq_driver, see below. */
};

/* Only for ACPI */
#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
#define CPUFREQ_SHARED_TYPE_HW	 (1) /* HW does needed coordination */
#define CPUFREQ_SHARED_TYPE_ALL	 (2) /* All dependent CPUs should set freq */
#define CPUFREQ_SHARED_TYPE_ANY	 (3) /* Freq can be set from any dependent CPU*/

#ifdef CONFIG_CPU_FREQ
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
void cpufreq_cpu_put(struct cpufreq_policy *policy);
#else
static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	return NULL;
}
static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	return NULL;
}
static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
#endif
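
/*
 * Usage sketch (assumption, not mandated by this header): cpufreq_cpu_get()
 * returns a reference-counted policy that must be dropped again with
 * cpufreq_cpu_put().  The helper name below is made up for illustration.
 *
 *	static unsigned int example_cur_freq(unsigned int cpu)
 *	{
 *		struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *		unsigned int freq = 0;
 *
 *		if (policy) {
 *			freq = policy->cur;
 *			cpufreq_cpu_put(policy);
 *		}
 *
 *		return freq;
 *	}
 */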

static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
	return cpumask_empty(policy->cpus);
}

static inline bool policy_is_shared(struct cpufreq_policy *policy)
{
	return cpumask_weight(policy->cpus) > 1;
}

#ifdef CONFIG_CPU_FREQ
unsigned int cpufreq_get(unsigned int cpu);
unsigned int cpufreq_quick_get(unsigned int cpu);
unsigned int cpufreq_quick_get_max(unsigned int cpu);
unsigned int cpufreq_get_hw_max_freq(unsigned int cpu);
void disable_cpufreq(void);

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);

struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
void cpufreq_cpu_release(struct cpufreq_policy *policy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
void refresh_frequency_limits(struct cpufreq_policy *policy);
void cpufreq_update_policy(unsigned int cpu);
void cpufreq_update_limits(unsigned int cpu);
bool have_governor_per_policy(void);
bool cpufreq_supports_freq_invariance(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
bool has_target_index(void);
#else
static inline unsigned int cpufreq_get(unsigned int cpu)
{
	return 0;
}
static inline unsigned int cpufreq_quick_get(unsigned int cpu)
{
	return 0;
}
static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	return 0;
}
static inline unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
	return 0;
}
static inline bool cpufreq_supports_freq_invariance(void)
{
	return false;
}
static inline void disable_cpufreq(void) { }
#endif

#ifdef CONFIG_CPU_FREQ_STAT
void cpufreq_stats_create_table(struct cpufreq_policy *policy);
void cpufreq_stats_free_table(struct cpufreq_policy *policy);
void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
				     unsigned int new_freq);
#else
static inline void cpufreq_stats_create_table(struct cpufreq_policy *policy) { }
static inline void cpufreq_stats_free_table(struct cpufreq_policy *policy) { }
static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
						   unsigned int new_freq) { }
#endif /* CONFIG_CPU_FREQ_STAT */

/*********************************************************************
 *                      CPUFREQ DRIVER INTERFACE                     *
 *********************************************************************/

#define CPUFREQ_RELATION_L 0  /* lowest frequency at or above target */
#define CPUFREQ_RELATION_H 1  /* highest frequency below or at target */
#define CPUFREQ_RELATION_C 2  /* closest frequency to target */
/* relation flags */
#define CPUFREQ_RELATION_E BIT(2) /* Get if possible an efficient frequency */

#define CPUFREQ_RELATION_LE (CPUFREQ_RELATION_L | CPUFREQ_RELATION_E)
#define CPUFREQ_RELATION_HE (CPUFREQ_RELATION_H | CPUFREQ_RELATION_E)
#define CPUFREQ_RELATION_CE (CPUFREQ_RELATION_C | CPUFREQ_RELATION_E)
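
/*
 * Illustrative sketch (not part of the original header): a governor asking
 * for the lowest efficient frequency at or above a target.  The wrapper name
 * is hypothetical; __cpufreq_driver_target() and the relation macros above
 * are the real interface, declared further below.
 *
 *	static int example_request(struct cpufreq_policy *policy,
 *				   unsigned int target_khz)
 *	{
 *		return __cpufreq_driver_target(policy, target_khz,
 *					       CPUFREQ_RELATION_LE);
 *	}
 */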

struct freq_attr {
	struct attribute attr;
	ssize_t (*show)(struct cpufreq_policy *, char *);
	ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
};

#define cpufreq_freq_attr_ro(_name)		\
static struct freq_attr _name =			\
__ATTR(_name, 0444, show_##_name, NULL)

#define cpufreq_freq_attr_ro_perm(_name, _perm)	\
static struct freq_attr _name =			\
__ATTR(_name, _perm, show_##_name, NULL)

#define cpufreq_freq_attr_rw(_name)		\
static struct freq_attr _name =			\
__ATTR(_name, 0644, show_##_name, store_##_name)

#define cpufreq_freq_attr_wo(_name)		\
static struct freq_attr _name =			\
__ATTR(_name, 0200, NULL, store_##_name)

#define define_one_global_ro(_name)		\
static struct kobj_attribute _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

#define define_one_global_rw(_name)		\
static struct kobj_attribute _name =		\
__ATTR(_name, 0644, show_##_name, store_##_name)
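
/*
 * Usage sketch (assumption): a read-only per-policy sysfs attribute built
 * with the macros above.  The attribute name "example_khz" is made up; the
 * macro expects the show routine to be named show_<attr>.
 *
 *	static ssize_t show_example_khz(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sysfs_emit(buf, "%u\n", policy->cur);
 *	}
 *	cpufreq_freq_attr_ro(example_khz);
 *
 * The resulting &example_khz can then be listed in a driver's ->attr array.
 */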


struct cpufreq_driver {
	char		name[CPUFREQ_NAME_LEN];
	u16		flags;
	void		*driver_data;

	/* needed by all drivers */
	int		(*init)(struct cpufreq_policy *policy);
	int		(*verify)(struct cpufreq_policy_data *policy);

	/* define one out of two */
	int		(*setpolicy)(struct cpufreq_policy *policy);

	int		(*target)(struct cpufreq_policy *policy,
				  unsigned int target_freq,
				  unsigned int relation);	/* Deprecated */
	int		(*target_index)(struct cpufreq_policy *policy,
					unsigned int index);
	unsigned int	(*fast_switch)(struct cpufreq_policy *policy,
				       unsigned int target_freq);
	/*
	 * ->fast_switch() replacement for drivers that use an internal
	 * representation of performance levels and can pass hints other than
	 * the target performance level to the hardware. This can only be set
	 * if ->fast_switch is set too, because in those cases (under specific
	 * conditions) scale invariance can be disabled, which causes the
	 * schedutil governor to fall back to the latter.
	 */
	void		(*adjust_perf)(unsigned int cpu,
				       unsigned long min_perf,
				       unsigned long target_perf,
				       unsigned long capacity);

	/*
	 * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
	 * unset.
	 *
	 * get_intermediate should return a stable intermediate frequency
	 * platform wants to switch to and target_intermediate() should set CPU
	 * to that frequency, before jumping to the frequency corresponding
	 * to 'index'. Core will take care of sending notifications and driver
	 * doesn't have to handle them in target_intermediate() or
	 * target_index().
	 *
	 * Drivers can return '0' from get_intermediate() in case they don't
	 * wish to switch to intermediate frequency for some target frequency.
	 * In that case core will directly call ->target_index().
	 */
	unsigned int	(*get_intermediate)(struct cpufreq_policy *policy,
					    unsigned int index);
	int		(*target_intermediate)(struct cpufreq_policy *policy,
					       unsigned int index);

	/* should be defined, if possible, return 0 on error */
	unsigned int	(*get)(unsigned int cpu);

	/* Called to update policy limits on firmware notifications. */
	void		(*update_limits)(unsigned int cpu);

	/* optional */
	int		(*bios_limit)(int cpu, unsigned int *limit);

	int		(*online)(struct cpufreq_policy *policy);
	int		(*offline)(struct cpufreq_policy *policy);
	int		(*exit)(struct cpufreq_policy *policy);
	int		(*suspend)(struct cpufreq_policy *policy);
	int		(*resume)(struct cpufreq_policy *policy);

	/* Will be called after the driver is fully initialized */
	void		(*ready)(struct cpufreq_policy *policy);

	struct freq_attr **attr;

	/* platform specific boost support code */
	bool		boost_enabled;
	int		(*set_boost)(struct cpufreq_policy *policy, int state);

	/*
	 * Set by drivers that want to register with the energy model after the
	 * policy is properly initialized, but before the governor is started.
	 */
	void		(*register_em)(struct cpufreq_policy *policy);
};
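
/*
 * Minimal driver sketch (illustrative only, hypothetical "example" platform):
 * a table-based driver wires up ->init/->verify/->target_index/->get and then
 * registers itself.  Real drivers also fill in the frequency table and clock
 * handling, which is omitted here; example_freq_table and
 * example_set_target_index() are made-up names.
 *
 *	static int example_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		policy->freq_table = example_freq_table;	(driver-owned)
 *		policy->cpuinfo.transition_latency = 300 * 1000; (300 us)
 *		return 0;
 *	}
 *
 *	static struct cpufreq_driver example_cpufreq_driver = {
 *		.name		= "example",
 *		.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
 *		.init		= example_cpufreq_init,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= example_set_target_index,
 *		.get		= cpufreq_generic_get,
 *		.attr		= cpufreq_generic_attr,
 *	};
 *
 * Registration then happens via
 * cpufreq_register_driver(&example_cpufreq_driver).
 */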

/* flags */

/*
 * Set by drivers that need to update internal upper and lower boundaries along
 * with the target frequency and so the core and governors should also invoke
 * the driver if the target frequency does not change, but the policy min or
 * max may have changed.
 */
#define CPUFREQ_NEED_UPDATE_LIMITS		BIT(0)

/* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */
#define CPUFREQ_CONST_LOOPS			BIT(1)

/*
 * Set by drivers that want the core to automatically register the cpufreq
 * driver as a thermal cooling device.
 */
#define CPUFREQ_IS_COOLING_DEV			BIT(2)

/*
 * This should be set by platforms having multiple clock-domains, i.e.
 * supporting multiple policies. With this, sysfs directories for the governor
 * are created under the cpu/cpu<num>/cpufreq/ directory, so the same governor
 * can be used with different tunables for different clusters.
 */
#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY	BIT(3)

/*
 * Drivers that send POSTCHANGE notifications from outside of their ->target()
 * routine must set this flag in cpufreq_driver->flags, so that the core can
 * handle them specially.
 */
#define CPUFREQ_ASYNC_NOTIFICATION		BIT(4)

/*
 * Set by drivers which want the cpufreq core to check whether the CPU is
 * running at a frequency present in the freq-table exposed by the driver. For
 * these drivers, if the CPU is found running at an out-of-table frequency, the
 * core will try to set it to a frequency from the table, and if that fails it
 * will stop further boot by issuing a BUG_ON().
 */
#define CPUFREQ_NEED_INITIAL_FREQ_CHECK	BIT(5)

/*
 * Set by drivers to disallow use of governors with "dynamic_switching" flag
 * set.
 */
#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING	BIT(6)

int cpufreq_register_driver(struct cpufreq_driver *driver_data);
void cpufreq_unregister_driver(struct cpufreq_driver *driver_data);

bool cpufreq_driver_test_flags(u16 flags);
const char *cpufreq_get_current_driver(void);
void *cpufreq_get_driver_data(void);

static inline int cpufreq_thermal_control_enabled(struct cpufreq_driver *drv)
{
	return IS_ENABLED(CONFIG_CPU_THERMAL) &&
		(drv->flags & CPUFREQ_IS_COOLING_DEV);
}

static inline void cpufreq_verify_within_limits(struct cpufreq_policy_data *policy,
						unsigned int min,
						unsigned int max)
{
	policy->max = clamp(policy->max, min, max);
	policy->min = clamp(policy->min, min, policy->max);
}

static inline void
cpufreq_verify_within_cpu_limits(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
}
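
/*
 * Usage sketch (assumption): a driver's ->verify() callback usually just
 * clamps the incoming limits to the hardware range.  The function name is
 * hypothetical.
 *
 *	static int example_cpufreq_verify(struct cpufreq_policy_data *policy)
 *	{
 *		cpufreq_verify_within_cpu_limits(policy);
 *		return 0;
 *	}
 *
 * Table-based drivers can instead point ->verify at
 * cpufreq_generic_frequency_table_verify(), declared later in this header,
 * which additionally tightens the limits to the frequency table.
 */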

#ifdef CONFIG_CPU_FREQ
void cpufreq_suspend(void);
void cpufreq_resume(void);
int cpufreq_generic_suspend(struct cpufreq_policy *policy);
#else
static inline void cpufreq_suspend(void) {}
static inline void cpufreq_resume(void) {}
#endif

/*********************************************************************
 *                     CPUFREQ NOTIFIER INTERFACE                    *
 *********************************************************************/

#define CPUFREQ_TRANSITION_NOTIFIER	(0)
#define CPUFREQ_POLICY_NOTIFIER		(1)

/* Transition notifiers */
#define CPUFREQ_PRECHANGE		(0)
#define CPUFREQ_POSTCHANGE		(1)

/* Policy Notifiers */
#define CPUFREQ_CREATE_POLICY		(0)
#define CPUFREQ_REMOVE_POLICY		(1)

#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs);
void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed);

#else /* CONFIG_CPU_FREQ */
static inline int cpufreq_register_notifier(struct notifier_block *nb,
						unsigned int list)
{
	return 0;
}
static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
						unsigned int list)
{
	return 0;
}
#endif /* !CONFIG_CPU_FREQ */
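
/*
 * Usage sketch (assumption): a transition notifier registered on the
 * CPUFREQ_TRANSITION_NOTIFIER list receives CPUFREQ_PRECHANGE and
 * CPUFREQ_POSTCHANGE events with a struct cpufreq_freqs pointer as data.
 * The callback and variable names are made up for illustration.
 *
 *	static int example_trans_cb(struct notifier_block *nb,
 *				    unsigned long event, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (event == CPUFREQ_POSTCHANGE)
 *			pr_debug("cpufreq: %u -> %u kHz\n",
 *				 freqs->old, freqs->new);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_trans_nb = {
 *		.notifier_call = example_trans_cb,
 *	};
 *
 * It would be registered with
 * cpufreq_register_notifier(&example_trans_nb, CPUFREQ_TRANSITION_NOTIFIER).
 */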

/**
 * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
 * safe)
 * @old: old value
 * @div: divisor
 * @mult: multiplier
 *
 * new = old * mult / div
 */
static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
		u_int mult)
{
#if BITS_PER_LONG == 32
	u64 result = ((u64) old) * ((u64) mult);
	do_div(result, div);
	return (unsigned long) result;

#elif BITS_PER_LONG == 64
	unsigned long result = old * ((u64) mult);
	result /= div;
	return result;
#endif
}
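
/*
 * Worked example (illustrative): rescaling loops_per_jiffy for a change from
 * 800000 kHz to 1000000 kHz.  The variable names are made up.
 *
 *	unsigned long old_lpj = 49920;
 *	unsigned long new_lpj = cpufreq_scale(old_lpj, 800000, 1000000);
 *
 * new_lpj ends up as 62400, i.e. old_lpj * 1000000 / 800000, with the
 * intermediate product computed in 64 bits so it cannot overflow on 32-bit
 * architectures.
 */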

/*********************************************************************
 *                          CPUFREQ GOVERNORS                        *
 *********************************************************************/

#define CPUFREQ_POLICY_UNKNOWN		(0)
/*
 * If (cpufreq_driver->target) exists, the ->governor decides what frequency
 * within the limits is used. If (cpufreq_driver->setpolicy) exists, these
 * two generic policies are available:
 */
#define CPUFREQ_POLICY_POWERSAVE	(1)
#define CPUFREQ_POLICY_PERFORMANCE	(2)

/*
 * The polling frequency depends on the capability of the processor. Default
 * polling frequency is 1000 times the transition latency of the processor. The
 * ondemand governor will work on any processor with transition latency <= 10ms,
 * using appropriate sampling rate.
 */
#define LATENCY_MULTIPLIER		(1000)

struct cpufreq_governor {
	char	name[CPUFREQ_NAME_LEN];
	int	(*init)(struct cpufreq_policy *policy);
	void	(*exit)(struct cpufreq_policy *policy);
	int	(*start)(struct cpufreq_policy *policy);
	void	(*stop)(struct cpufreq_policy *policy);
	void	(*limits)(struct cpufreq_policy *policy);
	ssize_t	(*show_setspeed)	(struct cpufreq_policy *policy,
					 char *buf);
	int	(*store_setspeed)	(struct cpufreq_policy *policy,
					 unsigned int freq);
	struct list_head	governor_list;
	struct module		*owner;
	u8			flags;
};

/* Governor flags */

/* For governors which change frequency dynamically by themselves */
#define CPUFREQ_GOV_DYNAMIC_SWITCHING	BIT(0)

/* For governors wanting the target frequency to be set exactly */
#define CPUFREQ_GOV_STRICT_TARGET	BIT(1)


/* Pass a target to the cpufreq driver */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq);
void cpufreq_driver_adjust_perf(unsigned int cpu,
				unsigned long min_perf,
				unsigned long target_perf,
				unsigned long capacity);
bool cpufreq_driver_has_adjust_perf(void);
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation);
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation);
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
					 unsigned int target_freq);
unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
int cpufreq_start_governor(struct cpufreq_policy *policy);
void cpufreq_stop_governor(struct cpufreq_policy *policy);

#define cpufreq_governor_init(__governor)			\
static int __init __governor##_init(void)			\
{								\
	return cpufreq_register_governor(&__governor);		\
}								\
core_initcall(__governor##_init)

#define cpufreq_governor_exit(__governor)			\
static void __exit __governor##_exit(void)			\
{								\
	cpufreq_unregister_governor(&__governor);		\
}								\
module_exit(__governor##_exit)
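
/*
 * Minimal governor sketch (illustrative only, hypothetical "example" name):
 * a governor fills in struct cpufreq_governor and registers itself with the
 * helper macro above.  The ->limits() callback below simply re-applies the
 * policy limits using cpufreq_policy_apply_limits(), defined further down.
 *
 *	static void example_gov_limits(struct cpufreq_policy *policy)
 *	{
 *		cpufreq_policy_apply_limits(policy);
 *	}
 *
 *	static struct cpufreq_governor example_governor = {
 *		.name	= "example",
 *		.limits	= example_gov_limits,
 *		.owner	= THIS_MODULE,
 *	};
 *
 *	cpufreq_governor_init(example_governor);
 */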

struct cpufreq_governor *cpufreq_default_governor(void);
struct cpufreq_governor *cpufreq_fallback_governor(void);

static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
{
	if (policy->max < policy->cur)
		__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_HE);
	else if (policy->min > policy->cur)
		__cpufreq_driver_target(policy, policy->min,
					CPUFREQ_RELATION_LE);
}

/* Governor attribute set */
struct gov_attr_set {
	struct kobject kobj;
	struct list_head policy_list;
	struct mutex update_lock;
	int usage_count;
};

/* sysfs ops for cpufreq governors */
extern const struct sysfs_ops governor_sysfs_ops;

static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj)
{
	return container_of(kobj, struct gov_attr_set, kobj);
}

void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);

/* Governor sysfs attribute */
struct governor_attr {
	struct attribute attr;
	ssize_t (*show)(struct gov_attr_set *attr_set, char *buf);
	ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf,
			 size_t count);
};

/*********************************************************************
 *                     FREQUENCY TABLE HELPERS                       *
 *********************************************************************/

/* Special Values of .frequency field */
#define CPUFREQ_ENTRY_INVALID		~0u
#define CPUFREQ_TABLE_END		~1u
/* Special Values of .flags field */
#define CPUFREQ_BOOST_FREQ		(1 << 0)
#define CPUFREQ_INEFFICIENT_FREQ	(1 << 1)

struct cpufreq_frequency_table {
	unsigned int	flags;
	unsigned int	driver_data; /* driver specific data, not used by core */
	unsigned int	frequency;   /* kHz - doesn't need to be in ascending
				      * order */
};

#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
int dev_pm_opp_init_cpufreq_table(struct device *dev,
				  struct cpufreq_frequency_table **table);
void dev_pm_opp_free_cpufreq_table(struct device *dev,
				   struct cpufreq_frequency_table **table);
#else
static inline int dev_pm_opp_init_cpufreq_table(struct device *dev,
						struct cpufreq_frequency_table
						**table)
{
	return -EINVAL;
}

static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
						 struct cpufreq_frequency_table
						 **table)
{
}
#endif

/*
 * cpufreq_for_each_entry - iterate over a cpufreq_frequency_table
 * @pos:	the cpufreq_frequency_table * to use as a loop cursor.
 * @table:	the cpufreq_frequency_table * to iterate over.
 */

#define cpufreq_for_each_entry(pos, table)	\
	for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)

/*
 * cpufreq_for_each_entry_idx - iterate over a cpufreq_frequency_table
 *	with index
 * @pos:	the cpufreq_frequency_table * to use as a loop cursor.
 * @table:	the cpufreq_frequency_table * to iterate over.
 * @idx:	the table entry currently being processed
 */

#define cpufreq_for_each_entry_idx(pos, table, idx)	\
	for (pos = table, idx = 0; pos->frequency != CPUFREQ_TABLE_END; \
		pos++, idx++)

/*
 * cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table
 *	excluding CPUFREQ_ENTRY_INVALID frequencies.
 * @pos:	the cpufreq_frequency_table * to use as a loop cursor.
 * @table:	the cpufreq_frequency_table * to iterate over.
 */

#define cpufreq_for_each_valid_entry(pos, table)			\
	for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)	\
		if (pos->frequency == CPUFREQ_ENTRY_INVALID)		\
			continue;					\
		else
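
/*
 * Usage sketch (assumption): a small driver-owned table terminated with
 * CPUFREQ_TABLE_END, and a loop that skips CPUFREQ_ENTRY_INVALID slots.
 * The table contents are made up.
 *
 *	static struct cpufreq_frequency_table example_freq_table[] = {
 *		{ .frequency = 400000 },
 *		{ .frequency = 800000 },
 *		{ .frequency = CPUFREQ_ENTRY_INVALID },
 *		{ .frequency = 1200000, .flags = CPUFREQ_BOOST_FREQ },
 *		{ .frequency = CPUFREQ_TABLE_END },
 *	};
 *
 *	struct cpufreq_frequency_table *pos;
 *	unsigned int n = 0;
 *
 *	cpufreq_for_each_valid_entry(pos, example_freq_table)
 *		n++;
 *
 * n ends up as 3, because the CPUFREQ_ENTRY_INVALID slot is skipped.
 */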

/*
 * cpufreq_for_each_valid_entry_idx - iterate with index over a cpufreq
 *	frequency_table excluding CPUFREQ_ENTRY_INVALID frequencies.
 * @pos:	the cpufreq_frequency_table * to use as a loop cursor.
 * @table:	the cpufreq_frequency_table * to iterate over.
 * @idx:	the table entry currently being processed
 */

#define cpufreq_for_each_valid_entry_idx(pos, table, idx)		\
	cpufreq_for_each_entry_idx(pos, table, idx)			\
		if (pos->frequency == CPUFREQ_ENTRY_INVALID)		\
			continue;					\
		else

/**
 * cpufreq_for_each_efficient_entry_idx - iterate with index over a cpufreq
 *	frequency_table excluding CPUFREQ_ENTRY_INVALID and
 *	CPUFREQ_INEFFICIENT_FREQ frequencies.
 * @pos: the &struct cpufreq_frequency_table to use as a loop cursor.
 * @table: the &struct cpufreq_frequency_table to iterate over.
 * @idx: the table entry currently being processed.
 * @efficiencies: set to true to only iterate over efficient frequencies.
 */

#define cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies)	\
	cpufreq_for_each_valid_entry_idx(pos, table, idx)			\
		if (efficiencies && (pos->flags & CPUFREQ_INEFFICIENT_FREQ))	\
			continue;						\
		else


int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
				    struct cpufreq_frequency_table *table);

int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
				   struct cpufreq_frequency_table *table);
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy);

int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
				 unsigned int target_freq,
				 unsigned int relation);
int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
				      unsigned int freq);

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);

#ifdef CONFIG_CPU_FREQ
int cpufreq_boost_trigger_state(int state);
int cpufreq_boost_enabled(void);
int cpufreq_enable_boost_support(void);
bool policy_has_boost_freq(struct cpufreq_policy *policy);

/* Find lowest freq at or above target in a table in ascending order */
static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
					      unsigned int target_freq,
					      bool efficiencies)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
		freq = pos->frequency;

		if (freq >= target_freq)
			return idx;

		best = idx;
	}

	return best;
}

/* Find lowest freq at or above target in a table in descending order */
static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
					      unsigned int target_freq,
					      bool efficiencies)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
		freq = pos->frequency;

		if (freq == target_freq)
			return idx;

		if (freq > target_freq) {
			best = idx;
			continue;
		}

		/* No freq found above target_freq */
		if (best == -1)
			return idx;

		return best;
	}

	return best;
}

/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
					     unsigned int target_freq,
					     bool efficiencies)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
		return cpufreq_table_find_index_al(policy, target_freq,
						   efficiencies);
	else
		return cpufreq_table_find_index_dl(policy, target_freq,
						   efficiencies);
}

/* Find highest freq at or below target in a table in ascending order */
static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
					      unsigned int target_freq,
					      bool efficiencies)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
		freq = pos->frequency;

		if (freq == target_freq)
			return idx;

		if (freq < target_freq) {
			best = idx;
			continue;
		}

		/* No freq found below target_freq */
		if (best == -1)
			return idx;

		return best;
	}

	return best;
}

/* Find highest freq at or below target in a table in descending order */
static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
					      unsigned int target_freq,
					      bool efficiencies)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
		freq = pos->frequency;

		if (freq <= target_freq)
			return idx;

		best = idx;
	}

	return best;
}

/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
					     unsigned int target_freq,
					     bool efficiencies)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
		return cpufreq_table_find_index_ah(policy, target_freq,
						   efficiencies);
	else
		return cpufreq_table_find_index_dh(policy, target_freq,
						   efficiencies);
}

/* Find closest freq to target in a table in ascending order */
static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
					      unsigned int target_freq,
					      bool efficiencies)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
		freq = pos->frequency;

		if (freq == target_freq)
			return idx;

		if (freq < target_freq) {
			best = idx;
			continue;
		}

		/* No freq found below target_freq */
		if (best == -1)
			return idx;

		/* Choose the closest freq */
		if (target_freq - table[best].frequency > freq - target_freq)
			return idx;

		return best;
	}

	return best;
}

/* Find closest freq to target in a table in descending order */
static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
					      unsigned int target_freq,
					      bool efficiencies)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
		freq = pos->frequency;

		if (freq == target_freq)
			return idx;

		if (freq > target_freq) {
			best = idx;
			continue;
		}

		/* No freq found above target_freq */
		if (best == -1)
			return idx;

		/* Choose the closest freq */
		if (table[best].frequency - target_freq > target_freq - freq)
			return idx;

		return best;
	}

	return best;
}

/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
					     unsigned int target_freq,
					     bool efficiencies)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
		return cpufreq_table_find_index_ac(policy, target_freq,
						   efficiencies);
	else
		return cpufreq_table_find_index_dc(policy, target_freq,
						   efficiencies);
}

static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
						 unsigned int target_freq,
						 unsigned int relation)
{
	bool efficiencies = policy->efficiencies_available &&
			    (relation & CPUFREQ_RELATION_E);
	int idx;

	/* cpufreq_table_index_unsorted() has no use for this flag anyway */
	relation &= ~CPUFREQ_RELATION_E;

	if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
		return cpufreq_table_index_unsorted(policy, target_freq,
						    relation);
retry:
	switch (relation) {
	case CPUFREQ_RELATION_L:
		idx = cpufreq_table_find_index_l(policy, target_freq,
						 efficiencies);
		break;
	case CPUFREQ_RELATION_H:
		idx = cpufreq_table_find_index_h(policy, target_freq,
						 efficiencies);
		break;
	case CPUFREQ_RELATION_C:
		idx = cpufreq_table_find_index_c(policy, target_freq,
						 efficiencies);
		break;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}

	if (idx < 0 && efficiencies) {
		efficiencies = false;
		goto retry;
	}

	return idx;
}

static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *pos;
	int count = 0;

	if (unlikely(!policy->freq_table))
		return 0;

	cpufreq_for_each_valid_entry(pos, policy->freq_table)
		count++;

	return count;
}

/**
 * cpufreq_table_set_inefficient() - Mark a frequency as inefficient
 * @policy:	the &struct cpufreq_policy containing the inefficient frequency
 * @frequency:	the inefficient frequency
 *
 * The &struct cpufreq_policy must use a sorted frequency table
 *
 * Return: %0 on success or a negative errno code
 */
static inline int
cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
			      unsigned int frequency)
{
	struct cpufreq_frequency_table *pos;

	/* Not supported */
	if (policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED)
		return -EINVAL;

	cpufreq_for_each_valid_entry(pos, policy->freq_table) {
		if (pos->frequency == frequency) {
			pos->flags |= CPUFREQ_INEFFICIENT_FREQ;
			policy->efficiencies_available = true;
			return 0;
		}
	}

	return -EINVAL;
}
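
/*
 * Usage sketch (assumption): a driver that knows, for example from its energy
 * model, that 800000 kHz is always dominated by a higher operating point can
 * mark it once the frequency table has been validated and sorted:
 *
 *	if (cpufreq_table_set_inefficient(policy, 800000))
 *		pr_debug("800 MHz not found or table unsorted\n");
 *
 * Governors that pass CPUFREQ_RELATION_E will then skip that entry whenever a
 * more efficient alternative is available.
 */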

static inline int parse_perf_domain(int cpu, const char *list_name,
				    const char *cell_name,
				    struct of_phandle_args *args)
{
	struct device_node *cpu_np;
	int ret;

	cpu_np = of_cpu_device_node_get(cpu);
	if (!cpu_np)
		return -ENODEV;

	ret = of_parse_phandle_with_args(cpu_np, list_name, cell_name, 0,
					 args);
	if (ret < 0)
		return ret;

	of_node_put(cpu_np);

	return 0;
}

static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
						     const char *cell_name, struct cpumask *cpumask,
						     struct of_phandle_args *pargs)
{
	int cpu, ret;
	struct of_phandle_args args;

	ret = parse_perf_domain(pcpu, list_name, cell_name, pargs);
	if (ret < 0)
		return ret;

	cpumask_set_cpu(pcpu, cpumask);

	for_each_possible_cpu(cpu) {
		if (cpu == pcpu)
			continue;

		ret = parse_perf_domain(cpu, list_name, cell_name, &args);
		if (ret < 0)
			continue;

		if (pargs->np == args.np && pargs->args_count == args.args_count &&
		    !memcmp(pargs->args, args.args, sizeof(args.args[0]) * args.args_count))
			cpumask_set_cpu(cpu, cpumask);

		of_node_put(args.np);
	}

	return 0;
}
#else
static inline int cpufreq_boost_trigger_state(int state)
{
	return 0;
}
static inline int cpufreq_boost_enabled(void)
{
	return 0;
}

static inline int cpufreq_enable_boost_support(void)
{
	return -EINVAL;
}

static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
{
	return false;
}

static inline int
cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
			      unsigned int frequency)
{
	return -EINVAL;
}

static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
						     const char *cell_name, struct cpumask *cpumask,
						     struct of_phandle_args *pargs)
{
	return -EOPNOTSUPP;
}
#endif

extern unsigned int arch_freq_get_on_cpu(int cpu);

#ifndef arch_set_freq_scale
static __always_inline
void arch_set_freq_scale(const struct cpumask *cpus,
			 unsigned long cur_freq,
			 unsigned long max_freq)
{
}
#endif
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
extern struct freq_attr *cpufreq_generic_attr[];
int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy);

unsigned int cpufreq_generic_get(unsigned int cpu);
void cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency);

static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy)
{
	dev_pm_opp_of_register_em(get_cpu_device(policy->cpu),
				  policy->related_cpus);
}
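
/*
 * Usage sketch (assumption): an OPP-based driver can have the core register
 * its energy model by pointing the ->register_em callback at the helper
 * above.  "example_driver" is a hypothetical name and the other callbacks are
 * omitted.
 *
 *	static struct cpufreq_driver example_driver = {
 *		.register_em = cpufreq_register_em_with_opp,
 *	};
 */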
#endif /* _LINUX_CPUFREQ_H */