// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/platform_device.h>

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>

MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

enum {
	UNDEFINED_CAPABLE = 0,
	SYSTEM_INTEL_MSR_CAPABLE,
	SYSTEM_AMD_MSR_CAPABLE,
	SYSTEM_IO_CAPABLE,
};

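/*
 * Masks for the P-state field of the PERF_CTL MSR: Intel keeps the
 * target in the low 16 bits, AMD and Hygon in the low 3 bits.
 */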
#define INTEL_MSR_RANGE		(0xffff)
#define AMD_MSR_RANGE		(0x7)
#define HYGON_MSR_RANGE		(0x7)

#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)

struct acpi_cpufreq_data {
	unsigned int resume;
	unsigned int cpu_feature;
	unsigned int acpi_perf_cpu;
	cpumask_var_t freqdomain_cpus;
	void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
	u32 (*cpu_freq_read)(struct acpi_pct_register *reg);
};

/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufreq_data *data)
{
	return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu);
}

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;

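/* Report whether boost (turbo/core performance boost) is enabled on @cpu. */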
static bool boost_state(unsigned int cpu)
{
	u32 lo, hi;
	u64 msr;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_K7_HWCR_CPB_DIS);
	}
	return false;
}

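/*
 * Set or clear the boost-disable bit in the vendor-specific MSR of the
 * CPU this runs on.  Note the inverted sense: enabling boost means
 * clearing the disable bit.
 */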
static int boost_set_msr(bool enable)
{
	u32 msr_addr;
	u64 msr_mask, val;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		msr_addr = MSR_IA32_MISC_ENABLE;
		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
		break;
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		msr_addr = MSR_K7_HWCR;
		msr_mask = MSR_K7_HWCR_CPB_DIS;
		break;
	default:
		return -EINVAL;
	}

	rdmsrl(msr_addr, val);

	if (enable)
		val &= ~msr_mask;
	else
		val |= msr_mask;

	wrmsrl(msr_addr, val);
	return 0;
}

static void boost_set_msr_each(void *p_en)
{
	bool enable = (bool) p_en;

	boost_set_msr(enable);
}

static int set_boost(struct cpufreq_policy *policy, int val)
{
	on_each_cpu_mask(policy->cpus, boost_set_msr_each,
			 (void *)(long)val, 1);
	pr_debug("CPU %*pbl: Core Boosting %s.\n",
		 cpumask_pr_args(policy->cpus), str_enabled_disabled(val));

	return 0;
}

static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	if (unlikely(!data))
		return -ENODEV;

	return cpufreq_show_cpus(data->freqdomain_cpus, buf);
}

cpufreq_freq_attr_ro(freqdomain_cpus);

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
			 size_t count)
{
	int ret;
	unsigned int val = 0;

	if (!acpi_cpufreq_driver.set_boost)
		return -EINVAL;

	ret = kstrtouint(buf, 10, &val);
	if (ret || val > 1)
		return -EINVAL;

	cpus_read_lock();
	set_boost(policy, val);
	cpus_read_unlock();

	return count;
}

static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", acpi_cpufreq_driver.boost_enabled);
}

cpufreq_freq_attr_rw(cpb);
#endif

static int check_est_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_EST);
}

static int check_amd_hwpstate_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}

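/*
 * Translate a P-state status value read from the ACPI I/O port into a
 * frequency from the policy's table; 0 if the value matches no state.
 */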
static unsigned extract_io(struct cpufreq_policy *policy, u32 value)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	int i;

	perf = to_perf_data(data);

	for (i = 0; i < perf->state_count; i++) {
		if (value == perf->states[i].status)
			return policy->freq_table[i].frequency;
	}
	return 0;
}

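/*
 * Translate a PERF_CTL MSR value into a table frequency, after masking
 * off everything but the vendor's P-state selection field.
 */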
static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct cpufreq_frequency_table *pos;
	struct acpi_processor_performance *perf;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		msr &= AMD_MSR_RANGE;
	else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		msr &= HYGON_MSR_RANGE;
	else
		msr &= INTEL_MSR_RANGE;

	perf = to_perf_data(data);

	cpufreq_for_each_entry(pos, policy->freq_table)
		if (msr == perf->states[pos->driver_data].status)
			return pos->frequency;
	return policy->freq_table[0].frequency;
}

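/* Dispatch to the MSR or I/O variant depending on how this CPU is driven. */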
static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		return extract_msr(policy, val);
	case SYSTEM_IO_CAPABLE:
		return extract_io(policy, val);
	default:
		return 0;
	}
}

static u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
{
	u32 val, dummy __always_unused;

	rdmsr(MSR_IA32_PERF_CTL, val, dummy);
	return val;
}

static void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
{
	u32 lo, hi;

	rdmsr(MSR_IA32_PERF_CTL, lo, hi);
	lo = (lo & ~INTEL_MSR_RANGE) | (val & INTEL_MSR_RANGE);
	wrmsr(MSR_IA32_PERF_CTL, lo, hi);
}

static u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
{
	u32 val, dummy __always_unused;

	rdmsr(MSR_AMD_PERF_CTL, val, dummy);
	return val;
}

static void cpu_freq_write_amd(struct acpi_pct_register *not_used, u32 val)
{
	wrmsr(MSR_AMD_PERF_CTL, val, 0);
}

static u32 cpu_freq_read_io(struct acpi_pct_register *reg)
{
	u32 val;

	acpi_os_read_port(reg->address, &val, reg->bit_width);
	return val;
}

static void cpu_freq_write_io(struct acpi_pct_register *reg, u32 val)
{
	acpi_os_write_port(reg->address, val, reg->bit_width);
}

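/* A P-state register access bundled up for execution on remote CPUs. */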
struct drv_cmd {
	struct acpi_pct_register *reg;
	u32 val;
	union {
		void (*write)(struct acpi_pct_register *reg, u32 val);
		u32 (*read)(struct acpi_pct_register *reg);
	} func;
};

/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;

	cmd->val = cmd->func.read(cmd->reg);
}

static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
{
	struct acpi_processor_performance *perf = to_perf_data(data);
	struct drv_cmd cmd = {
		.reg = &perf->control_register,
		.func.read = data->cpu_freq_read,
	};
	int err;

	err = smp_call_function_any(mask, do_drv_read, &cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
	return cmd.val;
}

/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;

	cmd->func.write(cmd->reg, cmd->val);
}

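/*
 * Write @val on every CPU in @mask.  The local CPU is handled directly,
 * since smp_call_function_many() does not run the callback on the
 * calling CPU.
 */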
static void drv_write(struct acpi_cpufreq_data *data,
		      const struct cpumask *mask, u32 val)
{
	struct acpi_processor_performance *perf = to_perf_data(data);
	struct drv_cmd cmd = {
		.reg = &perf->control_register,
		.val = val,
		.func.write = data->cpu_freq_write,
	};
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, mask))
		do_drv_write(&cmd);

	smp_call_function_many(mask, do_drv_write, &cmd, 1);
	put_cpu();
}

static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
{
	u32 val;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	val = drv_read(data, mask);

	pr_debug("%s = %u\n", __func__, val);

	return val;
}

static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data;
	struct cpufreq_policy *policy;
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("%s (%d)\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (unlikely(!policy))
		return 0;

	data = policy->driver_data;
	if (unlikely(!data || !policy->freq_table))
		return 0;

	cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
	freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}

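/*
 * Poll the hardware until it reports @freq, for at most 100 * 10 us = 1 ms.
 * Returns 1 on success, 0 if the target frequency was never observed.
 */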
static unsigned int check_freqs(struct cpufreq_policy *policy,
				const struct cpumask *mask, unsigned int freq)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(policy, get_cur_val(mask, data));
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}

static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int index)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	const struct cpumask *mask;
	unsigned int next_perf_state = 0; /* Index into perf table */
	int result = 0;

	if (unlikely(!data))
		return -ENODEV;

	perf = to_perf_data(data);
	next_perf_state = policy->freq_table[index].driver_data;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				 next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				 next_perf_state);
			return 0;
		}
	}

	/*
	 * The core won't allow CPUs to go away until the governor has been
	 * stopped, so we can rely on the stability of policy->cpus.
	 */
	mask = policy->shared_type == CPUFREQ_SHARED_TYPE_ANY ?
		cpumask_of(policy->cpu) : policy->cpus;

	drv_write(data, mask, perf->states[next_perf_state].control);

	if (acpi_pstate_strict) {
		if (!check_freqs(policy, mask,
				 policy->freq_table[index].frequency)) {
			pr_debug("%s (%d)\n", __func__, policy->cpu);
			result = -EAGAIN;
		}
	}

	if (!result)
		perf->state = next_perf_state;

	return result;
}

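/*
 * Invoked from scheduler context on a CPU covered by the policy, so the
 * new P-state control value is written directly on the local CPU rather
 * than via drv_write().
 */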
static unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	struct cpufreq_frequency_table *entry;
	unsigned int next_perf_state, next_freq, index;

	/*
	 * Find the closest frequency above target_freq.
	 */
	if (policy->cached_target_freq == target_freq)
		index = policy->cached_resolved_idx;
	else
		index = cpufreq_table_find_index_dl(policy, target_freq,
						    false);

	entry = &policy->freq_table[index];
	next_freq = entry->frequency;
	next_perf_state = entry->driver_data;

	perf = to_perf_data(data);
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume))
			data->resume = 0;
		else
			return next_freq;
	}

	data->cpu_freq_write(&perf->control_register,
			     perf->states[next_perf_state].control);
	perf->state = next_perf_state;
	return next_freq;
}

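/*
 * Without a readable status register the current frequency can only be
 * guessed: pick the P-state closest to cpu_khz.  The test
 * "2 * cpu_khz > (freqn + freq)" checks cpu_khz against the midpoint of
 * two adjacent states, e.g. with states at 2000 and 1500 MHz and
 * cpu_khz == 1900000 the midpoint is 1750000, so P0 (2000 MHz) is chosen.
 */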
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
	struct acpi_processor_performance *perf;

	perf = to_perf_data(data);
	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = perf->states[0].core_frequency * 1000;

		for (i = 0; i < (perf->state_count-1); i++) {
			freq = freqn;
			freqn = perf->states[i+1].core_frequency * 1000;
			if ((2 * cpu_khz) > (freqn + freq)) {
				perf->state = i;
				return freq;
			}
		}
		perf->state = perf->state_count-1;
		return freqn;
	} else {
		/* assume CPU is at P0... */
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
}

static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}

static int cpufreq_boost_down_prep(unsigned int cpu)
{
	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that
	 * this cpu cannot block the remaining ones from boosting.
	 */
	return boost_set_msr(1);
}

/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
static int __init acpi_cpufreq_early_init(void)
{
	unsigned int i;
	pr_debug("%s\n", __func__);

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
		pr_debug("Memory allocation error for acpi_perf_data.\n");
		return -ENOMEM;
	}
	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {

			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
			free_acpi_perf_data();
			return -ENOMEM;
		}
	}

	/* Do initialization in ACPI core */
	acpi_processor_preregister_performance(acpi_perf_data);
	return 0;
}

#ifdef CONFIG_SMP
/*
 * Some BIOSes do SW_ANY coordination internally, either set it up in hw
 * or do it in BIOS firmware and won't inform about it to OS. If not
 * detected, this has a side effect of making CPU run at a different speed
 * than OS intended it to run at. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(const struct dmi_system_id *d)
{
	bios_with_sw_any_bug = 1;
	return 0;
}

static const struct dmi_system_id sw_any_bug_dmi_table[] = {
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }
};

static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
	/* Intel Xeon Processor 7100 Series Specification Update
	 * https://www.intel.com/Assets/PDF/specupdate/314554.pdf
	 * AL30: A Machine Check Exception (MCE) Occurring during an
	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
	 * Both Processor Cores to Lock Up. */
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		if ((c->x86 == 15) &&
		    (c->x86_model == 6) &&
		    (c->x86_stepping == 8)) {
			pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
			return -ENODEV;
		}
	}
	return 0;
}
#endif

#ifdef CONFIG_ACPI_CPPC_LIB
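/*
 * Ratio of the CPPC highest to nominal performance level, as a
 * fixed-point fraction scaled by 2^SCHED_CAPACITY_SHIFT (1024).  For
 * example, highest_perf == 280 and nominal_perf == 200 yield
 * (280 << 10) / 200 == 1433, i.e. a boost ratio of roughly 1.4.
 */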
static u64 get_max_boost_ratio(unsigned int cpu)
{
	struct cppc_perf_caps perf_caps;
	u64 highest_perf, nominal_perf;
	int ret;

	if (acpi_pstate_strict)
		return 0;

	ret = cppc_get_perf_caps(cpu, &perf_caps);
	if (ret) {
		pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
			 cpu, ret);
		return 0;
	}

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		highest_perf = amd_get_highest_perf();
	else
		highest_perf = perf_caps.highest_perf;

	nominal_perf = perf_caps.nominal_perf;

	if (!highest_perf || !nominal_perf) {
		pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
		return 0;
	}

	if (highest_perf < nominal_perf) {
		pr_debug("CPU%d: nominal performance above highest\n", cpu);
		return 0;
	}

	return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
}
#else
static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
#endif

static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *freq_table;
	struct acpi_processor_performance *perf;
	struct acpi_cpufreq_data *data;
	unsigned int cpu = policy->cpu;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	unsigned int valid_states = 0;
	unsigned int result = 0;
	u64 max_boost_ratio;
	unsigned int i;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif

	pr_debug("%s\n", __func__);

#ifdef CONFIG_SMP
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
#endif

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free;
	}

	perf = per_cpu_ptr(acpi_perf_data, cpu);
	data->acpi_perf_cpu = cpu;
	policy->driver_data = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

	result = acpi_processor_register_performance(perf, cpu);
	if (result)
		goto err_free_mask;

	policy->shared_type = perf->shared_type;

	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}
	cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);

#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
	}

	if (check_amd_hwpstate_cpu(cpu) && boot_cpu_data.x86 < 0x19 &&
	    !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
		cpumask_copy(data->freqdomain_cpus,
			     topology_sibling_cpumask(cpu));
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once("overriding BIOS provided _PSD data\n");
	}
#endif

	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 0xf) {
			pr_debug("AMD K8 systems must use native drivers.\n");
			result = -ENODEV;
			goto err_unreg;
		}
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		data->cpu_freq_read = cpu_freq_read_io;
		data->cpu_freq_write = cpu_freq_write_io;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			data->cpu_freq_read = cpu_freq_read_intel;
			data->cpu_freq_write = cpu_freq_write_intel;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			data->cpu_freq_read = cpu_freq_read_amd;
			data->cpu_freq_write = cpu_freq_write_amd;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			 (u32)(perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table),
			     GFP_KERNEL);
	if (!freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
				perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		pr_info_once("P-state transition latency capped at 20 uS\n");
	}

	/* table init */
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    freq_table[valid_states-1].frequency / 1000)
			continue;

		freq_table[valid_states].driver_data = i;
		freq_table[valid_states].frequency =
			perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	freq_table[valid_states].frequency = CPUFREQ_TABLE_END;

	max_boost_ratio = get_max_boost_ratio(cpu);
	if (max_boost_ratio) {
		unsigned int freq = freq_table[0].frequency;

		/*
		 * Because the loop above sorts the freq_table entries in the
		 * descending order, freq is the maximum frequency in the table.
		 * Assume that it corresponds to the CPPC nominal frequency and
		 * use it to set cpuinfo.max_freq.
		 */
		policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
	} else {
		/*
		 * If the maximum "boost" frequency is unknown, ask the arch
		 * scale-invariance code to use the "nominal" performance for
		 * CPU utilization scaling so as to prevent the schedutil
		 * governor from selecting inadequate CPU frequencies.
		 */
		arch_set_max_freq_ratio(true);
	}

	policy->freq_table = freq_table;
	perf->state = 0;

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/*
		 * The core will not set policy->cur, because
		 * cpufreq_driver->get is NULL, so we need to set it here.
		 * However, we have to guess it, because the current speed is
		 * unknown and not detectable via IO ports.
		 */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			 (i == perf->state ? '*' : ' '), i,
			 (u32) perf->states[i].core_frequency,
			 (u32) perf->states[i].power,
			 (u32) perf->states[i].transition_latency);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	policy->fast_switch_possible = !acpi_pstate_strict &&
		!(policy_is_shared(policy) && policy->shared_type != CPUFREQ_SHARED_TYPE_ANY);

	if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
		pr_warn(FW_WARN "P-state 0 is not max freq\n");

	if (acpi_cpufreq_driver.set_boost)
		set_boost(policy, acpi_cpufreq_driver.boost_enabled);

	return result;

err_unreg:
	acpi_processor_unregister_performance(cpu);
err_free_mask:
	free_cpumask_var(data->freqdomain_cpus);
err_free:
	kfree(data);
	policy->driver_data = NULL;

	return result;
}

static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	pr_debug("%s\n", __func__);

	cpufreq_boost_down_prep(policy->cpu);
	policy->fast_switch_possible = false;
	policy->driver_data = NULL;
	acpi_processor_unregister_performance(data->acpi_perf_cpu);
	free_cpumask_var(data->freqdomain_cpus);
	kfree(policy->freq_table);
	kfree(data);

	return 0;
}

static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	pr_debug("%s\n", __func__);

	data->resume = 1;

	return 0;
}

static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&freqdomain_cpus,
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	&cpb,
#endif
	NULL,
};

static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= acpi_cpufreq_target,
	.fast_switch	= acpi_cpufreq_fast_switch,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.resume		= acpi_cpufreq_resume,
	.name		= "acpi-cpufreq",
	.attr		= acpi_cpufreq_attr,
};

static void __init acpi_cpufreq_boost_init(void)
{
	if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) {
		pr_debug("Boost capabilities not present in the processor\n");
		return;
	}

	acpi_cpufreq_driver.set_boost = set_boost;
	acpi_cpufreq_driver.boost_enabled = boost_state(0);
}

static int __init acpi_cpufreq_probe(struct platform_device *pdev)
{
	int ret;

	if (acpi_disabled)
		return -ENODEV;

	/* don't keep reloading if cpufreq_driver exists */
	if (cpufreq_get_current_driver())
		return -ENODEV;

	pr_debug("%s\n", __func__);

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	/* this is a sysfs file with a strange name and an even stranger
	 * semantic - per CPU instantiation, but system global effect.
	 * Let's enable it only on AMD CPUs for compatibility reasons and
	 * only if configured. This is considered legacy code, which
	 * will probably be removed at some point in the future.
	 */
	if (!check_amd_hwpstate_cpu(0)) {
		struct freq_attr **attr;

		pr_debug("CPB unsupported, do not expose it\n");

		for (attr = acpi_cpufreq_attr; *attr; attr++)
			if (*attr == &cpb) {
				*attr = NULL;
				break;
			}
	}
#endif
	acpi_cpufreq_boost_init();

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret)
		free_acpi_perf_data();

	return ret;
}

static void acpi_cpufreq_remove(struct platform_device *pdev)
{
	pr_debug("%s\n", __func__);

	cpufreq_unregister_driver(&acpi_cpufreq_driver);

	free_acpi_perf_data();
}

static struct platform_driver acpi_cpufreq_platdrv = {
	.driver = {
		.name	= "acpi-cpufreq",
	},
	.remove_new	= acpi_cpufreq_remove,
};

static int __init acpi_cpufreq_init(void)
{
	return platform_driver_probe(&acpi_cpufreq_platdrv, acpi_cpufreq_probe);
}

static void __exit acpi_cpufreq_exit(void)
{
	platform_driver_unregister(&acpi_cpufreq_platdrv);
}

module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
	"value 0 or non-zero. non-zero -> strict ACPI checks are "
	"performed during frequency changes.");

late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);

MODULE_ALIAS("platform:acpi-cpufreq");