// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_throttling.c - Throttling submodule of the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 */

#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <asm/io.h>
#include <linux/uaccess.h>

/* ignore_tpc:
 * 0 -> acpi processor driver doesn't ignore _TPC values
 * 1 -> acpi processor driver ignores _TPC values
 */
static int ignore_tpc;
module_param(ignore_tpc, int, 0644);
MODULE_PARM_DESC(ignore_tpc, "Disable broken BIOS _TPC throttling support");
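/*
 * For reference, the parameter can be flipped at run time (path assumed
 * from the standard module_param() sysfs layout, mode 0644):
 *
 *   echo 1 > /sys/module/processor/parameters/ignore_tpc
 *
 * or set at boot with "processor.ignore_tpc=1" on the kernel command line.
 */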

struct throttling_tstate {
	unsigned int cpu;	/* cpu nr */
	int target_state;	/* target T-state */
};

struct acpi_processor_throttling_arg {
	struct acpi_processor *pr;
	int target_state;
	bool force;
};

#define THROTTLING_PRECHANGE	(1)
#define THROTTLING_POSTCHANGE	(2)

static int acpi_processor_get_throttling(struct acpi_processor *pr);
static int __acpi_processor_set_throttling(struct acpi_processor *pr,
					   int state, bool force, bool direct);

static int acpi_processor_update_tsd_coord(void)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct acpi_processor *pr, *match_pr;
	struct acpi_tsd_package *pdomain, *match_pdomain;
	struct acpi_processor_throttling *pthrottling, *match_pthrottling;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _TSD data from all CPUs, let's set up T-state
	 * coordination between all CPUs.
	 */
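	/*
	 * Worked example (hypothetical firmware data): if CPUs 0 and 1
	 * both report _TSD domain 0 with coord_type SW_ALL and
	 * num_processors == 2, the loops below leave
	 * shared_cpu_map == {0, 1} on both processors, so a T-state
	 * change on either CPU is propagated to the other.
	 */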
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		/* Basic validity check for domain info */
		pthrottling = &(pr->throttling);

		/*
		 * If the _TSD package for any one CPU is invalid, the
		 * coordination among all CPUs is treated as invalid.
		 * This is crude, but it errs on the safe side.
		 */
		if (!pthrottling->tsd_valid_flag) {
			retval = -EINVAL;
			break;
		}
	}
	if (retval)
		goto err_ret;

	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;
		pthrottling = &pr->throttling;

		pdomain = &(pthrottling->domain_info);
		cpumask_set_cpu(i, pthrottling->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		/*
		 * If the number of processors in the TSD domain is 1, it is
		 * unnecessary to parse the coordination for this CPU.
		 */
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pthrottling = &(match_pr->throttling);
			match_pdomain = &(match_pthrottling->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/*
			 * Here i and j are in the same domain. If two TSD
			 * packages have the same domain, they should have
			 * the same num_processors and coordination type.
			 * Otherwise the domain info is regarded as illegal.
			 */
			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pthrottling->shared_cpu_map);
		}
		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pthrottling = &(match_pr->throttling);
			match_pdomain = &(match_pthrottling->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/*
			 * CPUs in the same domain get the same
			 * shared_cpu_map.
			 */
			cpumask_copy(match_pthrottling->shared_cpu_map,
				     pthrottling->shared_cpu_map);
		}
	}

err_ret:
	free_cpumask_var(covered_cpus);

	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		/*
		 * Assume no coordination on any error parsing domain info.
		 * The coordination type will be forced to SW_ALL.
		 */
		if (retval) {
			pthrottling = &(pr->throttling);
			cpumask_clear(pthrottling->shared_cpu_map);
			cpumask_set_cpu(i, pthrottling->shared_cpu_map);
			pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
		}
	}

	return retval;
}

/*
 * Update the T-state coordination after the _TSD
 * data for all cpus is obtained.
 */
void acpi_processor_throttling_init(void)
{
	if (acpi_processor_update_tsd_coord())
		pr_debug("Assume no T-state coordination\n");
}

static int acpi_processor_throttling_notifier(unsigned long event, void *data)
{
	struct throttling_tstate *p_tstate = data;
	struct acpi_processor *pr;
	unsigned int cpu;
	int target_state;
	struct acpi_processor_limit *p_limit;
	struct acpi_processor_throttling *p_throttling;

	cpu = p_tstate->cpu;
	pr = per_cpu(processors, cpu);
	if (!pr) {
		pr_debug("Invalid pr pointer\n");
		return 0;
	}
	if (!pr->flags.throttling) {
		acpi_handle_debug(pr->handle,
				  "Throttling control unsupported on CPU %d\n",
				  cpu);
		return 0;
	}
	target_state = p_tstate->target_state;
	p_throttling = &(pr->throttling);
	switch (event) {
	case THROTTLING_PRECHANGE:
		/*
		 * The prechange event is used to choose one proper T-state,
		 * which meets the limits of thermal, user and _TPC.
		 */
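		/*
		 * Worked example (illustrative numbers only): with a
		 * requested state of T1, thermal.tx == 2, user.tx == 0 and
		 * throttling_platform_limit == 3, the comparisons below
		 * raise target_state step by step to T3, the deepest (most
		 * throttled) of the four constraints.
		 */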
		p_limit = &pr->limit;
		if (p_limit->thermal.tx > target_state)
			target_state = p_limit->thermal.tx;
		if (p_limit->user.tx > target_state)
			target_state = p_limit->user.tx;
		if (pr->throttling_platform_limit > target_state)
			target_state = pr->throttling_platform_limit;
		if (target_state >= p_throttling->state_count) {
			pr_warn("Exceeded the limit of T-state\n");
			target_state = p_throttling->state_count - 1;
		}
		p_tstate->target_state = target_state;
		acpi_handle_debug(pr->handle,
				  "PreChange Event: target T-state of CPU %d is T%d\n",
				  cpu, target_state);
		break;
	case THROTTLING_POSTCHANGE:
		/*
		 * The postchange event is only used to update the
		 * T-state flag of acpi_processor_throttling.
		 */
		p_throttling->state = target_state;
		acpi_handle_debug(pr->handle,
				  "PostChange Event: CPU %d is switched to T%d\n",
				  cpu, target_state);
		break;
	default:
		pr_warn("Unsupported Throttling notifier event\n");
		break;
	}

	return 0;
}

/*
 * _TPC - Throttling Present Capabilities
 */
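/*
 * Illustrative ASL shape of _TPC (hypothetical, not from any real DSDT).
 * It returns the index of the shallowest T-state OSPM may use, so a
 * return value of 1 makes T0 unavailable and T1 the shallowest usable
 * state:
 *
 *   Method (_TPC, 0, NotSerialized)
 *   {
 *       Return (One)
 *   }
 */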
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
	acpi_status status = 0;
	unsigned long long tpc = 0;

	if (!pr)
		return -EINVAL;

	if (ignore_tpc)
		goto end;

	status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND)
			acpi_evaluation_failure_warn(pr->handle, "_TPC", status);

		return -ENODEV;
	}

end:
	pr->throttling_platform_limit = (int)tpc;
	return 0;
}

int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
{
	int result = 0;
	int throttling_limit;
	int current_state;
	struct acpi_processor_limit *limit;
	int target_state;

	if (ignore_tpc)
		return 0;

	result = acpi_processor_get_platform_limit(pr);
	if (result) {
		/* Throttling Limit is unsupported */
		return result;
	}

	throttling_limit = pr->throttling_platform_limit;
	if (throttling_limit >= pr->throttling.state_count) {
		/* Incorrect Throttling Limit */
		return -EINVAL;
	}

	current_state = pr->throttling.state;
	if (current_state > throttling_limit) {
		/*
		 * The current state already meets the _TPC limit, but it
		 * is reasonable for OSPM to move to a shallower T-state
		 * for better performance. Of course the thermal and user
		 * limit conditions still have to be honoured.
		 */
		limit = &pr->limit;
		target_state = throttling_limit;
		if (limit->thermal.tx > target_state)
			target_state = limit->thermal.tx;
		if (limit->user.tx > target_state)
			target_state = limit->user.tx;
	} else if (current_state == throttling_limit) {
		/*
		 * Unnecessary to change the throttling state
		 */
		return 0;
	} else {
		/*
		 * If the current state is shallower than the _TPC limit,
		 * force a switch to the throttling state defined by
		 * throttling_platform_limit. Because the previous state
		 * already met the thermal and user limit conditions, it is
		 * unnecessary to check them again.
		 */
		target_state = throttling_limit;
	}
	return acpi_processor_set_throttling(pr, target_state, false);
}

/*
 * This function is used to reevaluate whether the T-state is valid
 * after one CPU is onlined/offlined.
 * Note that it does not reevaluate the following properties of
 * the T-state:
 * 1. the control method
 * 2. the number of supported T-states
 * 3. the TSD domain
 */
void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
				      bool is_dead)
{
	int result = 0;

	if (is_dead) {
		/*
		 * When a CPU goes offline, its T-state throttling
		 * is invalidated.
		 */
		pr->flags.throttling = 0;
		return;
	}
	/*
	 * The following rechecks whether the T-state is valid for
	 * the onlined CPU.
	 */
	if (!pr->throttling.state_count) {
		/*
		 * If the number of T-states is invalid, throttling
		 * is invalidated.
		 */
		pr->flags.throttling = 0;
		return;
	}
	pr->flags.throttling = 1;

	/*
	 * Disable throttling (if enabled). We'll let subsequent
	 * policy (e.g. thermal) decide to lower performance if it
	 * so chooses, but for now we'll crank up the speed.
	 */

	result = acpi_processor_get_throttling(pr);
	if (result)
		goto end;

	if (pr->throttling.state) {
		result = acpi_processor_set_throttling(pr, 0, false);
		if (result)
			goto end;
	}

end:
	if (result)
		pr->flags.throttling = 0;
}

/*
 * _PTC - Processor Throttling Control (and status) register location
 */
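/*
 * Illustrative ASL shape of _PTC (hypothetical, for orientation only):
 * a two-element package of Register() buffers, control register first,
 * status register second. FFixedHW maps to ACPI_ADR_SPACE_FIXED_HARDWARE
 * and selects the MSR path below:
 *
 *   Name (_PTC, Package (0x02)
 *   {
 *       ResourceTemplate () { Register (FFixedHW, 0x00, 0x00, 0x00) },
 *       ResourceTemplate () { Register (FFixedHW, 0x00, 0x00, 0x00) }
 *   })
 */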
static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *ptc = NULL;
	union acpi_object obj;
	struct acpi_processor_throttling *throttling;

	status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND)
			acpi_evaluation_failure_warn(pr->handle, "_PTC", status);

		return -ENODEV;
	}

	ptc = (union acpi_object *)buffer.pointer;
	if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
	    || (ptc->package.count != 2)) {
		pr_err("Invalid _PTC data\n");
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register
	 */

	obj = ptc->package.elements[0];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
	    || (obj.buffer.pointer == NULL)) {
		pr_err("Invalid _PTC data (control_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->throttling.control_register, obj.buffer.pointer,
	       sizeof(struct acpi_ptc_register));

	/*
	 * status_register
	 */

	obj = ptc->package.elements[1];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
	    || (obj.buffer.pointer == NULL)) {
		pr_err("Invalid _PTC data (status_register)\n");
		result = -EFAULT;
		goto end;
	}

	memcpy(&pr->throttling.status_register, obj.buffer.pointer,
	       sizeof(struct acpi_ptc_register));

	throttling = &pr->throttling;

	if ((throttling->control_register.bit_width +
	     throttling->control_register.bit_offset) > 32) {
		pr_err("Invalid _PTC control register\n");
		result = -EFAULT;
		goto end;
	}

	if ((throttling->status_register.bit_width +
	     throttling->status_register.bit_offset) > 32) {
		pr_err("Invalid _PTC status register\n");
		result = -EFAULT;
		goto end;
	}

end:
	kfree(buffer.pointer);

	return result;
}

/*
 * _TSS - Throttling Supported States
 */
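/*
 * Illustrative _TSS shape (hypothetical values): one five-integer
 * sub-package per T-state, matching the "NNNNN" format string used when
 * extracting below: {FreqPercentage, Power, TransLatency, Control, Status}.
 *
 *   Name (_TSS, Package ()
 *   {
 *       Package () { 100, 1000, 0, 0x00, 0x00 },	// T0: no throttling
 *       Package () {  88,  875, 0, 0x1E, 0x00 },	// T1
 *       Package () {  75,  750, 0, 0x1C, 0x00 }	// T2
 *   })
 */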
static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *tss = NULL;
	int i;

	status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND)
			acpi_evaluation_failure_warn(pr->handle, "_TSS", status);

		return -ENODEV;
	}

	tss = buffer.pointer;
	if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
		pr_err("Invalid _TSS data\n");
		result = -EFAULT;
		goto end;
	}

	acpi_handle_debug(pr->handle, "Found %d throttling states\n",
			  tss->package.count);

	pr->throttling.state_count = tss->package.count;
	pr->throttling.states_tss =
	    kmalloc_array(tss->package.count,
			  sizeof(struct acpi_processor_tx_tss),
			  GFP_KERNEL);
	if (!pr->throttling.states_tss) {
		result = -ENOMEM;
		goto end;
	}

	for (i = 0; i < pr->throttling.state_count; i++) {

		struct acpi_processor_tx_tss *tx =
		    (struct acpi_processor_tx_tss *)&(pr->throttling.
						      states_tss[i]);

		state.length = sizeof(struct acpi_processor_tx_tss);
		state.pointer = tx;

		acpi_handle_debug(pr->handle, "Extracting state %d\n", i);

		status = acpi_extract_package(&(tss->package.elements[i]),
					      &format, &state);
		if (ACPI_FAILURE(status)) {
			acpi_handle_warn(pr->handle, "Invalid _TSS data: %s\n",
					 acpi_format_exception(status));
			result = -EFAULT;
			kfree(pr->throttling.states_tss);
			goto end;
		}

		if (!tx->freqpercentage) {
			pr_err("Invalid _TSS data: freq is zero\n");
			result = -EFAULT;
			kfree(pr->throttling.states_tss);
			goto end;
		}
	}

end:
	kfree(buffer.pointer);

	return result;
}

/*
 * _TSD - T-State Dependencies
 */
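/*
 * Illustrative _TSD shape (hypothetical): a single sub-package of
 * ACPI_TSD_REV0_ENTRIES (5) integers, {NumEntries, Revision, Domain,
 * CoordType, NumProcessors}, where CoordType 0xFC/0xFD/0xFE map to
 * SW_ALL/SW_ANY/HW_ALL:
 *
 *   Name (_TSD, Package ()
 *   {
 *       Package () { 5, 0, 0, 0xFC, 2 }
 *   })
 */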
static int acpi_processor_get_tsd(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *tsd = NULL;
	struct acpi_tsd_package *pdomain;
	struct acpi_processor_throttling *pthrottling;

	pthrottling = &pr->throttling;
	pthrottling->tsd_valid_flag = 0;

	status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND)
			acpi_evaluation_failure_warn(pr->handle, "_TSD", status);

		return -ENODEV;
	}

	tsd = buffer.pointer;
	if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
		pr_err("Invalid _TSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (tsd->package.count != 1) {
		pr_err("Invalid _TSD data\n");
		result = -EFAULT;
		goto end;
	}

	pdomain = &(pr->throttling.domain_info);

	state.length = sizeof(struct acpi_tsd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(tsd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		pr_err("Invalid _TSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
		pr_err("Unknown _TSD:num_entries\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
		pr_err("Unknown _TSD:revision\n");
		result = -EFAULT;
		goto end;
	}

	pthrottling = &pr->throttling;
	pthrottling->tsd_valid_flag = 1;
	pthrottling->shared_type = pdomain->coord_type;
	cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
	/*
	 * If the coordination type is not defined in the ACPI spec,
	 * tsd_valid_flag will be cleared and the coordination type
	 * will be forced to DOMAIN_COORD_TYPE_SW_ALL.
	 */
	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pthrottling->tsd_valid_flag = 0;
		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
	}

end:
	kfree(buffer.pointer);
	return result;
}

/* --------------------------------------------------------------------------
                              Throttling Control
   -------------------------------------------------------------------------- */
static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
{
	int state = 0;
	u32 value = 0;
	u32 duty_mask = 0;
	u32 duty_value = 0;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	/*
	 * We don't care about error returns - we just try to mark
	 * these reserved so that nobody else is confused into thinking
	 * that this region might be unused...
	 *
	 * (In particular, allocating the IO range for Cardbus)
	 */
	request_region(pr->throttling.address, 6, "ACPI CPU throttle");

	pr->throttling.state = 0;

	duty_mask = pr->throttling.state_count - 1;

	duty_mask <<= pr->throttling.duty_offset;

	local_irq_disable();

	value = inl(pr->throttling.address);

	/*
	 * Compute the current throttling state when throttling is enabled
	 * (bit 4 is on).
	 */
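	/*
	 * Worked example (illustrative): with duty_width == 3,
	 * state_count == 8; if bit 4 is set and the duty field reads
	 * back 6, then state = 8 - 6 = T2, i.e. the clock is on 6/8 of
	 * the time, matching the 75% performance entry in the linear
	 * table built by acpi_processor_get_fadt_info().
	 */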
	if (value & 0x10) {
		duty_value = value & duty_mask;
		duty_value >>= pr->throttling.duty_offset;

		if (duty_value)
			state = pr->throttling.state_count - duty_value;
	}

	pr->throttling.state = state;

	local_irq_enable();

	acpi_handle_debug(pr->handle,
			  "Throttling state is T%d (%d%% throttling applied)\n",
			  state, pr->throttling.states[state].performance);

	return 0;
}

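/*
 * On the FIXED_HARDWARE path the T-state lives in MSR_IA32_THERM_CONTROL
 * (IA32_CLOCK_MODULATION). On many Intel parts, bit 4 enables on-demand
 * clock modulation and bits 3:1 hold the duty cycle; treat this layout as
 * informational rather than authoritative for any specific model.
 */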
#ifdef CONFIG_X86
static int acpi_throttling_rdmsr(u64 *value)
{
	u64 msr_high, msr_low;
	u64 msr = 0;
	int ret = -1;

	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
	    !this_cpu_has(X86_FEATURE_ACPI)) {
		pr_err("HARDWARE addr space, NOT supported yet\n");
	} else {
		msr_low = 0;
		msr_high = 0;
		rdmsr_safe(MSR_IA32_THERM_CONTROL,
			   (u32 *)&msr_low, (u32 *)&msr_high);
		msr = (msr_high << 32) | msr_low;
		*value = (u64)msr;
		ret = 0;
	}
	return ret;
}

static int acpi_throttling_wrmsr(u64 value)
{
	int ret = -1;
	u64 msr;

	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
	    !this_cpu_has(X86_FEATURE_ACPI)) {
		pr_err("HARDWARE addr space, NOT supported yet\n");
	} else {
		msr = value;
		wrmsr_safe(MSR_IA32_THERM_CONTROL,
			   msr & 0xffffffff, msr >> 32);
		ret = 0;
	}
	return ret;
}
#else
static int acpi_throttling_rdmsr(u64 *value)
{
	pr_err("HARDWARE addr space, NOT supported yet\n");
	return -1;
}

static int acpi_throttling_wrmsr(u64 value)
{
	pr_err("HARDWARE addr space, NOT supported yet\n");
	return -1;
}
#endif

static int acpi_read_throttling_status(struct acpi_processor *pr,
				       u64 *value)
{
	u32 bit_width, bit_offset;
	u32 ptc_value;
	u64 ptc_mask;
	struct acpi_processor_throttling *throttling;
	int ret = -1;

	throttling = &pr->throttling;
	switch (throttling->status_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		bit_width = throttling->status_register.bit_width;
		bit_offset = throttling->status_register.bit_offset;

		acpi_os_read_port((acpi_io_address)throttling->status_register.
				  address, &ptc_value,
				  (u32)(bit_width + bit_offset));
		ptc_mask = (1 << bit_width) - 1;
		*value = (u64)((ptc_value >> bit_offset) & ptc_mask);
		ret = 0;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		ret = acpi_throttling_rdmsr(value);
		break;
	default:
		pr_err("Unknown addr space %d\n",
		       (u32)(throttling->status_register.space_id));
	}
	return ret;
}

static int acpi_write_throttling_state(struct acpi_processor *pr,
				       u64 value)
{
	u32 bit_width, bit_offset;
	u64 ptc_value;
	u64 ptc_mask;
	struct acpi_processor_throttling *throttling;
	int ret = -1;

	throttling = &pr->throttling;
	switch (throttling->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		bit_width = throttling->control_register.bit_width;
		bit_offset = throttling->control_register.bit_offset;
		ptc_mask = (1 << bit_width) - 1;
		ptc_value = value & ptc_mask;

		acpi_os_write_port((acpi_io_address)throttling->
				   control_register.address,
				   (u32)(ptc_value << bit_offset),
				   (u32)(bit_width + bit_offset));
		ret = 0;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		ret = acpi_throttling_wrmsr(value);
		break;
	default:
		pr_err("Unknown addr space %d\n",
		       (u32)(throttling->control_register.space_id));
	}
	return ret;
}

static int acpi_get_throttling_state(struct acpi_processor *pr,
				     u64 value)
{
	int i;

	for (i = 0; i < pr->throttling.state_count; i++) {
		struct acpi_processor_tx_tss *tx =
		    (struct acpi_processor_tx_tss *)&(pr->throttling.
						      states_tss[i]);
		if (tx->control == value)
			return i;
	}
	return -1;
}

static int acpi_get_throttling_value(struct acpi_processor *pr,
				     int state, u64 *value)
{
	int ret = -1;

	/* Valid indices into states_tss[] run from 0 to state_count - 1. */
	if (state >= 0 && state < pr->throttling.state_count) {
		struct acpi_processor_tx_tss *tx =
		    (struct acpi_processor_tx_tss *)&(pr->throttling.
						      states_tss[state]);
		*value = tx->control;
		ret = 0;
	}
	return ret;
}

static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
{
	int state = 0;
	int ret;
	u64 value;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	pr->throttling.state = 0;

	value = 0;
	ret = acpi_read_throttling_status(pr, &value);
	if (ret >= 0) {
		state = acpi_get_throttling_state(pr, value);
		if (state == -1) {
			acpi_handle_debug(pr->handle,
					  "Invalid throttling state, reset\n");
			state = 0;
			ret = __acpi_processor_set_throttling(pr, state, true,
							      true);
			if (ret)
				return ret;
		}
		pr->throttling.state = state;
	}

	return 0;
}

static long __acpi_processor_get_throttling(void *data)
{
	struct acpi_processor *pr = data;

	return pr->throttling.acpi_processor_get_throttling(pr);
}

static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	/*
	 * This is either called from the CPU hotplug callback of
	 * processor_driver or via the ACPI probe function. In the latter
	 * case the CPU is not guaranteed to be online. Both call sites are
	 * protected against CPU hotplug.
	 */
	if (!cpu_online(pr->id))
		return -ENODEV;

	return call_on_cpu(pr->id, __acpi_processor_get_throttling, pr, false);
}

static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
{
	int i, step;

	if (!pr->throttling.address) {
		acpi_handle_debug(pr->handle, "No throttling register\n");
		return -EINVAL;
	} else if (!pr->throttling.duty_width) {
		acpi_handle_debug(pr->handle, "No throttling states\n");
		return -EINVAL;
	}
	/* TBD: Support duty_cycle values that span bit 4. */
	else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
		pr_warn("duty_cycle spans bit 4\n");
		return -EINVAL;
	}

	pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;

	/*
	 * Compute state values. Note that throttling displays a linear power
	 * performance relationship (at 50% performance the CPU will consume
	 * 50% power). Values are in 1/10th of a percent to preserve accuracy.
	 */
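	/*
	 * Worked example (illustrative): duty_width == 3 gives
	 * state_count == 8 and step == 125, so the table below reads
	 * T0 = 1000 (100.0%), T1 = 875 (87.5%), ... T7 = 125 (12.5%).
	 */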

	step = (1000 / pr->throttling.state_count);

	for (i = 0; i < pr->throttling.state_count; i++) {
		pr->throttling.states[i].performance = 1000 - step * i;
		pr->throttling.states[i].power = 1000 - step * i;
	}
	return 0;
}

static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
					      int state, bool force)
{
	u32 value = 0;
	u32 duty_mask = 0;
	u32 duty_value = 0;

	if (!pr)
		return -EINVAL;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if (!force && (state == pr->throttling.state))
		return 0;

	if (state < pr->throttling_platform_limit)
		return -EPERM;
	/*
	 * Calculate the duty_value and duty_mask.
	 */
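	/*
	 * Worked example (illustrative): with state_count == 8 and
	 * duty_offset == 1, a request for T3 yields duty_value ==
	 * (8 - 3) << 1 == 0x0A and duty_mask == ~(7 << 1) == ~0x0E,
	 * which clears the old duty bits before the new ones are ORed in.
	 */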
	if (state) {
		duty_value = pr->throttling.state_count - state;

		duty_value <<= pr->throttling.duty_offset;

		/* Used to clear all duty_value bits */
		duty_mask = pr->throttling.state_count - 1;

		duty_mask <<= acpi_gbl_FADT.duty_offset;
		duty_mask = ~duty_mask;
	}

	local_irq_disable();

	/*
	 * Disable throttling by writing a 0 to bit 4. Note that we must
	 * turn it off before we can change the duty_value.
	 */
	value = inl(pr->throttling.address);
	if (value & 0x10) {
		value &= 0xFFFFFFEF;
		outl(value, pr->throttling.address);
	}

	/*
	 * Write the new duty_value and then enable throttling. Note
	 * that a state value of 0 leaves throttling disabled.
	 */
	if (state) {
		value &= duty_mask;
		value |= duty_value;
		outl(value, pr->throttling.address);

		value |= 0x00000010;
		outl(value, pr->throttling.address);
	}

	pr->throttling.state = state;

	local_irq_enable();

	acpi_handle_debug(pr->handle,
			  "Throttling state set to T%d (%d%%)\n", state,
			  (pr->throttling.states[state].performance ? pr->
			   throttling.states[state].performance / 10 : 0));

	return 0;
}

static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
					     int state, bool force)
{
	int ret;
	u64 value;

	if (!pr)
		return -EINVAL;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if (!force && (state == pr->throttling.state))
		return 0;

	if (state < pr->throttling_platform_limit)
		return -EPERM;

	value = 0;
	ret = acpi_get_throttling_value(pr, state, &value);
	if (ret >= 0) {
		acpi_write_throttling_state(pr, value);
		pr->throttling.state = state;
	}

	return 0;
}

static long acpi_processor_throttling_fn(void *data)
{
	struct acpi_processor_throttling_arg *arg = data;
	struct acpi_processor *pr = arg->pr;

	return pr->throttling.acpi_processor_set_throttling(pr,
			arg->target_state, arg->force);
}

static int __acpi_processor_set_throttling(struct acpi_processor *pr,
					   int state, bool force, bool direct)
{
	int ret = 0;
	unsigned int i;
	struct acpi_processor *match_pr;
	struct acpi_processor_throttling *p_throttling;
	struct acpi_processor_throttling_arg arg;
	struct throttling_tstate t_state;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (cpu_is_offline(pr->id)) {
		/*
		 * The CPU pointed to by pr->id is offline, so there is no
		 * need to change the throttling state any more.
		 */
		return -ENODEV;
	}

	t_state.target_state = state;
	p_throttling = &(pr->throttling);

	/*
	 * The throttling notifier is called for every
	 * affected CPU in order to get one proper T-state.
	 * The notifier event is THROTTLING_PRECHANGE.
	 */
	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
		t_state.cpu = i;
		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
						   &t_state);
	}
	/*
	 * The function acpi_processor_set_throttling is called to
	 * switch the T-state. If the coordination type is SW_ALL or HW_ALL,
	 * it must be called for every affected CPU. Otherwise
	 * it can be called only for the CPU pointed to by pr.
	 */
	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
		arg.pr = pr;
		arg.target_state = state;
		arg.force = force;
		ret = call_on_cpu(pr->id, acpi_processor_throttling_fn, &arg,
				  direct);
	} else {
		/*
		 * When the T-state coordination is SW_ALL or HW_ALL,
		 * it is necessary to set the T-state for every affected
		 * CPU.
		 */
		for_each_cpu_and(i, cpu_online_mask,
				 p_throttling->shared_cpu_map) {
			match_pr = per_cpu(processors, i);
			/*
			 * If the pointer is invalid, report the
			 * error message and continue.
			 */
			if (!match_pr) {
				acpi_handle_debug(pr->handle,
						  "Invalid Pointer for CPU %d\n", i);
				continue;
			}
			/*
			 * If throttling control is unsupported on CPU i,
			 * report the error message and continue.
			 */
			if (!match_pr->flags.throttling) {
				acpi_handle_debug(pr->handle,
						  "Throttling Control unsupported on CPU %d\n", i);
				continue;
			}

			arg.pr = match_pr;
			arg.target_state = state;
			arg.force = force;
			ret = call_on_cpu(pr->id, acpi_processor_throttling_fn,
					  &arg, direct);
		}
	}
	/*
	 * After set_throttling has been called, the
	 * throttling notifier is called for every
	 * affected CPU to update the T-states.
	 * The notifier event is THROTTLING_POSTCHANGE.
	 */
	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
		t_state.cpu = i;
		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
						   &t_state);
	}

	return ret;
}

int acpi_processor_set_throttling(struct acpi_processor *pr, int state,
				  bool force)
{
	return __acpi_processor_set_throttling(pr, state, force, false);
}

int acpi_processor_get_throttling_info(struct acpi_processor *pr)
{
	int result = 0;
	struct acpi_processor_throttling *pthrottling;

	acpi_handle_debug(pr->handle,
			  "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
			  pr->throttling.address,
			  pr->throttling.duty_offset,
			  pr->throttling.duty_width);

	/*
	 * Evaluate _PTC, _TSS and _TPC.
	 * They must all be present or none of them can be used.
	 */
	if (acpi_processor_get_throttling_control(pr) ||
	    acpi_processor_get_throttling_states(pr) ||
	    acpi_processor_get_platform_limit(pr)) {
		pr->throttling.acpi_processor_get_throttling =
		    &acpi_processor_get_throttling_fadt;
		pr->throttling.acpi_processor_set_throttling =
		    &acpi_processor_set_throttling_fadt;
		if (acpi_processor_get_fadt_info(pr))
			return 0;
	} else {
		pr->throttling.acpi_processor_get_throttling =
		    &acpi_processor_get_throttling_ptc;
		pr->throttling.acpi_processor_set_throttling =
		    &acpi_processor_set_throttling_ptc;
	}

	/*
	 * If the TSD package for one CPU can't be parsed successfully, it
	 * means that this CPU will have no coordination with other CPUs.
	 */
	if (acpi_processor_get_tsd(pr)) {
		pthrottling = &pr->throttling;
		pthrottling->tsd_valid_flag = 0;
		cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
	}

	/*
	 * PIIX4 Errata: We don't support throttling on the original PIIX4.
	 * This shouldn't be an issue as few (if any) mobile systems ever
	 * used this part.
	 */
	if (errata.piix4.throttle) {
		acpi_handle_debug(pr->handle,
				  "Throttling not supported on PIIX4 A- or B-step\n");
		return 0;
	}

	acpi_handle_debug(pr->handle, "Found %d throttling states\n",
			  pr->throttling.state_count);

	pr->flags.throttling = 1;

	/*
	 * Disable throttling (if enabled). We'll let subsequent policy (e.g.
	 * thermal) decide to lower performance if it so chooses, but for now
	 * we'll crank up the speed.
	 */

	result = acpi_processor_get_throttling(pr);
	if (result)
		goto end;

	if (pr->throttling.state) {
		acpi_handle_debug(pr->handle,
				  "Disabling throttling (was T%d)\n",
				  pr->throttling.state);
		result = acpi_processor_set_throttling(pr, 0, false);
		if (result)
			goto end;
	}

end:
	if (result)
		pr->flags.throttling = 0;

	return result;
}