// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2016 ARM Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/psci.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include <linux/topology.h>

#include <asm/cpuidle.h>

#include <uapi/linux/psci.h>

#define NUM_SUSPEND_CYCLE (10)

static unsigned int nb_available_cpus;
static int tos_resident_cpu = -1;

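/*
 * Suspend test threads block on suspend_threads_started until the main
 * thread releases them; the last thread to finish (tracked by
 * nb_active_threads) signals suspend_threads_done.
 */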
static atomic_t nb_active_threads;
static struct completion suspend_threads_started =
	COMPLETION_INITIALIZER(suspend_threads_started);
static struct completion suspend_threads_done =
	COMPLETION_INITIALIZER(suspend_threads_done);

/*
 * We assume that PSCI operations are used if they are available. This is not
 * necessarily true on arm64, since the decision is based on the
 * "enable-method" property of each CPU in the DT, but given that there is no
 * arch-specific way to check this, we assume that the DT is sensible.
 */
static int psci_ops_check(void)
{
	int migrate_type = -1;
	int cpu;

	if (!(psci_ops.cpu_off && psci_ops.cpu_on && psci_ops.cpu_suspend)) {
		pr_warn("Missing PSCI operations, aborting tests\n");
		return -EOPNOTSUPP;
	}

	if (psci_ops.migrate_info_type)
		migrate_type = psci_ops.migrate_info_type();

	if (migrate_type == PSCI_0_2_TOS_UP_MIGRATE ||
	    migrate_type == PSCI_0_2_TOS_UP_NO_MIGRATE) {
		/* There is a UP Trusted OS, find on which core it resides. */
		for_each_online_cpu(cpu)
			if (psci_tos_resident_on(cpu)) {
				tos_resident_cpu = cpu;
				break;
			}
		if (tos_resident_cpu == -1)
			pr_warn("UP Trusted OS resides on no online CPU\n");
	}

	return 0;
}

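/*
 * Offline then online every CPU in @cpus, checking that hotplug fails with
 * the expected error code for the last online CPU and for the Trusted OS
 * resident CPU. Returns the number of unexpected results.
 */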
/*
 * offlined_cpus is a temporary array but passing it as an argument avoids
 * multiple allocations.
 */
static unsigned int down_and_up_cpus(const struct cpumask *cpus,
				     struct cpumask *offlined_cpus)
{
	int cpu;
	int err = 0;

	cpumask_clear(offlined_cpus);

	/* Try to power down all CPUs in the mask. */
	for_each_cpu(cpu, cpus) {
		int ret = remove_cpu(cpu);

		/*
		 * remove_cpu() checks the number of online CPUs before
		 * checking for the TOS resident CPU.
		 */
		if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) {
			if (ret != -EBUSY) {
				pr_err("Unexpected return code %d while trying "
				       "to power down last online CPU %d\n",
				       ret, cpu);
				++err;
			}
		} else if (cpu == tos_resident_cpu) {
			if (ret != -EPERM) {
				pr_err("Unexpected return code %d while trying "
				       "to power down TOS resident CPU %d\n",
				       ret, cpu);
				++err;
			}
		} else if (ret != 0) {
			pr_err("Error occurred (%d) while trying "
			       "to power down CPU %d\n", ret, cpu);
			++err;
		}

		if (ret == 0)
			cpumask_set_cpu(cpu, offlined_cpus);
	}

	/* Try to power up all the CPUs that have been offlined. */
	for_each_cpu(cpu, offlined_cpus) {
		int ret = add_cpu(cpu);

		if (ret != 0) {
			pr_err("Error occurred (%d) while trying "
			       "to power up CPU %d\n", ret, cpu);
			++err;
		} else {
			cpumask_clear_cpu(cpu, offlined_cpus);
		}
	}

	/*
	 * Something went bad at some point and some CPUs could not be turned
	 * back on.
	 */
	WARN_ON(!cpumask_empty(offlined_cpus) ||
		num_online_cpus() != nb_available_cpus);

	return err;
}

static void free_cpu_groups(int num, cpumask_var_t **pcpu_groups)
{
	int i;
	cpumask_var_t *cpu_groups = *pcpu_groups;

	for (i = 0; i < num; ++i)
		free_cpumask_var(cpu_groups[i]);
	kfree(cpu_groups);
}

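/*
 * Partition the online CPUs into groups sharing a topology core cpumask.
 * On success, *pcpu_groups points to the allocated masks and the number of
 * groups is returned; -ENOMEM is returned on allocation failure.
 */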
static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
{
	int num_groups = 0;
	cpumask_var_t tmp, *cpu_groups;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpu_groups = kcalloc(nb_available_cpus, sizeof(*cpu_groups),
			     GFP_KERNEL);
	if (!cpu_groups) {
		free_cpumask_var(tmp);
		return -ENOMEM;
	}

	cpumask_copy(tmp, cpu_online_mask);

	while (!cpumask_empty(tmp)) {
		const struct cpumask *cpu_group =
			topology_core_cpumask(cpumask_any(tmp));

		if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
			free_cpumask_var(tmp);
			free_cpu_groups(num_groups, &cpu_groups);
			return -ENOMEM;
		}
		cpumask_copy(cpu_groups[num_groups++], cpu_group);
		cpumask_andnot(tmp, tmp, cpu_group);
	}

	free_cpumask_var(tmp);
	*pcpu_groups = cpu_groups;

	return num_groups;
}

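/*
 * Offline and online all CPUs at once, then one topology group at a time.
 * Returns the accumulated error count, or -ENOMEM on allocation failure.
 */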
static int hotplug_tests(void)
{
	int i, nb_cpu_group, err = -ENOMEM;
	cpumask_var_t offlined_cpus, *cpu_groups;
	char *page_buf;

	if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
		return err;

	nb_cpu_group = alloc_init_cpu_groups(&cpu_groups);
	if (nb_cpu_group < 0)
		goto out_free_cpus;
	page_buf = (char *)__get_free_page(GFP_KERNEL);
	if (!page_buf)
		goto out_free_cpu_groups;

	/*
	 * Of course the last CPU cannot be powered down, and remove_cpu()
	 * should refuse to do that.
	 */
	pr_info("Trying to turn off and on again all CPUs\n");
	err = down_and_up_cpus(cpu_online_mask, offlined_cpus);

	/*
	 * Take down CPUs by cpu group this time. When the last CPU is turned
	 * off, the cpu group itself should shut down.
	 */
	for (i = 0; i < nb_cpu_group; ++i) {
		ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
						      cpu_groups[i]);
		/* Remove trailing newline. */
		page_buf[len - 1] = '\0';
		pr_info("Trying to turn off and on again group %d (CPUs %s)\n",
			i, page_buf);
		err += down_and_up_cpus(cpu_groups[i], offlined_cpus);
	}

	free_page((unsigned long)page_buf);
out_free_cpu_groups:
	free_cpu_groups(nb_cpu_group, &cpu_groups);
out_free_cpus:
	free_cpumask_var(offlined_cpus);
	return err;
}

static void dummy_callback(struct timer_list *unused) {}

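/*
 * Enter the cpuidle state @index on the local CPU, switching to the
 * broadcast tick first when the state stops the local timer.
 */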
static int suspend_cpu(struct cpuidle_device *dev,
		       struct cpuidle_driver *drv, int index)
{
	struct cpuidle_state *state = &drv->states[index];
	bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
	int ret;

	arch_cpu_idle_enter();

	if (broadcast) {
		/*
		 * The local timer will be shut down, we need to enter tick
		 * broadcast.
		 */
		ret = tick_broadcast_enter();
		if (ret) {
			/*
			 * In the absence of hardware broadcast mechanism,
			 * this CPU might be used to broadcast wakeups, which
			 * may be why entering tick broadcast has failed.
			 * There is little the kernel can do to work around
			 * that, so enter WFI instead (idle state 0).
			 */
			cpu_do_idle();
			ret = 0;
			goto out_arch_exit;
		}
	}

	ret = state->enter(dev, drv, index);

	if (broadcast)
		tick_broadcast_exit();

out_arch_exit:
	arch_cpu_idle_exit();

	return ret;
}

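/*
 * Per-CPU test thread: run NUM_SUSPEND_CYCLE cycles, each attempting every
 * cpuidle state except state 0, and count successful suspends, shallower
 * sleeps than requested, and errors.
 */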
static int suspend_test_thread(void *arg)
{
	int cpu = (long)arg;
	int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0;
	struct cpuidle_device *dev;
	struct cpuidle_driver *drv;
	/* No need for an actual callback, we just want to wake up the CPU. */
	struct timer_list wakeup_timer;

	/* Wait for the main thread to give the start signal. */
	wait_for_completion(&suspend_threads_started);

	/* Set maximum priority to preempt all other threads on this CPU. */
	sched_set_fifo(current);

	dev = this_cpu_read(cpuidle_devices);
	drv = cpuidle_get_cpu_driver(dev);

	pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
		cpu, drv->state_count - 1);

	timer_setup_on_stack(&wakeup_timer, dummy_callback, 0);
	for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
		int index;
		/*
		 * Test all possible states, except 0 (which is usually WFI and
		 * doesn't use PSCI).
		 */
		for (index = 1; index < drv->state_count; ++index) {
			int ret;
			struct cpuidle_state *state = &drv->states[index];

			/*
			 * Set the timer to wake this CPU up in some time (which
			 * should be largely sufficient for entering suspend).
			 * If the local tick is disabled when entering suspend,
			 * suspend_cpu() takes care of switching to a broadcast
			 * tick, so the timer will still wake us up.
			 */
			mod_timer(&wakeup_timer, jiffies +
				  usecs_to_jiffies(state->target_residency));

			/* IRQs must be disabled during suspend operations. */
			local_irq_disable();

			ret = suspend_cpu(dev, drv, index);

			/*
			 * We have woken up. Re-enable IRQs to handle any
			 * pending interrupt, do not wait until the end of the
			 * loop.
			 */
			local_irq_enable();

			if (ret == index) {
				++nb_suspend;
			} else if (ret >= 0) {
				/* We did not enter the expected state. */
				++nb_shallow_sleep;
			} else {
				pr_err("Failed to suspend CPU %d: error %d "
				       "(requested state %d, cycle %d)\n",
				       cpu, ret, index, i);
				++nb_err;
			}
		}
	}

	/*
	 * Disable the timer to make sure that the timer will not trigger
	 * later.
	 */
	del_timer(&wakeup_timer);
	destroy_timer_on_stack(&wakeup_timer);

	if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
		complete(&suspend_threads_done);

	for (;;) {
		/* Needs to be set first to avoid missing a wakeup. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_park())
			break;
		schedule();
	}

	pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
		cpu, nb_suspend, nb_shallow_sleep, nb_err);

	kthread_parkme();

	return nb_err;
}

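/*
 * Create one suspend test thread per online CPU that has a registered
 * cpuidle device, release them all at once and collect their error counts.
 */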
static int suspend_tests(void)
{
	int i, cpu, err = 0;
	struct task_struct **threads;
	int nb_threads = 0;

	threads = kmalloc_array(nb_available_cpus, sizeof(*threads),
				GFP_KERNEL);
	if (!threads)
		return -ENOMEM;

	/*
	 * Stop cpuidle to prevent the idle tasks from entering a deep sleep
	 * mode, as it might interfere with the suspend threads on other CPUs.
	 * This does not prevent the suspend threads from using cpuidle (only
	 * the idle tasks check this status). Take the idle lock so that
	 * the cpuidle driver and device look-up can be carried out safely.
	 */
	cpuidle_pause_and_lock();

	for_each_online_cpu(cpu) {
		struct task_struct *thread;
		/* Check that cpuidle is available on that CPU. */
		struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
		struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

		if (!dev || !drv) {
			pr_warn("cpuidle not available on CPU %d, ignoring\n",
				cpu);
			continue;
		}

		thread = kthread_create_on_cpu(suspend_test_thread,
					       (void *)(long)cpu, cpu,
					       "psci_suspend_test");
		if (IS_ERR(thread))
			pr_err("Failed to create kthread on CPU %d\n", cpu);
		else
			threads[nb_threads++] = thread;
	}

	if (nb_threads < 1) {
		err = -ENODEV;
		goto out;
	}

	atomic_set(&nb_active_threads, nb_threads);

	/*
	 * Wake up the suspend threads. To avoid the main thread being preempted
	 * before all the threads have been unparked, the suspend threads will
	 * wait for the completion of suspend_threads_started.
	 */
	for (i = 0; i < nb_threads; ++i)
		wake_up_process(threads[i]);
	complete_all(&suspend_threads_started);

	wait_for_completion(&suspend_threads_done);

	/* Stop and destroy all threads, get return status. */
	for (i = 0; i < nb_threads; ++i) {
		err += kthread_park(threads[i]);
		err += kthread_stop(threads[i]);
	}
out:
	cpuidle_resume_and_unlock();
	kfree(threads);
	return err;
}

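/*
 * Module entry point: run the hotplug tests, then the suspend tests, and
 * report the results in the kernel log.
 */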
static int __init psci_checker(void)
{
	int ret;

	/*
	 * Since we're in an initcall, we assume that all the CPUs that can
	 * be onlined have been onlined.
	 *
	 * The tests assume that hotplug is enabled but nobody else is using it,
	 * otherwise the results will be unpredictable. However, since there
	 * is no userspace yet in initcalls, that should be fine, as long as
	 * no torture test is running at the same time (see Kconfig).
	 */
	nb_available_cpus = num_online_cpus();

	/* Check PSCI operations are set up and working. */
	ret = psci_ops_check();
	if (ret)
		return ret;

	pr_info("PSCI checker started using %u CPUs\n", nb_available_cpus);

	pr_info("Starting hotplug tests\n");
	ret = hotplug_tests();
	if (ret == 0)
		pr_info("Hotplug tests passed OK\n");
	else if (ret > 0)
		pr_err("%d error(s) encountered in hotplug tests\n", ret);
	else {
		pr_err("Out of memory\n");
		return ret;
	}

	pr_info("Starting suspend tests (%d cycles per state)\n",
		NUM_SUSPEND_CYCLE);
	ret = suspend_tests();
	if (ret == 0)
		pr_info("Suspend tests passed OK\n");
	else if (ret > 0)
		pr_err("%d error(s) encountered in suspend tests\n", ret);
	else {
		switch (ret) {
		case -ENOMEM:
			pr_err("Out of memory\n");
			break;
		case -ENODEV:
			pr_warn("Could not start suspend tests on any CPU\n");
			break;
		}
	}

	pr_info("PSCI checker completed\n");
	return ret < 0 ? ret : 0;
}
late_initcall(psci_checker);