// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/power/suspend.c - Suspend to RAM and standby functionality.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/swait.h>
#include <linux/ftrace.h>
#include <trace/events/power.h>
#include <linux/compiler.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>

#include "power.h"

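/*
 * Labels for the sleep states exposed via /sys/power/state and their "mem"
 * variants exposed via /sys/power/mem_sleep.
 */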
const char * const pm_labels[] = {
	[PM_SUSPEND_TO_IDLE] = "freeze",
	[PM_SUSPEND_STANDBY] = "standby",
	[PM_SUSPEND_MEM] = "mem",
};
const char *pm_states[PM_SUSPEND_MAX];
static const char * const mem_sleep_labels[] = {
	[PM_SUSPEND_TO_IDLE] = "s2idle",
	[PM_SUSPEND_STANDBY] = "shallow",
	[PM_SUSPEND_MEM] = "deep",
};
const char *mem_sleep_states[PM_SUSPEND_MAX];

suspend_state_t mem_sleep_current = PM_SUSPEND_TO_IDLE;
suspend_state_t mem_sleep_default = PM_SUSPEND_MAX;
suspend_state_t pm_suspend_target_state;
EXPORT_SYMBOL_GPL(pm_suspend_target_state);

unsigned int pm_suspend_global_flags;
EXPORT_SYMBOL_GPL(pm_suspend_global_flags);

static const struct platform_suspend_ops *suspend_ops;
static const struct platform_s2idle_ops *s2idle_ops;
static DECLARE_SWAIT_QUEUE_HEAD(s2idle_wait_head);

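/*
 * Suspend-to-idle state machine state; updates racing with wakeup events are
 * serialized by s2idle_lock.
 */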
enum s2idle_states __read_mostly s2idle_state;
static DEFINE_RAW_SPINLOCK(s2idle_lock);

/**
 * pm_suspend_default_s2idle - Check if suspend-to-idle is the default suspend.
 *
 * Return 'true' if suspend-to-idle has been selected as the default system
 * suspend method.
 */
bool pm_suspend_default_s2idle(void)
{
	return mem_sleep_current == PM_SUSPEND_TO_IDLE;
}
EXPORT_SYMBOL_GPL(pm_suspend_default_s2idle);

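/**
 * s2idle_set_ops - Install platform callbacks for suspend-to-idle.
 * @ops: Platform suspend-to-idle operations to use.
 */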
void s2idle_set_ops(const struct platform_s2idle_ops *ops)
{
	unsigned int sleep_flags;

	sleep_flags = lock_system_sleep();
	s2idle_ops = ops;
	unlock_system_sleep(sleep_flags);
}

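/* Reset the suspend-to-idle state machine before a new transition. */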
static void s2idle_begin(void)
{
	s2idle_state = S2IDLE_STATE_NONE;
}

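/*
 * Push the whole system into idle and wait there until s2idle_wake() moves
 * s2idle_state to S2IDLE_STATE_WAKE, or bail out early if a wakeup event is
 * already pending.
 */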
static void s2idle_enter(void)
{
	trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, true);

	/*
	 * The correctness of the code below depends on the number of online
	 * CPUs being stable, but CPUs cannot be taken offline or put online
	 * while it is running.
	 *
	 * The s2idle_lock must be acquired before the pending wakeup check to
	 * prevent pm_system_wakeup() from running as a whole between that check
	 * and the subsequent s2idle_state update in which case a wakeup event
	 * would get lost.
	 */
	raw_spin_lock_irq(&s2idle_lock);
	if (pm_wakeup_pending())
		goto out;

	s2idle_state = S2IDLE_STATE_ENTER;
	raw_spin_unlock_irq(&s2idle_lock);

	/* Push all the CPUs into the idle loop. */
	wake_up_all_idle_cpus();
	/* Make the current CPU wait so it can enter the idle loop too. */
	swait_event_exclusive(s2idle_wait_head,
			      s2idle_state == S2IDLE_STATE_WAKE);

	/*
	 * Kick all CPUs to ensure that they resume their timers and restore
	 * consistent system state.
	 */
	wake_up_all_idle_cpus();

	raw_spin_lock_irq(&s2idle_lock);

 out:
	s2idle_state = S2IDLE_STATE_NONE;
	raw_spin_unlock_irq(&s2idle_lock);

	trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, false);
}

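/*
 * Keep re-entering the idle-based sleep state until a genuine wakeup condition
 * is detected, filtering out spurious wakeups along the way.
 */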
static void s2idle_loop(void)
{
	pm_pr_dbg("suspend-to-idle\n");

	/*
	 * Suspend-to-idle equals:
	 * frozen processes + suspended devices + idle processors.
	 * Thus s2idle_enter() should be called right after all devices have
	 * been suspended.
	 *
	 * Wakeups during the noirq suspend of devices may be spurious, so try
	 * to avoid them upfront.
	 */
	for (;;) {
		if (s2idle_ops && s2idle_ops->wake) {
			if (s2idle_ops->wake())
				break;
		} else if (pm_wakeup_pending()) {
			break;
		}

		if (s2idle_ops && s2idle_ops->check)
			s2idle_ops->check();

		s2idle_enter();
	}

	pm_pr_dbg("resume from suspend-to-idle\n");
}

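/**
 * s2idle_wake - Break all CPUs out of the suspend-to-idle wait.
 *
 * If a suspend-to-idle transition is in progress, flag it as woken up so that
 * the wait in s2idle_enter() terminates.
 */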
void s2idle_wake(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&s2idle_lock, flags);
	if (s2idle_state > S2IDLE_STATE_NONE) {
		s2idle_state = S2IDLE_STATE_WAKE;
		swake_up_one(&s2idle_wait_head);
	}
	raw_spin_unlock_irqrestore(&s2idle_lock, flags);
}
EXPORT_SYMBOL_GPL(s2idle_wake);

static bool valid_state(suspend_state_t state)
{
	/*
	 * The PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states require low-level
	 * support and need to be valid to the low-level implementation.
	 *
	 * No ->valid() or ->enter() callback implies that none are valid.
	 */
	return suspend_ops && suspend_ops->valid && suspend_ops->valid(state) &&
		suspend_ops->enter;
}

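/* Set up the sleep states that are always valid, early during boot. */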
void __init pm_states_init(void)
{
	/* "mem" and "freeze" are always present in /sys/power/state. */
	pm_states[PM_SUSPEND_MEM] = pm_labels[PM_SUSPEND_MEM];
	pm_states[PM_SUSPEND_TO_IDLE] = pm_labels[PM_SUSPEND_TO_IDLE];
	/*
	 * Suspend-to-idle should be supported even without any suspend_ops,
	 * initialize mem_sleep_states[] accordingly here.
	 */
	mem_sleep_states[PM_SUSPEND_TO_IDLE] = mem_sleep_labels[PM_SUSPEND_TO_IDLE];
}

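/*
 * Handle the mem_sleep_default= kernel command line option, which selects the
 * default /sys/power/mem_sleep mode, e.g. mem_sleep_default=deep to prefer
 * full suspend-to-RAM where the platform supports it.
 */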
static int __init mem_sleep_default_setup(char *str)
{
	suspend_state_t state;

	for (state = PM_SUSPEND_TO_IDLE; state <= PM_SUSPEND_MEM; state++)
		if (mem_sleep_labels[state] &&
		    !strcmp(str, mem_sleep_labels[state])) {
			mem_sleep_default = state;
			mem_sleep_current = state;
			break;
		}

	return 1;
}
__setup("mem_sleep_default=", mem_sleep_default_setup);

/**
 * suspend_set_ops - Set the global suspend method table.
 * @ops: Suspend operations to use.
 */
void suspend_set_ops(const struct platform_suspend_ops *ops)
{
	unsigned int sleep_flags;

	sleep_flags = lock_system_sleep();

	suspend_ops = ops;

	if (valid_state(PM_SUSPEND_STANDBY)) {
		mem_sleep_states[PM_SUSPEND_STANDBY] = mem_sleep_labels[PM_SUSPEND_STANDBY];
		pm_states[PM_SUSPEND_STANDBY] = pm_labels[PM_SUSPEND_STANDBY];
		if (mem_sleep_default == PM_SUSPEND_STANDBY)
			mem_sleep_current = PM_SUSPEND_STANDBY;
	}
	if (valid_state(PM_SUSPEND_MEM)) {
		mem_sleep_states[PM_SUSPEND_MEM] = mem_sleep_labels[PM_SUSPEND_MEM];
		if (mem_sleep_default >= PM_SUSPEND_MEM)
			mem_sleep_current = PM_SUSPEND_MEM;
	}

	unlock_system_sleep(sleep_flags);
}
EXPORT_SYMBOL_GPL(suspend_set_ops);

/**
 * suspend_valid_only_mem - Generic memory-only valid callback.
 * @state: Target system sleep state.
 *
 * Platform drivers that implement mem suspend only and only need to check for
 * that in their .valid() callback can use this instead of rolling their own
 * .valid() callback.
 */
int suspend_valid_only_mem(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM;
}
EXPORT_SYMBOL_GPL(suspend_valid_only_mem);

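/*
 * Suspend-to-idle is always supported.  The other states additionally require
 * platform support and are rejected while CXL memory is active, as its
 * contents may not be preserved across the transition.
 */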
static bool sleep_state_supported(suspend_state_t state)
{
	return state == PM_SUSPEND_TO_IDLE ||
	       (valid_state(state) && !cxl_mem_active());
}

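/*
 * The platform_*() helpers below dispatch each phase of the transition either
 * to the s2idle_ops callbacks (for suspend-to-idle) or to the suspend_ops
 * callbacks (for platform-assisted sleep states), treating missing callbacks
 * as no-ops.
 */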
static int platform_suspend_prepare(suspend_state_t state)
{
	return state != PM_SUSPEND_TO_IDLE && suspend_ops->prepare ?
		suspend_ops->prepare() : 0;
}

static int platform_suspend_prepare_late(suspend_state_t state)
{
	return state == PM_SUSPEND_TO_IDLE && s2idle_ops && s2idle_ops->prepare ?
		s2idle_ops->prepare() : 0;
}

static int platform_suspend_prepare_noirq(suspend_state_t state)
{
	if (state == PM_SUSPEND_TO_IDLE)
		return s2idle_ops && s2idle_ops->prepare_late ?
			s2idle_ops->prepare_late() : 0;

	return suspend_ops->prepare_late ? suspend_ops->prepare_late() : 0;
}

static void platform_resume_noirq(suspend_state_t state)
{
	if (state == PM_SUSPEND_TO_IDLE) {
		if (s2idle_ops && s2idle_ops->restore_early)
			s2idle_ops->restore_early();
	} else if (suspend_ops->wake) {
		suspend_ops->wake();
	}
}

static void platform_resume_early(suspend_state_t state)
{
	if (state == PM_SUSPEND_TO_IDLE && s2idle_ops && s2idle_ops->restore)
		s2idle_ops->restore();
}

static void platform_resume_finish(suspend_state_t state)
{
	if (state != PM_SUSPEND_TO_IDLE && suspend_ops->finish)
		suspend_ops->finish();
}

static int platform_suspend_begin(suspend_state_t state)
{
	if (state == PM_SUSPEND_TO_IDLE && s2idle_ops && s2idle_ops->begin)
		return s2idle_ops->begin();
	else if (suspend_ops && suspend_ops->begin)
		return suspend_ops->begin(state);
	else
		return 0;
}

static void platform_resume_end(suspend_state_t state)
{
	if (state == PM_SUSPEND_TO_IDLE && s2idle_ops && s2idle_ops->end)
		s2idle_ops->end();
	else if (suspend_ops && suspend_ops->end)
		suspend_ops->end();
}

static void platform_recover(suspend_state_t state)
{
	if (state != PM_SUSPEND_TO_IDLE && suspend_ops->recover)
		suspend_ops->recover();
}

static bool platform_suspend_again(suspend_state_t state)
{
	return state != PM_SUSPEND_TO_IDLE && suspend_ops->suspend_again ?
		suspend_ops->suspend_again() : false;
}

#ifdef CONFIG_PM_DEBUG
static unsigned int pm_test_delay = 5;
module_param(pm_test_delay, uint, 0644);
MODULE_PARM_DESC(pm_test_delay,
		 "Number of seconds to wait before resuming from suspend test");
#endif

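/*
 * In PM test mode (see /sys/power/pm_test), stop the transition at the given
 * level, wait for pm_test_delay seconds and return 1 so that the caller backs
 * out of the transition.
 */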
static int suspend_test(int level)
{
#ifdef CONFIG_PM_DEBUG
	if (pm_test_level == level) {
		pr_info("suspend debug: Waiting for %d second(s).\n",
			pm_test_delay);
		mdelay(pm_test_delay * 1000);
		return 1;
	}
#endif /* !CONFIG_PM_DEBUG */
	return 0;
}

/**
 * suspend_prepare - Prepare for entering system sleep state.
 * @state: Target system sleep state.
 *
 * Common code run for every system sleep state that can be entered (except for
 * hibernation). Run suspend notifiers, allocate the "suspend" console and
 * freeze processes.
 */
static int suspend_prepare(suspend_state_t state)
{
	int error;

	if (!sleep_state_supported(state))
		return -EPERM;

	pm_prepare_console();

	error = pm_notifier_call_chain_robust(PM_SUSPEND_PREPARE, PM_POST_SUSPEND);
	if (error)
		goto Restore;

	if (filesystem_freeze_enabled)
		filesystems_freeze();
	trace_suspend_resume(TPS("freeze_processes"), 0, true);
	error = suspend_freeze_processes();
	trace_suspend_resume(TPS("freeze_processes"), 0, false);
	if (!error)
		return 0;

	dpm_save_failed_step(SUSPEND_FREEZE);
	pm_notifier_call_chain(PM_POST_SUSPEND);
 Restore:
	pm_restore_console();
	return error;
}

/* default implementation */
void __weak arch_suspend_disable_irqs(void)
{
	local_irq_disable();
}

/* default implementation */
void __weak arch_suspend_enable_irqs(void)
{
	local_irq_enable();
}

/**
 * suspend_enter - Make the system enter the given sleep state.
 * @state: System sleep state to enter.
 * @wakeup: Returns information that the sleep state should not be re-entered.
 *
 * This function should be called after devices have been suspended.
 */
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
	int error;

	error = platform_suspend_prepare(state);
	if (error)
		goto Platform_finish;

	error = dpm_suspend_late(PMSG_SUSPEND);
	if (error) {
		pr_err("late suspend of devices failed\n");
		goto Platform_finish;
	}
	error = platform_suspend_prepare_late(state);
	if (error)
		goto Devices_early_resume;

	error = dpm_suspend_noirq(PMSG_SUSPEND);
	if (error) {
		pr_err("noirq suspend of devices failed\n");
		goto Platform_early_resume;
	}
	error = platform_suspend_prepare_noirq(state);
	if (error)
		goto Platform_wake;

	if (suspend_test(TEST_PLATFORM))
		goto Platform_wake;

	if (state == PM_SUSPEND_TO_IDLE) {
		s2idle_loop();
		goto Platform_wake;
	}

	error = pm_sleep_disable_secondary_cpus();
	if (error || suspend_test(TEST_CPUS))
		goto Enable_cpus;

	arch_suspend_disable_irqs();
	BUG_ON(!irqs_disabled());

	system_state = SYSTEM_SUSPEND;

	error = syscore_suspend();
	if (!error) {
		*wakeup = pm_wakeup_pending();
		if (!(suspend_test(TEST_CORE) || *wakeup)) {
			trace_suspend_resume(TPS("machine_suspend"),
				state, true);
			error = suspend_ops->enter(state);
			trace_suspend_resume(TPS("machine_suspend"),
				state, false);
		} else if (*wakeup) {
			error = -EBUSY;
		}
		syscore_resume();
	}

	system_state = SYSTEM_RUNNING;

	arch_suspend_enable_irqs();
	BUG_ON(irqs_disabled());

 Enable_cpus:
	pm_sleep_enable_secondary_cpus();

 Platform_wake:
	platform_resume_noirq(state);
	dpm_resume_noirq(PMSG_RESUME);

 Platform_early_resume:
	platform_resume_early(state);

 Devices_early_resume:
	dpm_resume_early(PMSG_RESUME);

 Platform_finish:
	platform_resume_finish(state);
	return error;
}

/**
 * suspend_devices_and_enter - Suspend devices and enter system sleep state.
 * @state: System sleep state to enter.
 */
int suspend_devices_and_enter(suspend_state_t state)
{
	int error;
	bool wakeup = false;

	if (!sleep_state_supported(state))
		return -ENOSYS;

	pm_suspend_target_state = state;

	if (state == PM_SUSPEND_TO_IDLE)
		pm_set_suspend_no_platform();

	error = platform_suspend_begin(state);
	if (error)
		goto Close;

	console_suspend_all();
	suspend_test_start();
	error = dpm_suspend_start(PMSG_SUSPEND);
	if (error) {
		pr_err("Some devices failed to suspend, or early wake event detected\n");
		goto Recover_platform;
	}
	suspend_test_finish("suspend devices");
	if (suspend_test(TEST_DEVICES))
		goto Recover_platform;

	do {
		error = suspend_enter(state, &wakeup);
	} while (!error && !wakeup && platform_suspend_again(state));

 Resume_devices:
	suspend_test_start();
	dpm_resume_end(PMSG_RESUME);
	suspend_test_finish("resume devices");
	trace_suspend_resume(TPS("console_resume_all"), state, true);
	console_resume_all();
	trace_suspend_resume(TPS("console_resume_all"), state, false);

 Close:
	platform_resume_end(state);
	pm_suspend_target_state = PM_SUSPEND_ON;
	return error;

 Recover_platform:
	platform_recover(state);
	goto Resume_devices;
}

/**
 * suspend_finish - Clean up before finishing the suspend sequence.
 *
 * Call platform code to clean up, restart processes, and free the console that
 * we've allocated. This routine is not called for hibernation.
 */
static void suspend_finish(void)
{
	suspend_thaw_processes();
	filesystems_thaw();
	pm_notifier_call_chain(PM_POST_SUSPEND);
	pm_restore_console();
}

/**
 * enter_state - Do common work needed to enter system sleep state.
 * @state: System sleep state to enter.
 *
 * Make sure that no one else is trying to put the system into a sleep state.
 * Fail if that's not the case. Otherwise, prepare for system suspend, make the
 * system enter the given sleep state and clean up after wakeup.
 */
static int enter_state(suspend_state_t state)
{
	int error;

	trace_suspend_resume(TPS("suspend_enter"), state, true);
	if (state == PM_SUSPEND_TO_IDLE) {
#ifdef CONFIG_PM_DEBUG
		if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) {
			pr_warn("Unsupported test mode for suspend to idle, please choose none/freezer/devices/platform.\n");
			return -EAGAIN;
		}
#endif
	} else if (!valid_state(state)) {
		return -EINVAL;
	}
	if (!mutex_trylock(&system_transition_mutex))
		return -EBUSY;

	if (state == PM_SUSPEND_TO_IDLE)
		s2idle_begin();

	if (sync_on_suspend_enabled) {
		trace_suspend_resume(TPS("sync_filesystems"), 0, true);
		ksys_sync_helper();
		trace_suspend_resume(TPS("sync_filesystems"), 0, false);
	}
	if (filesystem_freeze_enabled)
		filesystems_freeze();

	pm_pr_dbg("Preparing system for sleep (%s)\n", mem_sleep_labels[state]);
	pm_suspend_clear_flags();
	error = suspend_prepare(state);
	if (error)
		goto Unlock;

	if (suspend_test(TEST_FREEZER))
		goto Finish;

	trace_suspend_resume(TPS("suspend_enter"), state, false);
	pm_pr_dbg("Suspending system (%s)\n", mem_sleep_labels[state]);
	pm_restrict_gfp_mask();
	error = suspend_devices_and_enter(state);
	pm_restore_gfp_mask();

 Finish:
	events_check_enabled = false;
	pm_pr_dbg("Finishing wakeup.\n");
	suspend_finish();
 Unlock:
	filesystems_thaw();
	mutex_unlock(&system_transition_mutex);
	return error;
}

/**
 * pm_suspend - Externally visible function for suspending the system.
 * @state: System sleep state to enter.
 *
 * Check if the value of @state represents one of the supported states,
 * execute enter_state() and update system suspend statistics.
 */
int pm_suspend(suspend_state_t state)
{
	int error;

	if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
		return -EINVAL;

	pr_info("suspend entry (%s)\n", mem_sleep_labels[state]);
	error = enter_state(state);
	dpm_save_errno(error);
	pr_info("suspend exit\n");
	return error;
}
EXPORT_SYMBOL(pm_suspend);