/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Hibernate low-level support
 *
 * Copyright (C) 2016 ARM Ltd.
 * Author: James Morse <james.morse@arm.com>
 */
#include <linux/linkage.h>
#include <linux/errno.h>

#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/cputype.h>
#include <asm/memory.h>
#include <asm/page.h>
#include <asm/virt.h>

/*
 * Resume from hibernate
 *
 * Loads temporary page tables then restores the memory image.
 * Finally branches to cpu_resume() to restore the state saved by
 * swsusp_arch_suspend().
 *
 * Because this code has to be copied to a 'safe' page, it can't call out to
 * other functions by PC-relative address. Also remember that it may be
 * mid-way through over-writing other functions. For this reason it contains
 * code from caches_clean_inval_pou() and uses the copy_page() macro.
 *
 * This 'safe' page is mapped via ttbr0, and executed from there. This function
 * switches to a copy of the linear map in ttbr1, performs the restore, then
 * switches ttbr1 to the original kernel's swapper_pg_dir.
 *
 * All of memory gets written to, including code. We need to clean the kernel
 * text to the Point of Coherence (PoC) before secondary cores can be booted.
 * Because the kernel modules and executable pages mapped to user space are
 * also written as data, we clean all pages we touch to the Point of
 * Unification (PoU).
 *
 * x0: physical address of temporary page tables
 * x1: physical address of swapper page tables
 * x2: address of cpu_resume
 * x3: linear map address of restore_pblist in the current kernel
 * x4: physical address of __hyp_stub_vectors, or 0
 * x5: physical address of a zero page that remains zero after resume
 */
.pushsection ".hibernate_exit.text", "ax"
SYM_CODE_START(swsusp_arch_suspend_exit)
	/*
	 * We execute from ttbr0, change ttbr1 to our copied linear map tables
	 * with a break-before-make via the zero page
	 */
	break_before_make_ttbr_switch	x5, x0, x6, x8

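	/*
	 * Stash the arguments in registers that copy_page and the cache
	 * maintenance loop below (which clobber x0-x10) will not touch.
	 * x21: swapper page tables, x24: hyp stub (or 0), x25: zero page.
	 * Setting lr (x30) to cpu_resume makes the final ret branch there.
	 */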
	mov	x21, x1
	mov	x30, x2
	mov	x24, x4
	mov	x25, x5

	/* walk the restore_pblist and use copy_page() to over-write memory */
	mov	x19, x3

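	/*
	 * HIBERN_PBE_ORIG/ADDR/NEXT are the asm-offsets of struct pbe's
	 * orig_address, address and next members: each entry names one saved
	 * page and the pre-hibernate location it must be copied back to.
	 */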
1:	ldr	x10, [x19, #HIBERN_PBE_ORIG]
	mov	x0, x10
	ldr	x1, [x19, #HIBERN_PBE_ADDR]

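	/*
	 * copy_page (asm/assembler.h) copies PAGE_SIZE bytes from x1 to x0
	 * using x2-x9 as temporaries. It advances x0/x1 past the page, which
	 * is why the original address is kept in x10 for the clean below.
	 */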
	copy_page	x0, x1, x2, x3, x4, x5, x6, x7, x8, x9

	add	x1, x10, #PAGE_SIZE
	/* Clean the copied page to PoU - based on caches_clean_inval_pou() */
	raw_dcache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x10, x3
2:	/* clean D line / unified line */
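	/*
	 * Cores affected by ARM64_WORKAROUND_CLEAN_CACHE must substitute a
	 * clean+invalidate to the PoC (dc civac) for the clean to the PoU.
	 */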
alternative_insn "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	2b

	ldr	x19, [x19, #HIBERN_PBE_NEXT]
	cbnz	x19, 1b
	dsb	ish		/* wait for PoU cleaning to finish */

	/*
	 * Switch ttbr1 to the restored kernel's swapper_pg_dir (x21),
	 * again with a break-before-make via the zero page (x25).
	 */
	break_before_make_ttbr_switch	x25, x21, x6, x8

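	/*
	 * The restored kernel's text was written as data: invalidate all
	 * I-caches in the Inner Shareable domain so no stale instructions
	 * from the old image can be fetched.
	 */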
	ic	ialluis
	dsb	ish
	isb

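	/*
	 * x24 is the physical address of __hyp_stub_vectors, or 0 if we
	 * booted at EL1 or are running with VHE. The hvc lands in the
	 * temporary EL2 vectors the hibernate core installed, so EL2 can
	 * be re-initialised for the restored kernel.
	 */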
	cbz	x24, 3f		/* Do we need to re-initialise EL2? */
	hvc	#0
3:	ret
SYM_CODE_END(swsusp_arch_suspend_exit)
.popsection