// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hibernation support specific to i386 - temporary page tables
 *
 * Copyright (c) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */

#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>

#include <asm/page.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/suspend.h>

/* Pointer to the temporary resume page tables */
pgd_t *resume_pg_dir;

/* The following three functions are based on the analogous code in
 * arch/x86/mm/init_32.c
 */

/*
 * Create a middle page table on a resume-safe page and put a pointer to it in
 * the given global directory entry. In non-PAE builds this simply returns the
 * pgd entry itself, since the middle layer is folded.
 */
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
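	/*
	 * With PAE the pmd level is a real table, so it needs a
	 * resume-safe page of its own.
	 */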
	pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd_table)
		return NULL;

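	/*
	 * PAE top-level entries (PDPTEs) support only a limited set of
	 * flags, hence _PAGE_PRESENT alone here.
	 */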
	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);

	BUG_ON(pmd_table != pmd_offset(pud, 0));
#else
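	/*
	 * Without PAE the p4d, pud and pmd levels are folded, so these
	 * helpers effectively return the pgd entry itself.
	 */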
	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);
	pmd_table = pmd_offset(pud, 0);
#endif

	return pmd_table;
}

/*
 * Create a page table on a resume-safe page and place a pointer to it in
 * a middle page directory entry.
 */
static pte_t *resume_one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
		if (!page_table)
			return NULL;

		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));

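		/* The regular page-table walk must now find the new table. */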
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));

		return page_table;
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET. The page tables are allocated out of resume-safe pages.
 */
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = resume_one_md_table_init(pgd);
		if (!pmd)
			return -ENOMEM;

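		/*
		 * Past the end of low memory, keep allocating (empty) pmd
		 * tables for the remaining slots, but map nothing into them.
		 */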
		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
			if (pfn >= max_low_pfn)
				break;

			/* Map with big pages if possible, otherwise create
			 * normal page tables.
			 * NOTE: We can mark everything as executable here
			 */
			if (boot_cpu_has(X86_FEATURE_PSE)) {
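				/*
				 * A single large-page pmd entry covers
				 * PTRS_PER_PTE small pages: 4MB without PAE,
				 * 2MB with PAE.
				 */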
				set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				pfn += PTRS_PER_PTE;
			} else {
				pte_t *max_pte;

				pte = resume_one_page_table_init(pmd);
				if (!pte)
					return -ENOMEM;

				max_pte = pte + PTRS_PER_PTE;
				for (; pte < max_pte; pte++, pfn++) {
					if (pfn >= max_low_pfn)
						break;

					set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
				}
			}
		}
	}

	return 0;
}

static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
{
#ifdef CONFIG_X86_PAE
	int i;

	/*
	 * Init entries of the first-level page table to the zero page, so
	 * that every slot not populated later points at a pmd page full of
	 * not-present entries.
	 */
	for (i = 0; i < PTRS_PER_PGD; i++)
		set_pgd(pg_dir + i,
			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif
}

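/*
 * Map the kernel page the resume code will jump to: restore_jump_address
 * is mapped to the physical address jump_address_phys, so that execution
 * can continue at the same virtual address once the temporary page tables
 * are in place.
 */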
static int set_up_temporary_text_mapping(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_base + pgd_index(restore_jump_address);

	pmd = resume_one_md_table_init(pgd);
	if (!pmd)
		return -ENOMEM;

	if (boot_cpu_has(X86_FEATURE_PSE)) {
		set_pmd(pmd + pmd_index(restore_jump_address),
			__pmd((jump_address_phys & PMD_MASK) | pgprot_val(PAGE_KERNEL_LARGE_EXEC)));
	} else {
		pte = resume_one_page_table_init(pmd);
		if (!pte)
			return -ENOMEM;
		set_pte(pte + pte_index(restore_jump_address),
			__pte((jump_address_phys & PAGE_MASK) | pgprot_val(PAGE_KERNEL_EXEC)));
	}

	return 0;
}

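/*
 * Build the temporary page tables on resume-safe pages, publish their
 * physical address to the assembly restore code via temp_pgt, relocate
 * that code to a safe page and finally jump into the image restore loop.
 */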
asmlinkage int swsusp_arch_resume(void)
{
	int error;

	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!resume_pg_dir)
		return -ENOMEM;

	resume_init_first_level_page_table(resume_pg_dir);

	error = set_up_temporary_text_mapping(resume_pg_dir);
	if (error)
		return error;

	error = resume_physical_mapping_init(resume_pg_dir);
	if (error)
		return error;

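	/*
	 * Hand the physical address of the temporary page tables to the
	 * assembly restore code, which loads it into %cr3.
	 */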
	temp_pgt = __pa(resume_pg_dir);

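	/*
	 * Move the low-level restore routine to a resume-safe page so that
	 * it cannot be overwritten by the image data it copies.
	 */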
	error = relocate_restore_code();
	if (error)
		return error;

	/* We have enough memory and from now on we cannot recover. */
	restore_image();
	return 0;
}