// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

static pgd_t tmp_pg_dir[PTRS_PER_PTE] __initdata __aligned(PAGE_SIZE);

/*
 * The p*d_populate functions call virt_to_phys implicitly, so they can't be
 * used directly on kernel symbols (kasan_early_shadow_p*d). All the early
 * functions are called too early to use lm_alias, so the __p*d_populate
 * variants must be used instead, taking the physical address from
 * __pa_symbol.
 */

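/*
 * Allocate a zeroed page-table page from memblock, preferring 'node' and
 * addresses above MAX_DMA_ADDRESS, without kmemleak tracking
 * (MEMBLOCK_ALLOC_NOLEAKTRACE). Returns the physical address of the page.
 */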
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
					 __pa(MAX_DMA_ADDRESS),
					 MEMBLOCK_ALLOC_NOLEAKTRACE, node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

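/*
 * Like kasan_alloc_zeroed_page(), but the page is left uninitialized; the
 * caller is expected to fill it with KASAN_SHADOW_INIT.
 */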
static phys_addr_t __init kasan_alloc_raw_page(int node)
{
	void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
					     __pa(MAX_DMA_ADDRESS),
					     MEMBLOCK_ALLOC_NOLEAKTRACE,
					     node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

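/*
 * Return the pte mapping 'addr', populating the pmd entry first if it is
 * empty: with the statically allocated early shadow table when 'early', or
 * with a freshly allocated zeroed page otherwise. The pmd/pud/p4d offset
 * helpers below follow the same pattern at each higher level.
 */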
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
				      bool early)
{
	if (pmd_none(READ_ONCE(*pmdp))) {
		phys_addr_t pte_phys = early ?
				__pa_symbol(kasan_early_shadow_pte)
					: kasan_alloc_zeroed_page(node);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
	}

	return early ? pte_offset_kimg(pmdp, addr)
		     : pte_offset_kernel(pmdp, addr);
}

static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
				      bool early)
{
	if (pud_none(READ_ONCE(*pudp))) {
		phys_addr_t pmd_phys = early ?
				__pa_symbol(kasan_early_shadow_pmd)
					: kasan_alloc_zeroed_page(node);
		__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
	}

	return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
}

static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
				      bool early)
{
	if (p4d_none(READ_ONCE(*p4dp))) {
		phys_addr_t pud_phys = early ?
				__pa_symbol(kasan_early_shadow_pud)
					: kasan_alloc_zeroed_page(node);
		__p4d_populate(p4dp, pud_phys, P4D_TYPE_TABLE);
	}

	return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr);
}

static p4d_t *__init kasan_p4d_offset(pgd_t *pgdp, unsigned long addr, int node,
				      bool early)
{
	if (pgd_none(READ_ONCE(*pgdp))) {
		phys_addr_t p4d_phys = early ?
				__pa_symbol(kasan_early_shadow_p4d)
					: kasan_alloc_zeroed_page(node);
		__pgd_populate(pgdp, p4d_phys, PGD_TYPE_TABLE);
	}

	return early ? p4d_offset_kimg(pgdp, addr) : p4d_offset(pgdp, addr);
}

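/*
 * Populate the shadow ptes for [addr, end). When 'early', every pte is
 * pointed at the shared kasan_early_shadow_page; otherwise each pte gets
 * its own page filled with KASAN_SHADOW_INIT. The pmd/pud/p4d walkers
 * below recurse down to this leaf level.
 */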
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

	do {
		phys_addr_t page_phys = early ?
				__pa_symbol(kasan_early_shadow_page)
					: kasan_alloc_raw_page(node);
		if (!early)
			memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
		next = addr + PAGE_SIZE;
		__set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
	} while (ptep++, addr = next, addr != end && pte_none(__ptep_get(ptep)));
}

static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

	do {
		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmdp, addr, next, node, early);
	} while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
}

static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);

	do {
		next = pud_addr_end(addr, end);
		kasan_pmd_populate(pudp, addr, next, node, early);
	} while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
}

static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	p4d_t *p4dp = kasan_p4d_offset(pgdp, addr, node, early);

	do {
		next = p4d_addr_end(addr, end);
		kasan_pud_populate(p4dp, addr, next, node, early);
	} while (p4dp++, addr = next, addr != end && p4d_none(READ_ONCE(*p4dp)));
}

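/*
 * Walk the kernel page tables from the root level down, populating shadow
 * mappings for [addr, end).
 */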
static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      int node, bool early)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_p4d_populate(pgdp, addr, next, node, early);
	} while (pgdp++, addr = next, addr != end);
}

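/*
 * Minimum alignment of the start and end of the KASAN shadow region,
 * enforced by the BUILD_BUG_ON()s in kasan_early_init().
 */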
#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS > 4
#define SHADOW_ALIGN	P4D_SIZE
#else
#define SHADOW_ALIGN	PUD_SIZE
#endif

/*
 * Return whether 'addr' is aligned to the size covered by a root level
 * descriptor.
 */
static bool __init root_level_aligned(u64 addr)
{
	int shift = (ARM64_HW_PGTABLE_LEVELS(vabits_actual) - 1) * (PAGE_SHIFT - 3);

	return (addr % (PAGE_SIZE << shift)) == 0;
}

/* The early shadow maps everything to a single page of zeroes */
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), SHADOW_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), SHADOW_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, SHADOW_ALIGN));

	if (!root_level_aligned(KASAN_SHADOW_START)) {
		/*
		 * The start address is misaligned, and so the next level table
		 * will be shared with the linear region. This can happen with
		 * 4 or 5 level paging, so install a generic pte_t[] as the
		 * next level. This prevents the kasan_pgd_populate call below
		 * from inserting an entry that refers to the shared KASAN zero
		 * shadow pud_t[]/p4d_t[], which could end up getting corrupted
		 * when the linear region is mapped.
		 */
		static pte_t tbl[PTRS_PER_PTE] __page_aligned_bss;
		pgd_t *pgdp = pgd_offset_k(KASAN_SHADOW_START);

		set_pgd(pgdp, __pgd(__pa_symbol(tbl) | PGD_TYPE_TABLE));
	}

	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
			   true);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
				      int node)
{
	kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

/*
 * Return the descriptor index of 'addr' in the root level table
 */
static int __init root_level_idx(u64 addr)
{
	/*
	 * On 64k pages, the TTBR1 range root tables are extended for 52-bit
	 * virtual addressing, and TTBR1 will simply point to the pgd_t entry
	 * that covers the start of the 48-bit addressable VA space if LVA is
	 * not implemented. This means we need to index the table as usual,
	 * instead of masking off bits based on vabits_actual.
	 */
	u64 vabits = IS_ENABLED(CONFIG_ARM64_64K_PAGES) ? VA_BITS
							: vabits_actual;
	int shift = (ARM64_HW_PGTABLE_LEVELS(vabits) - 1) * (PAGE_SHIFT - 3);

	return (addr & ~_PAGE_OFFSET(vabits)) >> (shift + PAGE_SHIFT);
}

/*
 * Clone a next level table from swapper_pg_dir into tmp_pg_dir
 */
static void __init clone_next_level(u64 addr, pgd_t *tmp_pg_dir, pud_t *pud)
{
	int idx = root_level_idx(addr);
	pgd_t pgd = READ_ONCE(swapper_pg_dir[idx]);
	pud_t *pudp = (pud_t *)__phys_to_kimg(__pgd_to_phys(pgd));

	memcpy(pud, pudp, PAGE_SIZE);
	tmp_pg_dir[idx] = __pgd(__phys_to_pgd_val(__pa_symbol(pud)) |
				PUD_TYPE_TABLE);
}

/*
 * Return the descriptor index of 'addr' in the next level table
 */
static int __init next_level_idx(u64 addr)
{
	int shift = (ARM64_HW_PGTABLE_LEVELS(vabits_actual) - 2) * (PAGE_SHIFT - 3);

	return (addr >> (shift + PAGE_SHIFT)) % PTRS_PER_PTE;
}

/*
 * Dereference the table descriptor at 'pgd_idx' and clear the entries from
 * 'start' to 'end' (exclusive) from the table.
 */
static void __init clear_next_level(int pgd_idx, int start, int end)
{
	pgd_t pgd = READ_ONCE(swapper_pg_dir[pgd_idx]);
	pud_t *pudp = (pud_t *)__phys_to_kimg(__pgd_to_phys(pgd));

	memset(&pudp[start], 0, (end - start) * sizeof(pud_t));
}

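/*
 * Unmap the early shadow for [start, end): clear whole root level
 * descriptors where possible, and descend into the next level table at
 * either boundary that is not aligned to the root level size.
 */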
static void __init clear_shadow(u64 start, u64 end)
{
	int l = root_level_idx(start), m = root_level_idx(end);

	if (!root_level_aligned(start))
		clear_next_level(l++, next_level_idx(start), PTRS_PER_PTE);
	if (!root_level_aligned(end))
		clear_next_level(m, 0, next_level_idx(end));
	memset(&swapper_pg_dir[l], 0, (m - l) * sizeof(pgd_t));
}

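/*
 * Replace the early shadow with the real thing: allocate fresh shadow pages
 * for the kernel image and all memblock memory, map regions that never need
 * real shadow to the shared zero shadow page, and leave the modules/vmalloc
 * shadow to be populated on demand.
 */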
static void __init kasan_init_shadow(void)
{
	static pud_t pud[2][PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start;
	u64 vmalloc_shadow_end;
	phys_addr_t pa_start, pa_end;
	u64 i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(KERNEL_START) & PAGE_MASK;
	kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(KERNEL_END));

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);

	vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);

	/*
	 * We are going to perform proper setup of shadow memory.
	 * First, the early shadow must be unmapped (the clear_shadow() call
	 * below). However, instrumented code can't execute without shadow
	 * memory, so tmp_pg_dir is used to keep the early shadow mapped
	 * until the full shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));

	/*
	 * If the start or end address of the shadow region is not aligned to
	 * the root level size, we have to allocate a temporary next-level table
	 * in each case, clone the next level of descriptors, and install the
	 * table into tmp_pg_dir. Note that with 5 levels of paging, the next
	 * level will in fact be p4d_t, but that makes no difference in this
	 * case.
	 */
	if (!root_level_aligned(KASAN_SHADOW_START))
		clone_next_level(KASAN_SHADOW_START, tmp_pg_dir, pud[0]);
	if (!root_level_aligned(KASAN_SHADOW_END))
		clone_next_level(KASAN_SHADOW_END, tmp_pg_dir, pud[1]);
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_shadow(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
			   early_pfn_to_nid(virt_to_pfn(lm_alias(KERNEL_START))));

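	/*
	 * Regions that never get real shadow (between the end of the linear
	 * map and the modules area, and everything past VMALLOC_END) are
	 * backed by the shared zero shadow page.
	 */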
	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
				    (void *)mod_shadow_start);

	BUILD_BUG_ON(VMALLOC_START != MODULES_END);
	kasan_populate_early_shadow((void *)vmalloc_shadow_end,
				    (void *)KASAN_SHADOW_END);

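	/* Allocate real shadow for every region of memory known to memblock. */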
	for_each_mem_range(i, &pa_start, &pa_end) {
		void *start = (void *)__phys_to_virt(pa_start);
		void *end = (void *)__phys_to_virt(pa_end);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
				   (unsigned long)kasan_mem_to_shadow(end),
				   early_pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KASAN may reuse the contents of kasan_early_shadow_pte directly,
	 * so make sure that it maps the zero shadow page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		__set_pte(&kasan_early_shadow_pte[i],
			  pfn_pte(sym_to_pfn(kasan_early_shadow_page),
				  PAGE_KERNEL_RO));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
}

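/*
 * init_task starts out with kasan_depth == 1 so that reports are suppressed
 * while only the early shadow exists; dropping it to 0 enables reporting.
 */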
static void __init kasan_init_depth(void)
{
	init_task.kasan_depth = 0;
}

#ifdef CONFIG_KASAN_VMALLOC
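/*
 * Called for vm areas set up early in boot (e.g. via
 * vm_area_register_early()), before the on-demand vmalloc shadow can be
 * populated; map real shadow for them up front.
 */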
void __init kasan_populate_early_vm_area_shadow(void *start, unsigned long size)
{
	unsigned long shadow_start, shadow_end;

	if (!is_vmalloc_or_module_addr(start))
		return;

	shadow_start = (unsigned long)kasan_mem_to_shadow(start);
	shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
	shadow_end = (unsigned long)kasan_mem_to_shadow(start + size);
	shadow_end = ALIGN(shadow_end, PAGE_SIZE);
	kasan_map_populate(shadow_start, shadow_end, NUMA_NO_NODE);
}
#endif

void __init kasan_init(void)
{
	kasan_init_shadow();
	kasan_init_depth();
#if defined(CONFIG_KASAN_GENERIC)
	/*
	 * Generic KASAN is now fully initialized.
	 * The software and hardware tag-based modes still require
	 * kasan_init_sw_tags() and kasan_init_hw_tags(), respectively.
	 */
	pr_info("KernelAddressSanitizer initialized (generic)\n");
#endif
}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */