1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2012 Regents of the University of California |
4 | * Copyright (C) 2019 Western Digital Corporation or its affiliates. |
5 | * Copyright (C) 2020 FORTH-ICS/CARV |
6 | * Nick Kossifidis <mick@ics.forth.gr> |
7 | */ |
8 | |
9 | #include <linux/init.h> |
10 | #include <linux/mm.h> |
11 | #include <linux/memblock.h> |
12 | #include <linux/initrd.h> |
13 | #include <linux/swap.h> |
14 | #include <linux/swiotlb.h> |
15 | #include <linux/sizes.h> |
16 | #include <linux/of_fdt.h> |
17 | #include <linux/of_reserved_mem.h> |
18 | #include <linux/libfdt.h> |
19 | #include <linux/set_memory.h> |
20 | #include <linux/dma-map-ops.h> |
21 | #include <linux/crash_dump.h> |
22 | #include <linux/hugetlb.h> |
23 | #ifdef CONFIG_RELOCATABLE |
24 | #include <linux/elf.h> |
25 | #endif |
26 | #include <linux/kfence.h> |
27 | |
28 | #include <asm/fixmap.h> |
29 | #include <asm/io.h> |
30 | #include <asm/numa.h> |
31 | #include <asm/pgtable.h> |
32 | #include <asm/sections.h> |
33 | #include <asm/soc.h> |
34 | #include <asm/tlbflush.h> |
35 | |
36 | #include "../kernel/head.h" |
37 | |
38 | struct kernel_mapping kernel_map __ro_after_init; |
39 | EXPORT_SYMBOL(kernel_map); |
40 | #ifdef CONFIG_XIP_KERNEL |
41 | #define kernel_map (*(struct kernel_mapping *)XIP_FIXUP(&kernel_map)) |
42 | #endif |
43 | |
44 | #ifdef CONFIG_64BIT |
45 | u64 satp_mode __ro_after_init = !IS_ENABLED(CONFIG_XIP_KERNEL) ? SATP_MODE_57 : SATP_MODE_39; |
46 | #else |
47 | u64 satp_mode __ro_after_init = SATP_MODE_32; |
48 | #endif |
49 | EXPORT_SYMBOL(satp_mode); |
50 | |
51 | #ifdef CONFIG_64BIT |
52 | bool pgtable_l4_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL); |
53 | bool pgtable_l5_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL); |
54 | EXPORT_SYMBOL(pgtable_l4_enabled); |
55 | EXPORT_SYMBOL(pgtable_l5_enabled); |
56 | #endif |
57 | |
58 | phys_addr_t phys_ram_base __ro_after_init; |
59 | EXPORT_SYMBOL(phys_ram_base); |
60 | |
61 | unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] |
62 | __page_aligned_bss; |
63 | EXPORT_SYMBOL(empty_zero_page); |
64 | |
65 | extern char _start[]; |
66 | void *_dtb_early_va __initdata; |
67 | uintptr_t _dtb_early_pa __initdata; |
68 | |
69 | phys_addr_t dma32_phys_limit __initdata; |
70 | |
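/* Set the per-zone PFN limits handed to the buddy allocator. */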
71 | static void __init zone_sizes_init(void) |
72 | { |
73 | unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, }; |
74 | |
75 | #ifdef CONFIG_ZONE_DMA32 |
76 | max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit); |
77 | #endif |
78 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; |
79 | |
	free_area_init(max_zone_pfns);
81 | } |
82 | |
83 | #if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM) |
84 | |
85 | #define LOG2_SZ_1K ilog2(SZ_1K) |
86 | #define LOG2_SZ_1M ilog2(SZ_1M) |
87 | #define LOG2_SZ_1G ilog2(SZ_1G) |
88 | #define LOG2_SZ_1T ilog2(SZ_1T) |
89 | |
90 | static inline void print_mlk(char *name, unsigned long b, unsigned long t) |
91 | { |
	pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld kB)\n", name, b, t,
93 | (((t) - (b)) >> LOG2_SZ_1K)); |
94 | } |
95 | |
96 | static inline void print_mlm(char *name, unsigned long b, unsigned long t) |
97 | { |
	pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld MB)\n", name, b, t,
99 | (((t) - (b)) >> LOG2_SZ_1M)); |
100 | } |
101 | |
102 | static inline void print_mlg(char *name, unsigned long b, unsigned long t) |
103 | { |
	pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld GB)\n", name, b, t,
105 | (((t) - (b)) >> LOG2_SZ_1G)); |
106 | } |
107 | |
108 | #ifdef CONFIG_64BIT |
109 | static inline void print_mlt(char *name, unsigned long b, unsigned long t) |
110 | { |
	pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld TB)\n", name, b, t,
112 | (((t) - (b)) >> LOG2_SZ_1T)); |
113 | } |
114 | #else |
115 | #define print_mlt(n, b, t) do {} while (0) |
116 | #endif |
117 | |
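/* Print a memory range using the largest unit that still yields a value >= 10. */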
118 | static inline void print_ml(char *name, unsigned long b, unsigned long t) |
119 | { |
120 | unsigned long diff = t - b; |
121 | |
122 | if (IS_ENABLED(CONFIG_64BIT) && (diff >> LOG2_SZ_1T) >= 10) |
123 | print_mlt(name, b, t); |
124 | else if ((diff >> LOG2_SZ_1G) >= 10) |
125 | print_mlg(name, b, t); |
126 | else if ((diff >> LOG2_SZ_1M) >= 10) |
127 | print_mlm(name, b, t); |
128 | else |
129 | print_mlk(name, b, t); |
130 | } |
131 | |
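/* Dump the kernel virtual memory layout at boot; compiled only with CONFIG_MMU && CONFIG_DEBUG_VM. */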
132 | static void __init print_vm_layout(void) |
133 | { |
	pr_notice("Virtual kernel memory layout:\n");
	print_ml("fixmap", (unsigned long)FIXADDR_START,
		 (unsigned long)FIXADDR_TOP);
	print_ml("pci io", (unsigned long)PCI_IO_START,
		 (unsigned long)PCI_IO_END);
	print_ml("vmemmap", (unsigned long)VMEMMAP_START,
		 (unsigned long)VMEMMAP_END);
	print_ml("vmalloc", (unsigned long)VMALLOC_START,
		 (unsigned long)VMALLOC_END);
#ifdef CONFIG_64BIT
	print_ml("modules", (unsigned long)MODULES_VADDR,
		 (unsigned long)MODULES_END);
#endif
	print_ml("lowmem", (unsigned long)PAGE_OFFSET,
		 (unsigned long)high_memory);
149 | if (IS_ENABLED(CONFIG_64BIT)) { |
150 | #ifdef CONFIG_KASAN |
		print_ml("kasan", KASAN_SHADOW_START, KASAN_SHADOW_END);
152 | #endif |
153 | |
		print_ml("kernel", (unsigned long)kernel_map.virt_addr,
155 | (unsigned long)ADDRESS_SPACE_END); |
156 | } |
157 | } |
158 | #else |
159 | static void print_vm_layout(void) { } |
160 | #endif /* CONFIG_DEBUG_VM */ |
161 | |
162 | void __init mem_init(void) |
163 | { |
164 | #ifdef CONFIG_FLATMEM |
165 | BUG_ON(!mem_map); |
166 | #endif /* CONFIG_FLATMEM */ |
167 | |
	swiotlb_init(max_pfn > PFN_DOWN(dma32_phys_limit), SWIOTLB_VERBOSE);
169 | memblock_free_all(); |
170 | |
171 | print_vm_layout(); |
172 | } |
173 | |
174 | /* Limit the memory size via mem. */ |
175 | static phys_addr_t memory_limit; |
176 | #ifdef CONFIG_XIP_KERNEL |
177 | #define memory_limit (*(phys_addr_t *)XIP_FIXUP(&memory_limit)) |
178 | #endif /* CONFIG_XIP_KERNEL */ |
179 | |
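/* Parse the "mem=" command line parameter and lower memory_limit accordingly. */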
180 | static int __init early_mem(char *p) |
181 | { |
182 | u64 size; |
183 | |
184 | if (!p) |
185 | return 1; |
186 | |
	size = memparse(p, &p) & PAGE_MASK;
	memory_limit = min_t(u64, size, memory_limit);

	pr_notice("Memory limited to %lldMB\n", (u64)memory_limit >> 20);
191 | |
192 | return 0; |
193 | } |
early_param("mem", early_mem);
195 | |
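/*
 * Reserve the kernel image and the DTB in memblock, establish the DRAM base
 * and the PFN limits, then honour the other early reservations (initrd,
 * device tree reserved-memory regions, CMA).
 */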
196 | static void __init setup_bootmem(void) |
197 | { |
198 | phys_addr_t vmlinux_end = __pa_symbol(&_end); |
199 | phys_addr_t max_mapped_addr; |
200 | phys_addr_t phys_ram_end, vmlinux_start; |
201 | |
202 | if (IS_ENABLED(CONFIG_XIP_KERNEL)) |
203 | vmlinux_start = __pa_symbol(&_sdata); |
204 | else |
205 | vmlinux_start = __pa_symbol(&_start); |
206 | |
207 | memblock_enforce_memory_limit(memory_limit); |
208 | |
209 | /* |
210 | * Make sure we align the reservation on PMD_SIZE since we will |
211 | * map the kernel in the linear mapping as read-only: we do not want |
212 | * any allocation to happen between _end and the next pmd aligned page. |
213 | */ |
214 | if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) |
215 | vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK; |
216 | /* |
217 | * Reserve from the start of the kernel to the end of the kernel |
218 | */ |
	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
220 | |
221 | phys_ram_end = memblock_end_of_DRAM(); |
222 | |
223 | /* |
224 | * Make sure we align the start of the memory on a PMD boundary so that |
225 | * at worst, we map the linear mapping with PMD mappings. |
226 | */ |
227 | if (!IS_ENABLED(CONFIG_XIP_KERNEL)) |
228 | phys_ram_base = memblock_start_of_DRAM() & PMD_MASK; |
229 | |
230 | /* |
231 | * In 64-bit, any use of __va/__pa before this point is wrong as we |
232 | * did not know the start of DRAM before. |
233 | */ |
234 | if (IS_ENABLED(CONFIG_64BIT)) |
235 | kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base; |
236 | |
237 | /* |
238 | * memblock allocator is not aware of the fact that last 4K bytes of |
239 | * the addressable memory can not be mapped because of IS_ERR_VALUE |
240 | * macro. Make sure that last 4k bytes are not usable by memblock |
241 | * if end of dram is equal to maximum addressable memory. For 64-bit |
242 | * kernel, this problem can't happen here as the end of the virtual |
	 * address space is occupied by the kernel mapping; in that case, this
	 * check must be done as soon as the kernel mapping base address is
	 * determined.
245 | */ |
246 | if (!IS_ENABLED(CONFIG_64BIT)) { |
247 | max_mapped_addr = __pa(~(ulong)0); |
248 | if (max_mapped_addr == (phys_ram_end - 1)) |
			memblock_set_current_limit(max_mapped_addr - 4096);
250 | } |
251 | |
252 | min_low_pfn = PFN_UP(phys_ram_base); |
253 | max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end); |
254 | high_memory = (void *)(__va(PFN_PHYS(max_low_pfn))); |
255 | |
256 | dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn)); |
257 | set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET); |
258 | |
259 | reserve_initrd_mem(); |
260 | |
261 | /* |
262 | * No allocation should be done before reserving the memory as defined |
263 | * in the device tree, otherwise the allocation could end up in a |
264 | * reserved region. |
265 | */ |
266 | early_init_fdt_scan_reserved_mem(); |
267 | |
268 | /* |
269 | * If DTB is built in, no need to reserve its memblock. |
270 | * Otherwise, do reserve it but avoid using |
271 | * early_init_fdt_reserve_self() since __pa() does |
272 | * not work for DTB pointers that are fixmap addresses |
273 | */ |
274 | if (!IS_ENABLED(CONFIG_BUILTIN_DTB)) |
		memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
276 | |
	dma_contiguous_reserve(dma32_phys_limit);
278 | if (IS_ENABLED(CONFIG_64BIT)) |
279 | hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT); |
280 | } |
281 | |
282 | #ifdef CONFIG_MMU |
283 | struct pt_alloc_ops pt_ops __initdata; |
284 | |
285 | pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; |
286 | pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss; |
287 | static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss; |
288 | |
289 | pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); |
290 | |
291 | #ifdef CONFIG_XIP_KERNEL |
292 | #define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops)) |
293 | #define trampoline_pg_dir ((pgd_t *)XIP_FIXUP(trampoline_pg_dir)) |
294 | #define fixmap_pte ((pte_t *)XIP_FIXUP(fixmap_pte)) |
295 | #define early_pg_dir ((pgd_t *)XIP_FIXUP(early_pg_dir)) |
296 | #endif /* CONFIG_XIP_KERNEL */ |
297 | |
298 | static const pgprot_t protection_map[16] = { |
299 | [VM_NONE] = PAGE_NONE, |
300 | [VM_READ] = PAGE_READ, |
301 | [VM_WRITE] = PAGE_COPY, |
302 | [VM_WRITE | VM_READ] = PAGE_COPY, |
303 | [VM_EXEC] = PAGE_EXEC, |
304 | [VM_EXEC | VM_READ] = PAGE_READ_EXEC, |
305 | [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC, |
306 | [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC, |
307 | [VM_SHARED] = PAGE_NONE, |
308 | [VM_SHARED | VM_READ] = PAGE_READ, |
309 | [VM_SHARED | VM_WRITE] = PAGE_SHARED, |
310 | [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, |
311 | [VM_SHARED | VM_EXEC] = PAGE_EXEC, |
312 | [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READ_EXEC, |
313 | [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC, |
314 | [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC |
315 | }; |
316 | DECLARE_VM_GET_PAGE_PROT |
317 | |
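/* Install (or clear, when prot is 0) a fixmap PTE and flush the local TLB. */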
318 | void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) |
319 | { |
320 | unsigned long addr = __fix_to_virt(idx); |
321 | pte_t *ptep; |
322 | |
323 | BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses); |
324 | |
	ptep = &fixmap_pte[pte_index(addr)];

	if (pgprot_val(prot))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
	else
		pte_clear(&init_mm, addr, ptep);
331 | local_flush_tlb_page(addr); |
332 | } |
333 | |
334 | static inline pte_t *__init get_pte_virt_early(phys_addr_t pa) |
335 | { |
336 | return (pte_t *)((uintptr_t)pa); |
337 | } |
338 | |
339 | static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa) |
340 | { |
341 | clear_fixmap(FIX_PTE); |
342 | return (pte_t *)set_fixmap_offset(FIX_PTE, pa); |
343 | } |
344 | |
345 | static inline pte_t *__init get_pte_virt_late(phys_addr_t pa) |
346 | { |
347 | return (pte_t *) __va(pa); |
348 | } |
349 | |
350 | static inline phys_addr_t __init alloc_pte_early(uintptr_t va) |
351 | { |
352 | /* |
353 | * We only create PMD or PGD early mappings so we |
354 | * should never reach here with MMU disabled. |
355 | */ |
356 | BUG(); |
357 | } |
358 | |
359 | static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va) |
360 | { |
361 | return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); |
362 | } |
363 | |
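/* Late PTE allocator: used once the MMU is on and the page allocator is available. */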
364 | static phys_addr_t __init alloc_pte_late(uintptr_t va) |
365 | { |
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
367 | |
368 | BUG_ON(!ptdesc || !pagetable_pte_ctor(ptdesc)); |
369 | return __pa((pte_t *)ptdesc_address(ptdesc)); |
370 | } |
371 | |
372 | static void __init create_pte_mapping(pte_t *ptep, |
373 | uintptr_t va, phys_addr_t pa, |
374 | phys_addr_t sz, pgprot_t prot) |
375 | { |
	uintptr_t pte_idx = pte_index(va);

	BUG_ON(sz != PAGE_SIZE);

	if (pte_none(ptep[pte_idx]))
		ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
382 | } |
383 | |
384 | #ifndef __PAGETABLE_PMD_FOLDED |
385 | |
386 | static pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss; |
387 | static pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss; |
388 | static pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE); |
389 | |
390 | #ifdef CONFIG_XIP_KERNEL |
391 | #define trampoline_pmd ((pmd_t *)XIP_FIXUP(trampoline_pmd)) |
392 | #define fixmap_pmd ((pmd_t *)XIP_FIXUP(fixmap_pmd)) |
393 | #define early_pmd ((pmd_t *)XIP_FIXUP(early_pmd)) |
394 | #endif /* CONFIG_XIP_KERNEL */ |
395 | |
396 | static p4d_t trampoline_p4d[PTRS_PER_P4D] __page_aligned_bss; |
397 | static p4d_t fixmap_p4d[PTRS_PER_P4D] __page_aligned_bss; |
398 | static p4d_t early_p4d[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE); |
399 | |
400 | #ifdef CONFIG_XIP_KERNEL |
401 | #define trampoline_p4d ((p4d_t *)XIP_FIXUP(trampoline_p4d)) |
402 | #define fixmap_p4d ((p4d_t *)XIP_FIXUP(fixmap_p4d)) |
403 | #define early_p4d ((p4d_t *)XIP_FIXUP(early_p4d)) |
404 | #endif /* CONFIG_XIP_KERNEL */ |
405 | |
406 | static pud_t trampoline_pud[PTRS_PER_PUD] __page_aligned_bss; |
407 | static pud_t fixmap_pud[PTRS_PER_PUD] __page_aligned_bss; |
408 | static pud_t early_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE); |
409 | |
410 | #ifdef CONFIG_XIP_KERNEL |
411 | #define trampoline_pud ((pud_t *)XIP_FIXUP(trampoline_pud)) |
412 | #define fixmap_pud ((pud_t *)XIP_FIXUP(fixmap_pud)) |
413 | #define early_pud ((pud_t *)XIP_FIXUP(early_pud)) |
414 | #endif /* CONFIG_XIP_KERNEL */ |
415 | |
416 | static pmd_t *__init get_pmd_virt_early(phys_addr_t pa) |
417 | { |
418 | /* Before MMU is enabled */ |
419 | return (pmd_t *)((uintptr_t)pa); |
420 | } |
421 | |
422 | static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa) |
423 | { |
424 | clear_fixmap(FIX_PMD); |
425 | return (pmd_t *)set_fixmap_offset(FIX_PMD, pa); |
426 | } |
427 | |
428 | static pmd_t *__init get_pmd_virt_late(phys_addr_t pa) |
429 | { |
430 | return (pmd_t *) __va(pa); |
431 | } |
432 | |
433 | static phys_addr_t __init alloc_pmd_early(uintptr_t va) |
434 | { |
435 | BUG_ON((va - kernel_map.virt_addr) >> PUD_SHIFT); |
436 | |
437 | return (uintptr_t)early_pmd; |
438 | } |
439 | |
440 | static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va) |
441 | { |
442 | return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); |
443 | } |
444 | |
445 | static phys_addr_t __init alloc_pmd_late(uintptr_t va) |
446 | { |
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
448 | |
449 | BUG_ON(!ptdesc || !pagetable_pmd_ctor(ptdesc)); |
450 | return __pa((pmd_t *)ptdesc_address(ptdesc)); |
451 | } |
452 | |
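/*
 * Install a leaf PMD when sz == PMD_SIZE, otherwise walk down (allocating a
 * PTE table if needed) and map at PTE granularity.
 */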
453 | static void __init create_pmd_mapping(pmd_t *pmdp, |
454 | uintptr_t va, phys_addr_t pa, |
455 | phys_addr_t sz, pgprot_t prot) |
456 | { |
457 | pte_t *ptep; |
458 | phys_addr_t pte_phys; |
	uintptr_t pmd_idx = pmd_index(va);

	if (sz == PMD_SIZE) {
		if (pmd_none(pmdp[pmd_idx]))
			pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
		return;
	}

	if (pmd_none(pmdp[pmd_idx])) {
		pte_phys = pt_ops.alloc_pte(va);
		pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
470 | ptep = pt_ops.get_pte_virt(pte_phys); |
471 | memset(ptep, 0, PAGE_SIZE); |
472 | } else { |
473 | pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx])); |
474 | ptep = pt_ops.get_pte_virt(pte_phys); |
475 | } |
476 | |
477 | create_pte_mapping(ptep, va, pa, sz, prot); |
478 | } |
479 | |
480 | static pud_t *__init get_pud_virt_early(phys_addr_t pa) |
481 | { |
482 | return (pud_t *)((uintptr_t)pa); |
483 | } |
484 | |
485 | static pud_t *__init get_pud_virt_fixmap(phys_addr_t pa) |
486 | { |
487 | clear_fixmap(FIX_PUD); |
488 | return (pud_t *)set_fixmap_offset(FIX_PUD, pa); |
489 | } |
490 | |
491 | static pud_t *__init get_pud_virt_late(phys_addr_t pa) |
492 | { |
493 | return (pud_t *)__va(pa); |
494 | } |
495 | |
496 | static phys_addr_t __init alloc_pud_early(uintptr_t va) |
497 | { |
498 | /* Only one PUD is available for early mapping */ |
499 | BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT); |
500 | |
501 | return (uintptr_t)early_pud; |
502 | } |
503 | |
504 | static phys_addr_t __init alloc_pud_fixmap(uintptr_t va) |
505 | { |
506 | return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); |
507 | } |
508 | |
509 | static phys_addr_t alloc_pud_late(uintptr_t va) |
510 | { |
511 | unsigned long vaddr; |
512 | |
513 | vaddr = __get_free_page(GFP_KERNEL); |
514 | BUG_ON(!vaddr); |
515 | return __pa(vaddr); |
516 | } |
517 | |
518 | static p4d_t *__init get_p4d_virt_early(phys_addr_t pa) |
519 | { |
520 | return (p4d_t *)((uintptr_t)pa); |
521 | } |
522 | |
523 | static p4d_t *__init get_p4d_virt_fixmap(phys_addr_t pa) |
524 | { |
525 | clear_fixmap(FIX_P4D); |
526 | return (p4d_t *)set_fixmap_offset(FIX_P4D, pa); |
527 | } |
528 | |
529 | static p4d_t *__init get_p4d_virt_late(phys_addr_t pa) |
530 | { |
531 | return (p4d_t *)__va(pa); |
532 | } |
533 | |
534 | static phys_addr_t __init alloc_p4d_early(uintptr_t va) |
535 | { |
536 | /* Only one P4D is available for early mapping */ |
537 | BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT); |
538 | |
539 | return (uintptr_t)early_p4d; |
540 | } |
541 | |
542 | static phys_addr_t __init alloc_p4d_fixmap(uintptr_t va) |
543 | { |
544 | return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); |
545 | } |
546 | |
547 | static phys_addr_t alloc_p4d_late(uintptr_t va) |
548 | { |
549 | unsigned long vaddr; |
550 | |
551 | vaddr = __get_free_page(GFP_KERNEL); |
552 | BUG_ON(!vaddr); |
553 | return __pa(vaddr); |
554 | } |
555 | |
556 | static void __init create_pud_mapping(pud_t *pudp, |
557 | uintptr_t va, phys_addr_t pa, |
558 | phys_addr_t sz, pgprot_t prot) |
559 | { |
560 | pmd_t *nextp; |
561 | phys_addr_t next_phys; |
562 | uintptr_t pud_index = pud_index(va); |
563 | |
564 | if (sz == PUD_SIZE) { |
		if (pud_val(pudp[pud_index]) == 0)
			pudp[pud_index] = pfn_pud(PFN_DOWN(pa), prot);
567 | return; |
568 | } |
569 | |
	if (pud_val(pudp[pud_index]) == 0) {
		next_phys = pt_ops.alloc_pmd(va);
		pudp[pud_index] = pfn_pud(PFN_DOWN(next_phys), PAGE_TABLE);
573 | nextp = pt_ops.get_pmd_virt(next_phys); |
574 | memset(nextp, 0, PAGE_SIZE); |
575 | } else { |
576 | next_phys = PFN_PHYS(_pud_pfn(pudp[pud_index])); |
577 | nextp = pt_ops.get_pmd_virt(next_phys); |
578 | } |
579 | |
	create_pmd_mapping(nextp, va, pa, sz, prot);
581 | } |
582 | |
583 | static void __init create_p4d_mapping(p4d_t *p4dp, |
584 | uintptr_t va, phys_addr_t pa, |
585 | phys_addr_t sz, pgprot_t prot) |
586 | { |
587 | pud_t *nextp; |
588 | phys_addr_t next_phys; |
589 | uintptr_t p4d_index = p4d_index(va); |
590 | |
591 | if (sz == P4D_SIZE) { |
		if (p4d_val(p4dp[p4d_index]) == 0)
593 | p4dp[p4d_index] = pfn_p4d(PFN_DOWN(pa), prot); |
594 | return; |
595 | } |
596 | |
	if (p4d_val(p4dp[p4d_index]) == 0) {
598 | next_phys = pt_ops.alloc_pud(va); |
599 | p4dp[p4d_index] = pfn_p4d(PFN_DOWN(next_phys), PAGE_TABLE); |
600 | nextp = pt_ops.get_pud_virt(next_phys); |
601 | memset(nextp, 0, PAGE_SIZE); |
602 | } else { |
603 | next_phys = PFN_PHYS(_p4d_pfn(p4dp[p4d_index])); |
604 | nextp = pt_ops.get_pud_virt(next_phys); |
605 | } |
606 | |
	create_pud_mapping(nextp, va, pa, sz, prot);
608 | } |
609 | |
610 | #define pgd_next_t p4d_t |
611 | #define alloc_pgd_next(__va) (pgtable_l5_enabled ? \ |
612 | pt_ops.alloc_p4d(__va) : (pgtable_l4_enabled ? \ |
613 | pt_ops.alloc_pud(__va) : pt_ops.alloc_pmd(__va))) |
614 | #define get_pgd_next_virt(__pa) (pgtable_l5_enabled ? \ |
615 | pt_ops.get_p4d_virt(__pa) : (pgd_next_t *)(pgtable_l4_enabled ? \ |
616 | pt_ops.get_pud_virt(__pa) : (pud_t *)pt_ops.get_pmd_virt(__pa))) |
617 | #define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot) \ |
618 | (pgtable_l5_enabled ? \ |
619 | create_p4d_mapping(__nextp, __va, __pa, __sz, __prot) : \ |
620 | (pgtable_l4_enabled ? \ |
621 | create_pud_mapping((pud_t *)__nextp, __va, __pa, __sz, __prot) : \ |
622 | create_pmd_mapping((pmd_t *)__nextp, __va, __pa, __sz, __prot))) |
623 | #define fixmap_pgd_next (pgtable_l5_enabled ? \ |
624 | (uintptr_t)fixmap_p4d : (pgtable_l4_enabled ? \ |
625 | (uintptr_t)fixmap_pud : (uintptr_t)fixmap_pmd)) |
626 | #define trampoline_pgd_next (pgtable_l5_enabled ? \ |
627 | (uintptr_t)trampoline_p4d : (pgtable_l4_enabled ? \ |
628 | (uintptr_t)trampoline_pud : (uintptr_t)trampoline_pmd)) |
629 | #else |
630 | #define pgd_next_t pte_t |
631 | #define alloc_pgd_next(__va) pt_ops.alloc_pte(__va) |
632 | #define get_pgd_next_virt(__pa) pt_ops.get_pte_virt(__pa) |
633 | #define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot) \ |
634 | create_pte_mapping(__nextp, __va, __pa, __sz, __prot) |
635 | #define fixmap_pgd_next ((uintptr_t)fixmap_pte) |
636 | #define create_p4d_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0) |
637 | #define create_pud_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0) |
638 | #define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0) |
639 | #endif /* __PAGETABLE_PMD_FOLDED */ |
640 | |
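/*
 * Install a leaf PGD entry for PGDIR_SIZE mappings, otherwise descend into
 * the next level, which depends on pgtable_l5_enabled/pgtable_l4_enabled
 * (see the alloc_pgd_next/get_pgd_next_virt helpers above).
 */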
641 | void __init create_pgd_mapping(pgd_t *pgdp, |
642 | uintptr_t va, phys_addr_t pa, |
643 | phys_addr_t sz, pgprot_t prot) |
644 | { |
645 | pgd_next_t *nextp; |
646 | phys_addr_t next_phys; |
647 | uintptr_t pgd_idx = pgd_index(va); |
648 | |
649 | if (sz == PGDIR_SIZE) { |
		if (pgd_val(pgdp[pgd_idx]) == 0)
651 | pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot); |
652 | return; |
653 | } |
654 | |
	if (pgd_val(pgdp[pgd_idx]) == 0) {
656 | next_phys = alloc_pgd_next(va); |
657 | pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE); |
658 | nextp = get_pgd_next_virt(next_phys); |
659 | memset(nextp, 0, PAGE_SIZE); |
660 | } else { |
661 | next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx])); |
662 | nextp = get_pgd_next_virt(next_phys); |
663 | } |
664 | |
665 | create_pgd_next_mapping(nextp, va, pa, sz, prot); |
666 | } |
667 | |
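/* Return the largest mapping size allowed by the alignment of pa/va and the remaining size. */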
668 | static uintptr_t __init best_map_size(phys_addr_t pa, uintptr_t va, |
669 | phys_addr_t size) |
670 | { |
671 | if (pgtable_l5_enabled && |
672 | !(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE) |
673 | return P4D_SIZE; |
674 | |
675 | if (pgtable_l4_enabled && |
676 | !(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE) |
677 | return PUD_SIZE; |
678 | |
679 | if (IS_ENABLED(CONFIG_64BIT) && |
680 | !(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE) |
681 | return PMD_SIZE; |
682 | |
683 | return PAGE_SIZE; |
684 | } |
685 | |
686 | #ifdef CONFIG_XIP_KERNEL |
687 | #define phys_ram_base (*(phys_addr_t *)XIP_FIXUP(&phys_ram_base)) |
688 | extern char _xiprom[], _exiprom[], __data_loc; |
689 | |
690 | /* called from head.S with MMU off */ |
691 | asmlinkage void __init __copy_data(void) |
692 | { |
693 | void *from = (void *)(&__data_loc); |
694 | void *to = (void *)CONFIG_PHYS_RAM_BASE; |
695 | size_t sz = (size_t)((uintptr_t)(&_end) - (uintptr_t)(&_sdata)); |
696 | |
697 | memcpy(to, from, sz); |
698 | } |
699 | #endif |
700 | |
701 | #ifdef CONFIG_STRICT_KERNEL_RWX |
702 | static __init pgprot_t pgprot_from_va(uintptr_t va) |
703 | { |
704 | if (is_va_kernel_text(va)) |
705 | return PAGE_KERNEL_READ_EXEC; |
706 | |
707 | /* |
708 | * In 64-bit kernel, the kernel mapping is outside the linear mapping so |
709 | * we must protect its linear mapping alias from being executed and |
710 | * written. |
	 * The rodata section is then marked read-only in mark_rodata_ro().
712 | */ |
713 | if (IS_ENABLED(CONFIG_64BIT) && is_va_kernel_lm_alias_text(va)) |
714 | return PAGE_KERNEL_READ; |
715 | |
716 | return PAGE_KERNEL; |
717 | } |
718 | |
719 | void mark_rodata_ro(void) |
720 | { |
721 | set_kernel_memory(__start_rodata, _data, set_memory_ro); |
722 | if (IS_ENABLED(CONFIG_64BIT)) |
723 | set_kernel_memory(lm_alias(__start_rodata), lm_alias(_data), |
724 | set_memory_ro); |
725 | } |
726 | #else |
727 | static __init pgprot_t pgprot_from_va(uintptr_t va) |
728 | { |
729 | if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va)) |
730 | return PAGE_KERNEL; |
731 | |
732 | return PAGE_KERNEL_EXEC; |
733 | } |
734 | #endif /* CONFIG_STRICT_KERNEL_RWX */ |
735 | |
736 | #if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL) |
737 | u64 __pi_set_satp_mode_from_cmdline(uintptr_t dtb_pa); |
738 | |
739 | static void __init disable_pgtable_l5(void) |
740 | { |
741 | pgtable_l5_enabled = false; |
742 | kernel_map.page_offset = PAGE_OFFSET_L4; |
743 | satp_mode = SATP_MODE_48; |
744 | } |
745 | |
746 | static void __init disable_pgtable_l4(void) |
747 | { |
748 | pgtable_l4_enabled = false; |
749 | kernel_map.page_offset = PAGE_OFFSET_L3; |
750 | satp_mode = SATP_MODE_39; |
751 | } |
752 | |
753 | static int __init print_no4lvl(char *p) |
754 | { |
	pr_info("Disabled 4-level and 5-level paging");
756 | return 0; |
757 | } |
early_param("no4lvl", print_no4lvl);
759 | |
760 | static int __init print_no5lvl(char *p) |
761 | { |
	pr_info("Disabled 5-level paging");
763 | return 0; |
764 | } |
early_param("no5lvl", print_no5lvl);
766 | |
767 | static void __init set_mmap_rnd_bits_max(void) |
768 | { |
769 | mmap_rnd_bits_max = MMAP_VA_BITS - PAGE_SHIFT - 3; |
770 | } |
771 | |
772 | /* |
773 | * There is a simple way to determine if 4-level is supported by the |
774 | * underlying hardware: establish 1:1 mapping in 4-level page table mode |
775 | * then read SATP to see if the configuration was taken into account |
776 | * meaning sv48 is supported. |
777 | */ |
778 | static __init void set_satp_mode(uintptr_t dtb_pa) |
779 | { |
780 | u64 identity_satp, hw_satp; |
781 | uintptr_t set_satp_mode_pmd = ((unsigned long)set_satp_mode) & PMD_MASK; |
782 | u64 satp_mode_cmdline = __pi_set_satp_mode_from_cmdline(dtb_pa); |
783 | |
784 | if (satp_mode_cmdline == SATP_MODE_57) { |
785 | disable_pgtable_l5(); |
786 | } else if (satp_mode_cmdline == SATP_MODE_48) { |
787 | disable_pgtable_l5(); |
788 | disable_pgtable_l4(); |
789 | return; |
790 | } |
791 | |
792 | create_p4d_mapping(early_p4d, |
793 | set_satp_mode_pmd, (uintptr_t)early_pud, |
794 | P4D_SIZE, PAGE_TABLE); |
795 | create_pud_mapping(early_pud, |
796 | set_satp_mode_pmd, (uintptr_t)early_pmd, |
797 | PUD_SIZE, PAGE_TABLE); |
798 | /* Handle the case where set_satp_mode straddles 2 PMDs */ |
	create_pmd_mapping(early_pmd,
			   set_satp_mode_pmd, set_satp_mode_pmd,
			   PMD_SIZE, PAGE_KERNEL_EXEC);
	create_pmd_mapping(early_pmd,
			   set_satp_mode_pmd + PMD_SIZE,
			   set_satp_mode_pmd + PMD_SIZE,
			   PMD_SIZE, PAGE_KERNEL_EXEC);
806 | retry: |
807 | create_pgd_mapping(early_pg_dir, |
808 | set_satp_mode_pmd, |
809 | pgtable_l5_enabled ? |
810 | (uintptr_t)early_p4d : (uintptr_t)early_pud, |
811 | PGDIR_SIZE, PAGE_TABLE); |
812 | |
813 | identity_satp = PFN_DOWN((uintptr_t)&early_pg_dir) | satp_mode; |
814 | |
815 | local_flush_tlb_all(); |
816 | csr_write(CSR_SATP, identity_satp); |
817 | hw_satp = csr_swap(CSR_SATP, 0ULL); |
818 | local_flush_tlb_all(); |
819 | |
820 | if (hw_satp != identity_satp) { |
821 | if (pgtable_l5_enabled) { |
822 | disable_pgtable_l5(); |
823 | memset(early_pg_dir, 0, PAGE_SIZE); |
824 | goto retry; |
825 | } |
826 | disable_pgtable_l4(); |
827 | } |
828 | |
829 | memset(early_pg_dir, 0, PAGE_SIZE); |
830 | memset(early_p4d, 0, PAGE_SIZE); |
831 | memset(early_pud, 0, PAGE_SIZE); |
832 | memset(early_pmd, 0, PAGE_SIZE); |
833 | } |
834 | #endif |
835 | |
836 | /* |
837 | * setup_vm() is called from head.S with MMU-off. |
838 | * |
839 | * Following requirements should be honoured for setup_vm() to work |
840 | * correctly: |
841 | * 1) It should use PC-relative addressing for accessing kernel symbols. |
842 | * To achieve this we always use GCC cmodel=medany. |
843 | * 2) The compiler instrumentation for FTRACE will not work for setup_vm() |
844 | * so disable compiler instrumentation when FTRACE is enabled. |
845 | * |
846 | * Currently, the above requirements are honoured by using custom CFLAGS |
847 | * for init.o in mm/Makefile. |
848 | */ |
849 | |
850 | #ifndef __riscv_cmodel_medany |
851 | #error "setup_vm() is called from head.S before relocate so it should not use absolute addressing." |
852 | #endif |
853 | |
854 | #ifdef CONFIG_RELOCATABLE |
855 | extern unsigned long __rela_dyn_start, __rela_dyn_end; |
856 | |
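/*
 * Apply the R_RISCV_RELATIVE relocations from .rela.dyn so the kernel can run
 * at a virtual address different from the one it was linked at.
 */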
857 | static void __init relocate_kernel(void) |
858 | { |
859 | Elf64_Rela *rela = (Elf64_Rela *)&__rela_dyn_start; |
860 | /* |
861 | * This holds the offset between the linked virtual address and the |
862 | * relocated virtual address. |
863 | */ |
864 | uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR; |
865 | /* |
866 | * This holds the offset between kernel linked virtual address and |
867 | * physical address. |
868 | */ |
869 | uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr; |
870 | |
871 | for ( ; rela < (Elf64_Rela *)&__rela_dyn_end; rela++) { |
872 | Elf64_Addr addr = (rela->r_offset - va_kernel_link_pa_offset); |
873 | Elf64_Addr relocated_addr = rela->r_addend; |
874 | |
875 | if (rela->r_info != R_RISCV_RELATIVE) |
876 | continue; |
877 | |
878 | /* |
879 | * Make sure to not relocate vdso symbols like rt_sigreturn |
880 | * which are linked from the address 0 in vmlinux since |
881 | * vdso symbol addresses are actually used as an offset from |
882 | * mm->context.vdso in VDSO_OFFSET macro. |
883 | */ |
884 | if (relocated_addr >= KERNEL_LINK_ADDR) |
885 | relocated_addr += reloc_offset; |
886 | |
887 | *(Elf64_Addr *)addr = relocated_addr; |
888 | } |
889 | } |
890 | #endif /* CONFIG_RELOCATABLE */ |
891 | |
892 | #ifdef CONFIG_XIP_KERNEL |
893 | static void __init create_kernel_page_table(pgd_t *pgdir, |
894 | __always_unused bool early) |
895 | { |
896 | uintptr_t va, end_va; |
897 | |
898 | /* Map the flash resident part */ |
899 | end_va = kernel_map.virt_addr + kernel_map.xiprom_sz; |
900 | for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE) |
901 | create_pgd_mapping(pgdir, va, |
902 | kernel_map.xiprom + (va - kernel_map.virt_addr), |
903 | PMD_SIZE, PAGE_KERNEL_EXEC); |
904 | |
905 | /* Map the data in RAM */ |
906 | end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size; |
907 | for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE) |
908 | create_pgd_mapping(pgdir, va, |
909 | kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)), |
910 | PMD_SIZE, PAGE_KERNEL); |
911 | } |
912 | #else |
913 | static void __init create_kernel_page_table(pgd_t *pgdir, bool early) |
914 | { |
915 | uintptr_t va, end_va; |
916 | |
917 | end_va = kernel_map.virt_addr + kernel_map.size; |
918 | for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE) |
		create_pgd_mapping(pgdir, va,
				   kernel_map.phys_addr + (va - kernel_map.virt_addr),
				   PMD_SIZE,
				   early ?
					PAGE_KERNEL_EXEC : pgprot_from_va(va));
924 | } |
925 | #endif |
926 | |
927 | /* |
928 | * Setup a 4MB mapping that encompasses the device tree: for 64-bit kernel, |
929 | * this means 2 PMD entries whereas for 32-bit kernel, this is only 1 PGDIR |
930 | * entry. |
931 | */ |
932 | static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va, |
933 | uintptr_t dtb_pa) |
934 | { |
935 | #ifndef CONFIG_BUILTIN_DTB |
936 | uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1); |
937 | |
938 | /* Make sure the fdt fixmap address is always aligned on PMD size */ |
939 | BUILD_BUG_ON(FIX_FDT % (PMD_SIZE / PAGE_SIZE)); |
940 | |
941 | /* In 32-bit only, the fdt lies in its own PGD */ |
942 | if (!IS_ENABLED(CONFIG_64BIT)) { |
943 | create_pgd_mapping(early_pg_dir, fix_fdt_va, |
944 | pa, MAX_FDT_SIZE, PAGE_KERNEL); |
945 | } else { |
		create_pmd_mapping(fixmap_pmd, fix_fdt_va,
				   pa, PMD_SIZE, PAGE_KERNEL);
		create_pmd_mapping(fixmap_pmd, fix_fdt_va + PMD_SIZE,
				   pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
950 | } |
951 | |
952 | dtb_early_va = (void *)fix_fdt_va + (dtb_pa & (PMD_SIZE - 1)); |
953 | #else |
954 | /* |
955 | * For 64-bit kernel, __va can't be used since it would return a linear |
956 | * mapping address whereas dtb_early_va will be used before |
957 | * setup_vm_final installs the linear mapping. For 32-bit kernel, as the |
958 | * kernel is mapped in the linear mapping, that makes no difference. |
959 | */ |
960 | dtb_early_va = kernel_mapping_pa_to_va(dtb_pa); |
961 | #endif |
962 | |
963 | dtb_early_pa = dtb_pa; |
964 | } |
965 | |
966 | /* |
967 | * MMU is not enabled, the page tables are allocated directly using |
968 | * early_pmd/pud/p4d and the address returned is the physical one. |
969 | */ |
970 | static void __init pt_ops_set_early(void) |
971 | { |
972 | pt_ops.alloc_pte = alloc_pte_early; |
973 | pt_ops.get_pte_virt = get_pte_virt_early; |
974 | #ifndef __PAGETABLE_PMD_FOLDED |
975 | pt_ops.alloc_pmd = alloc_pmd_early; |
976 | pt_ops.get_pmd_virt = get_pmd_virt_early; |
977 | pt_ops.alloc_pud = alloc_pud_early; |
978 | pt_ops.get_pud_virt = get_pud_virt_early; |
979 | pt_ops.alloc_p4d = alloc_p4d_early; |
980 | pt_ops.get_p4d_virt = get_p4d_virt_early; |
981 | #endif |
982 | } |
983 | |
984 | /* |
985 | * MMU is enabled but page table setup is not complete yet. |
986 | * fixmap page table alloc functions must be used as a means to temporarily |
987 | * map the allocated physical pages since the linear mapping does not exist yet. |
988 | * |
989 | * Note that this is called with MMU disabled, hence kernel_mapping_pa_to_va, |
990 | * but it will be used as described above. |
991 | */ |
992 | static void __init pt_ops_set_fixmap(void) |
993 | { |
994 | pt_ops.alloc_pte = kernel_mapping_pa_to_va(alloc_pte_fixmap); |
995 | pt_ops.get_pte_virt = kernel_mapping_pa_to_va(get_pte_virt_fixmap); |
996 | #ifndef __PAGETABLE_PMD_FOLDED |
997 | pt_ops.alloc_pmd = kernel_mapping_pa_to_va(alloc_pmd_fixmap); |
998 | pt_ops.get_pmd_virt = kernel_mapping_pa_to_va(get_pmd_virt_fixmap); |
999 | pt_ops.alloc_pud = kernel_mapping_pa_to_va(alloc_pud_fixmap); |
1000 | pt_ops.get_pud_virt = kernel_mapping_pa_to_va(get_pud_virt_fixmap); |
1001 | pt_ops.alloc_p4d = kernel_mapping_pa_to_va(alloc_p4d_fixmap); |
1002 | pt_ops.get_p4d_virt = kernel_mapping_pa_to_va(get_p4d_virt_fixmap); |
1003 | #endif |
1004 | } |
1005 | |
1006 | /* |
1007 | * MMU is enabled and page table setup is complete, so from now, we can use |
1008 | * generic page allocation functions to setup page table. |
1009 | */ |
1010 | static void __init pt_ops_set_late(void) |
1011 | { |
1012 | pt_ops.alloc_pte = alloc_pte_late; |
1013 | pt_ops.get_pte_virt = get_pte_virt_late; |
1014 | #ifndef __PAGETABLE_PMD_FOLDED |
1015 | pt_ops.alloc_pmd = alloc_pmd_late; |
1016 | pt_ops.get_pmd_virt = get_pmd_virt_late; |
1017 | pt_ops.alloc_pud = alloc_pud_late; |
1018 | pt_ops.get_pud_virt = get_pud_virt_late; |
1019 | pt_ops.alloc_p4d = alloc_p4d_late; |
1020 | pt_ops.get_p4d_virt = get_p4d_virt_late; |
1021 | #endif |
1022 | } |
1023 | |
1024 | #ifdef CONFIG_RANDOMIZE_BASE |
1025 | extern bool __init __pi_set_nokaslr_from_cmdline(uintptr_t dtb_pa); |
1026 | extern u64 __init __pi_get_kaslr_seed(uintptr_t dtb_pa); |
1027 | |
1028 | static int __init print_nokaslr(char *p) |
1029 | { |
	pr_info("Disabled KASLR");
1031 | return 0; |
1032 | } |
early_param("nokaslr", print_nokaslr);
1034 | |
1035 | unsigned long kaslr_offset(void) |
1036 | { |
1037 | return kernel_map.virt_offset; |
1038 | } |
1039 | #endif |
1040 | |
1041 | asmlinkage void __init setup_vm(uintptr_t dtb_pa) |
1042 | { |
1043 | pmd_t __maybe_unused fix_bmap_spmd, fix_bmap_epmd; |
1044 | |
1045 | #ifdef CONFIG_RANDOMIZE_BASE |
1046 | if (!__pi_set_nokaslr_from_cmdline(dtb_pa)) { |
1047 | u64 kaslr_seed = __pi_get_kaslr_seed(dtb_pa); |
1048 | u32 kernel_size = (uintptr_t)(&_end) - (uintptr_t)(&_start); |
1049 | u32 nr_pos; |
1050 | |
1051 | /* |
1052 | * Compute the number of positions available: we are limited |
1053 | * by the early page table that only has one PUD and we must |
1054 | * be aligned on PMD_SIZE. |
1055 | */ |
1056 | nr_pos = (PUD_SIZE - kernel_size) / PMD_SIZE; |
1057 | |
1058 | kernel_map.virt_offset = (kaslr_seed % nr_pos) * PMD_SIZE; |
1059 | } |
1060 | #endif |
1061 | |
1062 | kernel_map.virt_addr = KERNEL_LINK_ADDR + kernel_map.virt_offset; |
1063 | |
1064 | #ifdef CONFIG_XIP_KERNEL |
1065 | #ifdef CONFIG_64BIT |
1066 | kernel_map.page_offset = PAGE_OFFSET_L3; |
1067 | #else |
1068 | kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL); |
1069 | #endif |
1070 | kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR; |
1071 | kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom); |
1072 | |
1073 | phys_ram_base = CONFIG_PHYS_RAM_BASE; |
1074 | kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE; |
1075 | kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata); |
1076 | |
1077 | kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom; |
1078 | #else |
1079 | kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL); |
1080 | kernel_map.phys_addr = (uintptr_t)(&_start); |
1081 | kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr; |
1082 | #endif |
1083 | |
1084 | #if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL) |
1085 | set_satp_mode(dtb_pa); |
1086 | set_mmap_rnd_bits_max(); |
1087 | #endif |
1088 | |
1089 | /* |
1090 | * In 64-bit, we defer the setup of va_pa_offset to setup_bootmem, |
1091 | * where we have the system memory layout: this allows us to align |
1092 | * the physical and virtual mappings and then make use of PUD/P4D/PGD |
1093 | * for the linear mapping. This is only possible because the kernel |
1094 | * mapping lies outside the linear mapping. |
1095 | * In 32-bit however, as the kernel resides in the linear mapping, |
1096 | * setup_vm_final can not change the mapping established here, |
1097 | * otherwise the same kernel addresses would get mapped to different |
1098 | * physical addresses (if the start of dram is different from the |
1099 | * kernel physical address start). |
1100 | */ |
1101 | kernel_map.va_pa_offset = IS_ENABLED(CONFIG_64BIT) ? |
1102 | 0UL : PAGE_OFFSET - kernel_map.phys_addr; |
1103 | kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr; |
1104 | |
1105 | /* |
1106 | * The default maximal physical memory size is KERN_VIRT_SIZE for 32-bit |
1107 | * kernel, whereas for 64-bit kernel, the end of the virtual address |
1108 | * space is occupied by the modules/BPF/kernel mappings which reduces |
1109 | * the available size of the linear mapping. |
1110 | */ |
1111 | memory_limit = KERN_VIRT_SIZE - (IS_ENABLED(CONFIG_64BIT) ? SZ_4G : 0); |
1112 | |
1113 | /* Sanity check alignment and size */ |
1114 | BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0); |
1115 | BUG_ON((kernel_map.phys_addr % PMD_SIZE) != 0); |
1116 | |
1117 | #ifdef CONFIG_64BIT |
1118 | /* |
1119 | * The last 4K bytes of the addressable memory can not be mapped because |
1120 | * of IS_ERR_VALUE macro. |
1121 | */ |
1122 | BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K); |
1123 | #endif |
1124 | |
1125 | #ifdef CONFIG_RELOCATABLE |
1126 | /* |
1127 | * Early page table uses only one PUD, which makes it possible |
1128 | * to map PUD_SIZE aligned on PUD_SIZE: if the relocation offset |
1129 | * makes the kernel cross over a PUD_SIZE boundary, raise a bug |
1130 | * since a part of the kernel would not get mapped. |
1131 | */ |
1132 | BUG_ON(PUD_SIZE - (kernel_map.virt_addr & (PUD_SIZE - 1)) < kernel_map.size); |
1133 | relocate_kernel(); |
1134 | #endif |
1135 | |
1136 | apply_early_boot_alternatives(); |
1137 | pt_ops_set_early(); |
1138 | |
1139 | /* Setup early PGD for fixmap */ |
1140 | create_pgd_mapping(early_pg_dir, FIXADDR_START, |
1141 | fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE); |
1142 | |
1143 | #ifndef __PAGETABLE_PMD_FOLDED |
1144 | /* Setup fixmap P4D and PUD */ |
1145 | if (pgtable_l5_enabled) |
1146 | create_p4d_mapping(fixmap_p4d, FIXADDR_START, |
1147 | (uintptr_t)fixmap_pud, P4D_SIZE, PAGE_TABLE); |
1148 | /* Setup fixmap PUD and PMD */ |
1149 | if (pgtable_l4_enabled) |
1150 | create_pud_mapping(fixmap_pud, FIXADDR_START, |
1151 | (uintptr_t)fixmap_pmd, PUD_SIZE, PAGE_TABLE); |
1152 | create_pmd_mapping(fixmap_pmd, FIXADDR_START, |
1153 | (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE); |
1154 | /* Setup trampoline PGD and PMD */ |
1155 | create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr, |
1156 | trampoline_pgd_next, PGDIR_SIZE, PAGE_TABLE); |
1157 | if (pgtable_l5_enabled) |
1158 | create_p4d_mapping(trampoline_p4d, kernel_map.virt_addr, |
1159 | (uintptr_t)trampoline_pud, P4D_SIZE, PAGE_TABLE); |
1160 | if (pgtable_l4_enabled) |
1161 | create_pud_mapping(trampoline_pud, kernel_map.virt_addr, |
1162 | (uintptr_t)trampoline_pmd, PUD_SIZE, PAGE_TABLE); |
1163 | #ifdef CONFIG_XIP_KERNEL |
1164 | create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr, |
1165 | kernel_map.xiprom, PMD_SIZE, PAGE_KERNEL_EXEC); |
1166 | #else |
	create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
			   kernel_map.phys_addr, PMD_SIZE, PAGE_KERNEL_EXEC);
1169 | #endif |
1170 | #else |
1171 | /* Setup trampoline PGD */ |
1172 | create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr, |
1173 | kernel_map.phys_addr, PGDIR_SIZE, PAGE_KERNEL_EXEC); |
1174 | #endif |
1175 | |
1176 | /* |
1177 | * Setup early PGD covering entire kernel which will allow |
1178 | * us to reach paging_init(). We map all memory banks later |
1179 | * in setup_vm_final() below. |
1180 | */ |
	create_kernel_page_table(early_pg_dir, true);
1182 | |
1183 | /* Setup early mapping for FDT early scan */ |
1184 | create_fdt_early_page_table(__fix_to_virt(FIX_FDT), dtb_pa); |
1185 | |
1186 | /* |
	 * The boot-time fixmap can only handle PMD_SIZE mappings. Thus, the boot-ioremap
1188 | * range can not span multiple pmds. |
1189 | */ |
1190 | BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT) |
1191 | != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT)); |
1192 | |
1193 | #ifndef __PAGETABLE_PMD_FOLDED |
1194 | /* |
1195 | * Early ioremap fixmap is already created as it lies within first 2MB |
1196 | * of fixmap region. We always map PMD_SIZE. Thus, both FIX_BTMAP_END |
	 * and FIX_BTMAP_BEGIN should lie in the same pmd. Verify that and warn
1198 | * the user if not. |
1199 | */ |
1200 | fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))]; |
1201 | fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))]; |
	if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) {
1203 | WARN_ON(1); |
		pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n",
			pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
1213 | } |
1214 | #endif |
1215 | |
1216 | pt_ops_set_fixmap(); |
1217 | } |
1218 | |
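/*
 * Map [start, end) into the linear mapping, using fixed_map_size if non-zero
 * or the best supported mapping size per iteration otherwise.
 */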
1219 | static void __init create_linear_mapping_range(phys_addr_t start, |
1220 | phys_addr_t end, |
1221 | uintptr_t fixed_map_size) |
1222 | { |
1223 | phys_addr_t pa; |
1224 | uintptr_t va, map_size; |
1225 | |
1226 | for (pa = start; pa < end; pa += map_size) { |
1227 | va = (uintptr_t)__va(pa); |
1228 | map_size = fixed_map_size ? fixed_map_size : |
					    best_map_size(pa, va, end - pa);

		create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
				   pgprot_from_va(va));
1233 | } |
1234 | } |
1235 | |
1236 | static void __init create_linear_mapping_page_table(void) |
1237 | { |
1238 | phys_addr_t start, end; |
1239 | phys_addr_t kfence_pool __maybe_unused; |
1240 | u64 i; |
1241 | |
1242 | #ifdef CONFIG_STRICT_KERNEL_RWX |
1243 | phys_addr_t ktext_start = __pa_symbol(_start); |
1244 | phys_addr_t ktext_size = __init_data_begin - _start; |
1245 | phys_addr_t krodata_start = __pa_symbol(__start_rodata); |
1246 | phys_addr_t krodata_size = _data - __start_rodata; |
1247 | |
1248 | /* Isolate kernel text and rodata so they don't get mapped with a PUD */ |
	memblock_mark_nomap(ktext_start, ktext_size);
	memblock_mark_nomap(krodata_start, krodata_size);
1251 | #endif |
1252 | |
1253 | #ifdef CONFIG_KFENCE |
1254 | /* |
1255 | * kfence pool must be backed by PAGE_SIZE mappings, so allocate it |
1256 | * before we setup the linear mapping so that we avoid using hugepages |
1257 | * for this region. |
1258 | */ |
1259 | kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE); |
1260 | BUG_ON(!kfence_pool); |
1261 | |
	memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
1263 | __kfence_pool = __va(kfence_pool); |
1264 | #endif |
1265 | |
1266 | /* Map all memory banks in the linear mapping */ |
1267 | for_each_mem_range(i, &start, &end) { |
1268 | if (start >= end) |
1269 | break; |
1270 | if (start <= __pa(PAGE_OFFSET) && |
1271 | __pa(PAGE_OFFSET) < end) |
1272 | start = __pa(PAGE_OFFSET); |
1273 | if (end >= __pa(PAGE_OFFSET) + memory_limit) |
1274 | end = __pa(PAGE_OFFSET) + memory_limit; |
1275 | |
		create_linear_mapping_range(start, end, 0);
1277 | } |
1278 | |
1279 | #ifdef CONFIG_STRICT_KERNEL_RWX |
	create_linear_mapping_range(ktext_start, ktext_start + ktext_size, 0);
	create_linear_mapping_range(krodata_start,
				    krodata_start + krodata_size, 0);

	memblock_clear_nomap(ktext_start, ktext_size);
	memblock_clear_nomap(krodata_start, krodata_size);
1286 | #endif |
1287 | |
1288 | #ifdef CONFIG_KFENCE |
	create_linear_mapping_range(kfence_pool,
				    kfence_pool + KFENCE_POOL_SIZE,
				    PAGE_SIZE);

	memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
1294 | #endif |
1295 | } |
1296 | |
1297 | static void __init setup_vm_final(void) |
1298 | { |
1299 | /* Setup swapper PGD for fixmap */ |
1300 | #if !defined(CONFIG_64BIT) |
1301 | /* |
1302 | * In 32-bit, the device tree lies in a pgd entry, so it must be copied |
1303 | * directly in swapper_pg_dir in addition to the pgd entry that points |
1304 | * to fixmap_pte. |
1305 | */ |
1306 | unsigned long idx = pgd_index(__fix_to_virt(FIX_FDT)); |
1307 | |
1308 | set_pgd(&swapper_pg_dir[idx], early_pg_dir[idx]); |
1309 | #endif |
1310 | create_pgd_mapping(swapper_pg_dir, FIXADDR_START, |
1311 | __pa_symbol(fixmap_pgd_next), |
1312 | PGDIR_SIZE, PAGE_TABLE); |
1313 | |
1314 | /* Map the linear mapping */ |
1315 | create_linear_mapping_page_table(); |
1316 | |
1317 | /* Map the kernel */ |
1318 | if (IS_ENABLED(CONFIG_64BIT)) |
		create_kernel_page_table(swapper_pg_dir, false);
1320 | |
1321 | #ifdef CONFIG_KASAN |
1322 | kasan_swapper_init(); |
1323 | #endif |
1324 | |
1325 | /* Clear fixmap PTE and PMD mappings */ |
1326 | clear_fixmap(FIX_PTE); |
1327 | clear_fixmap(FIX_PMD); |
1328 | clear_fixmap(FIX_PUD); |
1329 | clear_fixmap(FIX_P4D); |
1330 | |
1331 | /* Move to swapper page table */ |
1332 | csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | satp_mode); |
1333 | local_flush_tlb_all(); |
1334 | |
1335 | pt_ops_set_late(); |
1336 | } |
1337 | #else |
1338 | asmlinkage void __init setup_vm(uintptr_t dtb_pa) |
1339 | { |
1340 | dtb_early_va = (void *)dtb_pa; |
1341 | dtb_early_pa = dtb_pa; |
1342 | } |
1343 | |
1344 | static inline void setup_vm_final(void) |
1345 | { |
1346 | } |
1347 | #endif /* CONFIG_MMU */ |
1348 | |
1349 | /* |
1350 | * reserve_crashkernel() - reserves memory for crash kernel |
1351 | * |
1352 | * This function reserves memory area given in "crashkernel=" kernel command |
1353 | * line parameter. The memory reserved is used by dump capture kernel when |
1354 | * primary kernel is crashing. |
1355 | */ |
1356 | static void __init arch_reserve_crashkernel(void) |
1357 | { |
1358 | unsigned long long low_size = 0; |
1359 | unsigned long long crash_base, crash_size; |
1360 | char *cmdline = boot_command_line; |
1361 | bool high = false; |
1362 | int ret; |
1363 | |
1364 | if (!IS_ENABLED(CONFIG_CRASH_RESERVE)) |
1365 | return; |
1366 | |
	ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
				&crash_size, &crash_base,
				&low_size, &high);
1370 | if (ret) |
1371 | return; |
1372 | |
	reserve_crashkernel_generic(cmdline, crash_size, crash_base,
				    low_size, high);
1375 | } |
1376 | |
1377 | void __init paging_init(void) |
1378 | { |
1379 | setup_bootmem(); |
1380 | setup_vm_final(); |
1381 | |
	/* Depends on the linear mapping being ready */
1383 | memblock_allow_resize(); |
1384 | } |
1385 | |
1386 | void __init misc_mem_init(void) |
1387 | { |
	early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
1389 | arch_numa_init(); |
1390 | sparse_init(); |
1391 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
1392 | /* The entire VMEMMAP region has been populated. Flush TLB for this region */ |
1393 | local_flush_tlb_kernel_range(VMEMMAP_START, VMEMMAP_END); |
1394 | #endif |
1395 | zone_sizes_init(); |
1396 | arch_reserve_crashkernel(); |
1397 | memblock_dump_all(); |
1398 | } |
1399 | |
1400 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
1401 | void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node, |
1402 | unsigned long addr, unsigned long next) |
1403 | { |
	pmd_set_huge(pmd, virt_to_phys(p), PAGE_KERNEL);
1405 | } |
1406 | |
1407 | int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node, |
1408 | unsigned long addr, unsigned long next) |
1409 | { |
1410 | vmemmap_verify((pte_t *)pmdp, node, addr, next); |
1411 | return 1; |
1412 | } |
1413 | |
1414 | int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, |
1415 | struct vmem_altmap *altmap) |
1416 | { |
1417 | /* |
1418 | * Note that SPARSEMEM_VMEMMAP is only selected for rv64 and that we |
1419 | * can't use hugepage mappings for 2-level page table because in case of |
1420 | * memory hotplug, we are not able to update all the page tables with |
1421 | * the new PMDs. |
1422 | */ |
1423 | return vmemmap_populate_hugepages(start, end, node, NULL); |
1424 | } |
1425 | #endif |
1426 | |
1427 | #if defined(CONFIG_MMU) && defined(CONFIG_64BIT) |
1428 | /* |
1429 | * Pre-allocates page-table pages for a specific area in the kernel |
1430 | * page-table. Only the level which needs to be synchronized between |
1431 | * all page-tables is allocated because the synchronization can be |
1432 | * expensive. |
1433 | */ |
1434 | static void __init preallocate_pgd_pages_range(unsigned long start, unsigned long end, |
1435 | const char *area) |
1436 | { |
1437 | unsigned long addr; |
1438 | const char *lvl; |
1439 | |
1440 | for (addr = start; addr < end && addr >= start; addr = ALIGN(addr + 1, PGDIR_SIZE)) { |
1441 | pgd_t *pgd = pgd_offset_k(addr); |
1442 | p4d_t *p4d; |
1443 | pud_t *pud; |
1444 | pmd_t *pmd; |
1445 | |
		lvl = "p4d";
		p4d = p4d_alloc(&init_mm, pgd, addr);
1448 | if (!p4d) |
1449 | goto failed; |
1450 | |
1451 | if (pgtable_l5_enabled) |
1452 | continue; |
1453 | |
		lvl = "pud";
		pud = pud_alloc(&init_mm, p4d, addr);
1456 | if (!pud) |
1457 | goto failed; |
1458 | |
1459 | if (pgtable_l4_enabled) |
1460 | continue; |
1461 | |
		lvl = "pmd";
		pmd = pmd_alloc(&init_mm, pud, addr);
1464 | if (!pmd) |
1465 | goto failed; |
1466 | } |
1467 | return; |
1468 | |
1469 | failed: |
1470 | /* |
1471 | * The pages have to be there now or they will be missing in |
1472 | * process page-tables later. |
1473 | */ |
	panic("Failed to pre-allocate %s pages for %s area\n", lvl, area);
1475 | } |
1476 | |
1477 | void __init pgtable_cache_init(void) |
1478 | { |
	preallocate_pgd_pages_range(VMALLOC_START, VMALLOC_END, "vmalloc");
	if (IS_ENABLED(CONFIG_MODULES))
		preallocate_pgd_pages_range(MODULES_VADDR, MODULES_END, "bpf/modules");
1482 | } |
1483 | #endif |
1484 | |