/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_64_DEFS_H
#define _ASM_X86_PGTABLE_64_DEFS_H

#include <asm/sparsemem.h>

#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <asm/kaslr.h>

/*
 * These are used to make use of C type-checking.
 */
typedef unsigned long pteval_t;
typedef unsigned long pmdval_t;
typedef unsigned long pudval_t;
typedef unsigned long p4dval_t;
typedef unsigned long pgdval_t;
typedef unsigned long pgprotval_t;

typedef struct { pteval_t pte; } pte_t;
typedef struct { pmdval_t pmd; } pmd_t;

extern unsigned int __pgtable_l5_enabled;

#ifdef USE_EARLY_PGTABLE_L5
/*
 * cpu_feature_enabled() is not available in early boot code.
 * Use the variable instead.
 */
static inline bool pgtable_l5_enabled(void)
{
	return __pgtable_l5_enabled;
}
#else
#define pgtable_l5_enabled() cpu_feature_enabled(X86_FEATURE_LA57)
#endif /* USE_EARLY_PGTABLE_L5 */
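
/*
 * Illustrative usage sketch, not a definition in this header: a translation
 * unit that runs before CPU feature detection is ready defines
 * USE_EARLY_PGTABLE_L5 ahead of its includes, so that pgtable_l5_enabled()
 * reads __pgtable_l5_enabled instead of calling cpu_feature_enabled():
 *
 *	#define USE_EARLY_PGTABLE_L5
 *	#include <asm/pgtable_64_defs.h>	(illustrative path, matching the guard)
 *
 *	if (pgtable_l5_enabled())
 *		...	(57-bit virtual addresses, 5 paging levels)
 *	else
 *		...	(48-bit virtual addresses, 4 paging levels)
 */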

extern unsigned int pgdir_shift;
extern unsigned int ptrs_per_p4d;

#endif /* !__ASSEMBLER__ */

/*
 * PGDIR_SHIFT determines what a top-level page table entry can map
 */
#define PGDIR_SHIFT pgdir_shift
#define PTRS_PER_PGD 512
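
/*
 * Worked example: with 4-level paging pgdir_shift is 39, so one PGD entry
 * covers 1UL << 39 bytes = 512 GiB and the 512 entries span 256 TiB of
 * virtual address space; with 5-level paging (LA57) pgdir_shift is 48, so
 * one entry covers 256 TiB and the PGD spans 128 PiB.
 */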

/*
 * 4th level page table in the 5-level paging case
 */
#define P4D_SHIFT 39
#define MAX_PTRS_PER_P4D 512
#define PTRS_PER_P4D ptrs_per_p4d
#define P4D_SIZE (_AC(1, UL) << P4D_SHIFT)
#define P4D_MASK (~(P4D_SIZE - 1))
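
/*
 * Worked example: P4D_SIZE is 1UL << 39 = 512 GiB per entry. With 5-level
 * paging ptrs_per_p4d is 512; with 4-level paging the p4d level is folded
 * into the pgd and ptrs_per_p4d is 1.
 */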

#define MAX_POSSIBLE_PHYSMEM_BITS 52

/*
 * 3rd level page
 */
#define PUD_SHIFT 30
#define PTRS_PER_PUD 512

/*
 * PMD_SHIFT determines the size of the area a middle-level
 * page table can map
 */
#define PMD_SHIFT 21
#define PTRS_PER_PMD 512

/*
 * entries per page directory level
 */
#define PTRS_PER_PTE 512

#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE - 1))
#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE - 1))
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
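
/*
 * Resulting granularities: PMD_SIZE = 2 MiB, PUD_SIZE = 1 GiB, and
 * PGDIR_SIZE = 512 GiB with 4-level paging or 256 TiB with 5-level paging.
 * The *_MASK values round an address down to the start of the region
 * mapped by the corresponding entry.
 */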

/*
 * See Documentation/arch/x86/x86_64/mm.rst for a description of the memory map.
 *
 * Be very careful vs. KASLR when changing anything here. The KASLR address
 * range must not overlap with anything except the KASAN shadow area, which
 * is correct as KASAN disables KASLR.
 */
#define MAXMEM (1UL << MAX_PHYSMEM_BITS)

#define GUARD_HOLE_PGD_ENTRY -256UL
#define GUARD_HOLE_SIZE (16UL << PGDIR_SHIFT)
#define GUARD_HOLE_BASE_ADDR (GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT)
#define GUARD_HOLE_END_ADDR (GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE)
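
/*
 * Worked example, assuming 4-level paging (PGDIR_SHIFT == 39): the guard
 * hole starts at -256UL << 39 = 0xffff800000000000, the lowest kernel-half
 * address, and spans 16 PGD entries = 8 TiB, kept free for hypervisor use.
 * With 5-level paging the same expressions give a 0xff00000000000000 base
 * and a 4 PiB hole.
 */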

#define LDT_PGD_ENTRY -240UL
#define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
#define LDT_END_ADDR (LDT_BASE_ADDR + PGDIR_SIZE)

#define __VMALLOC_BASE_L4 0xffffc90000000000UL
#define __VMALLOC_BASE_L5 0xffa0000000000000UL

#define VMALLOC_SIZE_TB_L4 32UL
#define VMALLOC_SIZE_TB_L5 12800UL

#define __VMEMMAP_BASE_L4 0xffffea0000000000UL
#define __VMEMMAP_BASE_L5 0xffd4000000000000UL

# define VMALLOC_START vmalloc_base
# define VMALLOC_SIZE_TB (pgtable_l5_enabled() ? VMALLOC_SIZE_TB_L5 : VMALLOC_SIZE_TB_L4)
# define VMEMMAP_START vmemmap_base

#ifdef CONFIG_RANDOMIZE_MEMORY
# define DIRECT_MAP_PHYSMEM_END direct_map_physmem_end
#endif

/*
 * End of the region for which vmalloc page tables are pre-allocated.
 * For non-KMSAN builds, this is the same as VMALLOC_END.
 * For KMSAN builds, VMALLOC_START..VMEMORY_END is 4 times bigger than
 * VMALLOC_START..VMALLOC_END (see below).
 */
#define VMEMORY_END (VMALLOC_START + (VMALLOC_SIZE_TB << 40) - 1)
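
/*
 * Worked example: with 4-level paging VMALLOC_SIZE_TB is 32, so page tables
 * are pre-allocated for VMALLOC_START .. VMALLOC_START + 32 TiB - 1; with
 * 5-level paging the covered range is 12800 TiB (12.5 PiB).
 */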

#ifndef CONFIG_KMSAN
#define VMALLOC_END VMEMORY_END
#else
/*
 * In KMSAN builds the vmalloc area is four times smaller, and the remaining
 * 3/4 are used to keep the metadata for virtual pages. The memory formerly
 * belonging to the vmalloc area is now laid out as follows:
 *
 * 1st quarter: VMALLOC_START to VMALLOC_END - new vmalloc area
 * 2nd quarter: KMSAN_VMALLOC_SHADOW_START to
 *		VMALLOC_END+KMSAN_VMALLOC_SHADOW_OFFSET - vmalloc area shadow
 * 3rd quarter: KMSAN_VMALLOC_ORIGIN_START to
 *		VMALLOC_END+KMSAN_VMALLOC_ORIGIN_OFFSET - vmalloc area origins
 * 4th quarter: KMSAN_MODULES_SHADOW_START to KMSAN_MODULES_ORIGIN_START
 *		- shadow for modules,
 *		KMSAN_MODULES_ORIGIN_START to
 *		KMSAN_MODULES_ORIGIN_START + MODULES_LEN - origins for modules.
 */
#define VMALLOC_QUARTER_SIZE ((VMALLOC_SIZE_TB << 40) >> 2)
#define VMALLOC_END (VMALLOC_START + VMALLOC_QUARTER_SIZE - 1)
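
/*
 * Worked example, assuming 4-level paging: VMALLOC_SIZE_TB is 32, so
 * VMALLOC_QUARTER_SIZE is 8 TiB. The usable vmalloc area is then the first
 * 8 TiB, its shadow the second, its origins the third, and the module
 * shadow/origins live in the fourth.
 */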

/*
 * vmalloc metadata addresses are calculated by adding shadow/origin offsets
 * to the vmalloc address.
 */
#define KMSAN_VMALLOC_SHADOW_OFFSET VMALLOC_QUARTER_SIZE
#define KMSAN_VMALLOC_ORIGIN_OFFSET (VMALLOC_QUARTER_SIZE << 1)

#define KMSAN_VMALLOC_SHADOW_START (VMALLOC_START + KMSAN_VMALLOC_SHADOW_OFFSET)
#define KMSAN_VMALLOC_ORIGIN_START (VMALLOC_START + KMSAN_VMALLOC_ORIGIN_OFFSET)
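
/*
 * For example, the KMSAN shadow of a vmalloc address addr lives at
 * addr + KMSAN_VMALLOC_SHADOW_OFFSET and its origin at
 * addr + KMSAN_VMALLOC_ORIGIN_OFFSET, i.e. one and two quarters above the
 * data itself.
 */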

/*
 * The shadow and origin regions for modules are placed one after another in
 * the last 1/4 of the vmalloc space.
 */
#define KMSAN_MODULES_SHADOW_START (VMALLOC_END + KMSAN_VMALLOC_ORIGIN_OFFSET + 1)
#define KMSAN_MODULES_ORIGIN_START (KMSAN_MODULES_SHADOW_START + MODULES_LEN)
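
/*
 * Note on the arithmetic above: VMALLOC_END is the last byte of the first
 * quarter, so adding KMSAN_VMALLOC_ORIGIN_OFFSET (two quarters) plus one
 * lands exactly at the start of the fourth quarter, which holds MODULES_LEN
 * bytes of module shadow followed by MODULES_LEN bytes of module origins.
 */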
#endif /* CONFIG_KMSAN */

#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
/* The module section ends at the start of the fixmap */
#ifndef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
# define MODULES_END _AC(0xffffffffff000000, UL)
#else
# define MODULES_END _AC(0xfffffffffe000000, UL)
#endif
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
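
/*
 * Worked example, assuming the usual __START_KERNEL_map of
 * 0xffffffff80000000 and a 1 GiB KERNEL_IMAGE_SIZE (CONFIG_RANDOMIZE_BASE):
 * MODULES_VADDR is 0xffffffffc0000000, so MODULES_LEN is
 * 0xffffffffff000000 - 0xffffffffc0000000 = 1008 MiB.
 */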

#define ESPFIX_PGD_ENTRY _AC(-2, UL)
#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT)

#define CPU_ENTRY_AREA_PGD _AC(-4, UL)
#define CPU_ENTRY_AREA_BASE (CPU_ENTRY_AREA_PGD << P4D_SHIFT)
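
/*
 * Both bases are shifted by the constant P4D_SHIFT (39), so they do not move
 * with the paging mode: ESPFIX_BASE_ADDR = -2UL << 39 = 0xffffff0000000000
 * and CPU_ENTRY_AREA_BASE = -4UL << 39 = 0xfffffe0000000000.
 */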

#define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
#define EFI_VA_END (-68 * (_AC(1, UL) << 30))
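
/*
 * Worked example: EFI_VA_START = -4 GiB = 0xffffffff00000000 and
 * EFI_VA_END = -68 GiB = 0xffffffef00000000. EFI runtime mappings are
 * handed out top-down starting at EFI_VA_START, which is why "start" is the
 * numerically higher of the two addresses.
 */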

#define EARLY_DYNAMIC_PAGE_TABLES 64

#define PGD_KERNEL_START ((PAGE_SIZE / 2) / sizeof(pgd_t))
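
/*
 * Worked example: with 4 KiB pages and 8-byte PGD entries this evaluates to
 * 4096 / 2 / 8 = 256, i.e. the first PGD slot of the kernel half of the
 * address space.
 */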

/*
 * We borrow bit 3 (_PAGE_PWT) in swap entries to remember PG_anon_exclusive.
 */
#define _PAGE_SWP_EXCLUSIVE _PAGE_PWT

#endif /* _ASM_X86_PGTABLE_64_DEFS_H */