/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGALLOC_H
#define _ASM_X86_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>		/* for struct page */
#include <linux/pagemap.h>

#include <asm/cpufeature.h>

#define __HAVE_ARCH_PTE_ALLOC_ONE
#define __HAVE_ARCH_PGD_FREE
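/*
 * The two defines above tell <asm-generic/pgalloc.h> to skip its
 * generic pte_alloc_one() and pgd_free(); x86 provides its own
 * versions below.
 */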
#include <asm-generic/pgalloc.h>

static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#define paravirt_pgd_alloc(mm)	__paravirt_pgd_alloc(mm)
static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
					    unsigned long start, unsigned long count) {}
static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_release_pte(unsigned long pfn) {}
static inline void paravirt_release_pmd(unsigned long pfn) {}
static inline void paravirt_release_pud(unsigned long pfn) {}
static inline void paravirt_release_p4d(unsigned long pfn) {}
#endif
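
/*
 * Illustrative pairing (hypothetical call site, not actual kernel
 * code): a paravirt backend such as Xen PV pins and write-protects
 * page-table pages, so these hooks tell it whenever a pfn starts or
 * stops being used as one:
 *
 *	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
 *	...the page now serves as a PTE page...
 *	paravirt_release_pte(page_to_pfn(page));
 */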

/*
 * When Page Table Isolation (PTI) is active, we allocate two PGDs
 * instead of one. Being order-1, the allocation is both 8k in size
 * and 8k-aligned, which lets us just flip bit 12 in a pointer to
 * swap between the two 4k halves.
 */
static inline unsigned int pgd_allocation_order(void)
{
	if (cpu_feature_enabled(X86_FEATURE_PTI))
		return 1;
	return 0;
}
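
/*
 * Usage sketch (an assumption about the allocator's shape; the real
 * pgd_alloc()/pgd_free() live in arch/x86/mm/pgtable.c):
 *
 *	unsigned int order = pgd_allocation_order();
 *	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_PGTABLE_USER, order);
 *	...
 *	free_pages((unsigned long)pgd, order);
 *
 * With PTI, the order-1 pair keeps the kernel and user PGDs in one
 * 8k-aligned block, so switching halves is a single bit flip, e.g.:
 *
 *	user_pgd = (pgd_t *)((unsigned long)kernel_pgd ^ PAGE_SIZE);
 */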

/*
 * Allocate and free page tables.
 */
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

extern pgtable_t pte_alloc_one(struct mm_struct *);

extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);

static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
				  unsigned long address)
{
	___pte_free_tlb(tlb, pte);
}
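
/*
 * ___pte_free_tlb() is normally reached via the pte_free_tlb()
 * wrapper in <asm-generic/tlb.h>, inside an mmu_gather batch. A
 * simplified sketch of the surrounding unmap sequence:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);
 *	...unmap the PTEs, then retire the now-empty table...
 *	pte_free_tlb(&tlb, pte_page, addr);
 *	tlb_finish_mmu(&tlb);
 */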

static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}

static inline void pmd_populate_kernel_safe(struct mm_struct *mm,
					    pmd_t *pmd, pte_t *pte)
{
	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd_safe(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}
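
/*
 * The *_safe variants use set_pmd_safe() and friends, which warn if
 * they would overwrite an already-present entry; they are intended
 * for code that populates fresh tables, e.g. early identity-map
 * construction.
 */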

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				struct page *pte)
{
	unsigned long pfn = page_to_pfn(pte);

	paravirt_alloc_pte(mm, pfn);
	set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}
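
/*
 * Typical caller sequence (a sketch; compare __pte_alloc() in
 * mm/memory.c, which uses pmd_lock() and rechecks under the lock):
 *
 *	pgtable_t new = pte_alloc_one(mm);
 *
 *	if (!new)
 *		return -ENOMEM;
 *	if (pmd_none(*pmd))
 *		pmd_populate(mm, pmd, new);
 *	else
 *		pte_free(mm, new);
 */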

#if CONFIG_PGTABLE_LEVELS > 2
extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	___pmd_free_tlb(tlb, pmd);
}

#ifdef CONFIG_X86_PAE
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
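
/*
 * PAE is special: PDPT entries permit almost no flag bits besides
 * _PAGE_PRESENT, and the CPU may cache them, so the out-of-line
 * implementation in arch/x86/mm/pgtable.c follows the write with a
 * TLB flush rather than doing a plain set_pud() here.
 */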
#else	/* !CONFIG_X86_PAE */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
	set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}

static inline void pud_populate_safe(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
	set_pud_safe(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}
#endif	/* CONFIG_X86_PAE */

#if CONFIG_PGTABLE_LEVELS > 3
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
	set_p4d(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
}

static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
	set_p4d_safe(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
}

extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	___pud_free_tlb(tlb, pud);
}

#if CONFIG_PGTABLE_LEVELS > 4
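/*
 * With 5-level paging compiled in but not enabled at boot (no LA57),
 * the p4d level folds into the pgd: each pgd entry acts as the p4d,
 * so there is nothing separate to populate or free. Hence the
 * pgtable_l5_enabled() checks below.
 */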
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	if (!pgtable_l5_enabled())
		return;
	paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
	set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
}

static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	if (!pgtable_l5_enabled())
		return;
	paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
	set_pgd_safe(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
}

extern void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d);

static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				  unsigned long address)
{
	if (pgtable_l5_enabled())
		___p4d_free_tlb(tlb, p4d);
}

#endif	/* CONFIG_PGTABLE_LEVELS > 4 */
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#endif /* _ASM_X86_PGALLOC_H */