/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/gsseg.h>

extern atomic64_t last_mm_ctx_id;

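/*
 * These static keys gate user-space RDPMC: cr4_update_pce() consults them
 * to decide whether CR4.PCE should be set or cleared on a CPU.
 */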
#ifdef CONFIG_PERF_EVENTS
DECLARE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);
void cr4_update_pce(void *ignored);
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	unsigned int nr_entries;

	/*
	 * If PTI is in use, then the entries array is not mapped while we're
	 * in user mode.  The whole array will be aliased at the address
	 * given by ldt_slot_va(slot).  We use two slots so that we can
	 * allocate, map, and enable a new LDT without invalidating the
	 * mapping of an older, still-in-use LDT.
	 *
	 * slot will be -1 if this LDT doesn't have an alias mapping.
	 */
	int slot;
};

/*
 * Used for LDT copy/destruction.
 */
static inline void init_new_context_ldt(struct mm_struct *mm)
{
	mm->context.ldt = NULL;
	init_rwsem(&mm->context.ldt_usr_sem);
}
int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
void ldt_arch_exit_mmap(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
extern void load_mm_ldt(struct mm_struct *mm);
extern void switch_ldt(struct mm_struct *prev, struct mm_struct *next);
#else
static inline void load_mm_ldt(struct mm_struct *mm)
{
	clear_LDT();
}
static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
	DEBUG_LOCKS_WARN_ON(preemptible());
}
#endif

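/*
 * Linear Address Masking (LAM) lets user space keep metadata in the upper
 * bits of pointers.  lam_cr3_mask holds the CR3 bits that enable LAM for
 * this mm; untag_mask is used to strip the tag bits from a user pointer
 * before the kernel treats it as a virtual address.
 */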
#ifdef CONFIG_ADDRESS_MASKING
static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
{
	return mm->context.lam_cr3_mask;
}

static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
{
	mm->context.lam_cr3_mask = oldmm->context.lam_cr3_mask;
	mm->context.untag_mask = oldmm->context.untag_mask;
}

#define mm_untag_mask mm_untag_mask
static inline unsigned long mm_untag_mask(struct mm_struct *mm)
{
	return mm->context.untag_mask;
}

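/* An all-ones untag mask leaves addresses untouched, i.e. no LAM tagging. */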
static inline void mm_reset_untag_mask(struct mm_struct *mm)
{
	mm->context.untag_mask = -1UL;
}

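/*
 * Devices doing shared-virtual-address DMA can't untag pointers, so an mm
 * with LAM enabled is only considered DMA-compatible if user space
 * explicitly opted in via MM_CONTEXT_FORCE_TAGGED_SVA.
 */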
#define arch_pgtable_dma_compat arch_pgtable_dma_compat
static inline bool arch_pgtable_dma_compat(struct mm_struct *mm)
{
	return !mm_lam_cr3_mask(mm) ||
		test_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &mm->context.flags);
}
#else

static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
{
	return 0;
}

static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
{
}

static inline void mm_reset_untag_mask(struct mm_struct *mm)
{
}
#endif

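/*
 * Called when the CPU is handed to a kernel thread: the previous mm's page
 * tables stay loaded ("lazy" mode), skipping a CR3 switch and deferring
 * TLB flushes until the mm is actually used again.
 */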
#define enter_lazy_tlb enter_lazy_tlb
extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

/*
 * Init a new mm.  Used on mm copies, like at fork()
 * and on mm's that are brand-new, like at execve().
 */
#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mutex_init(&mm->context.lock);

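	/*
	 * ctx_id uniquely identifies this mm among all mms that have ever
	 * existed; tlb_gen counts the TLB-invalidating changes made to it,
	 * so each CPU can tell how stale its cached translations are.
	 */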
	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and allocated implicitly */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
#endif
	mm_reset_untag_mask(mm);
	init_new_context_ldt(mm);
	return 0;
}

#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

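/*
 * switch_mm() switches the CPU over to next's page tables; the _irqs_off
 * variant is for callers, like the scheduler, that already run with
 * interrupts disabled.
 */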
extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

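/* Install a fresh mm on the current CPU; exec is the main caller. */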
#define activate_mm(prev, next)			\
do {						\
	paravirt_enter_mmap(next);		\
	switch_mm((prev), (next), NULL);	\
} while (0)

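/*
 * Called during exec, before the new mm is activated: clear segment
 * registers that could still reference the outgoing mm's TLS or LDT (and,
 * on 64-bit, free the old shadow stack).
 */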
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	loadsegment(gs, 0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	shstk_free(tsk);			\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

static inline void arch_dup_pkeys(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return;

	/* Duplicate the oldmm pkey state in mm: */
	mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
	mm->context.execute_only_pkey   = oldmm->context.execute_only_pkey;
#endif
}

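/* fork: copy the parent's arch-specific mm state into the child. */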
static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	paravirt_enter_mmap(mm);
	dup_lam(oldmm, mm);
	return ldt_dup_context(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
	ldt_arch_exit_mmap(mm);
}

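/*
 * True unless the mm is known to run 32-bit compat code; with
 * CONFIG_IA32_EMULATION disabled, every 64-bit mm is 64-bit by construction.
 */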
#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return !IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!test_bit(MM_CONTEXT_UPROBE_IA32, &mm->context.flags);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

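/* Deliberately empty: x86 currently has no arch-specific work on unmap. */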
static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
}

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}

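/*
 * Returns the CR3 value the current CPU should be running with, rebuilt
 * from per-CPU TLB state; cheaper than reading the CR3 register itself.
 */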
unsigned long __get_current_cr3_fast(void);

#include <asm-generic/mmu_context.h>

#endif /* _ASM_X86_MMU_CONTEXT_H */