/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_H
#define _ASM_X86_MMU_H

#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/bits.h>

/* Uprobes on this MM assume 32-bit code */
#define MM_CONTEXT_UPROBE_IA32		0
/* vsyscall page is accessible on this MM */
#define MM_CONTEXT_HAS_VSYSCALL		1
/* Do not allow changing LAM mode */
#define MM_CONTEXT_LOCK_LAM		2
/* Allow LAM and SVA coexisting */
#define MM_CONTEXT_FORCE_TAGGED_SVA	3
/* Do not track this mm in mm_cpumask */
#define MM_CONTEXT_NOTRACK		4
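
/*
 * These are bit numbers into mm_context_t::flags below. A minimal usage
 * sketch with the generic <linux/bitops.h> helpers (illustrative only,
 * not a definitive list of users):
 *
 *	if (test_bit(MM_CONTEXT_UPROBE_IA32, &mm->context.flags))
 *		treat the mm as 32-bit for uprobes;
 *	set_bit(MM_CONTEXT_HAS_VSYSCALL, &mm->context.flags);
 */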

/*
 * x86 has arch-specific MMU state beyond what lives in mm_struct.
 */
typedef struct {
	/*
	 * ctx_id uniquely identifies this mm_struct. A ctx_id will never
	 * be reused, and zero is not a valid ctx_id.
	 */
	u64 ctx_id;
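
	/*
	 * A fresh ctx_id can be handed out at mm creation time from a
	 * global, monotonically increasing counter; init_new_context()
	 * does essentially this with last_mm_ctx_id (sketch):
	 *
	 *	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	 *
	 * Starting the counter at 1 keeps zero invalid.
	 */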

	/*
	 * Any code that needs to do any sort of TLB flushing for this
	 * mm will first make its changes to the page tables, then
	 * increment tlb_gen, then flush. This lets the low-level
	 * flushing code keep track of what needs flushing.
	 *
	 * This is not used on Xen PV.
	 */
	atomic64_t tlb_gen;
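
	/*
	 * The bump step of that protocol is roughly what
	 * inc_mm_tlb_gen() in <asm/tlbflush.h> does (sketch):
	 *
	 *	static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
	 *	{
	 *		return atomic64_inc_return(&mm->context.tlb_gen);
	 *	}
	 *
	 * atomic64_inc_return() is fully ordered, so the page table
	 * update is visible before any flush that sees the new
	 * generation.
	 */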

	/* Earliest time (jiffies) at which mm_cpumask may next be trimmed of stale CPUs */
	unsigned long next_trim_cpumask;

#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct rw_semaphore ldt_usr_sem;
	struct ldt_struct *ldt;
#endif

	unsigned long flags;		/* MM_CONTEXT_* bits, see above */

#ifdef CONFIG_ADDRESS_MASKING
	/* Active LAM mode: X86_CR3_LAM_U48 or X86_CR3_LAM_U57 or 0 (disabled) */
	unsigned long lam_cr3_mask;

	/* Significant bits of the virtual address. Excludes tag bits. */
	u64 untag_mask;
#endif
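
	/*
	 * Untagging sketch: for a user pointer, masking with untag_mask
	 * strips the LAM metadata bits, roughly
	 *
	 *	addr &= mm->context.untag_mask;
	 *
	 * The real untagged_addr() is more careful (kernel addresses
	 * must pass through unchanged); illustrative only.
	 */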

	struct mutex lock;
	void __user *vdso;			/* vdso base address */
	const struct vdso_image *vdso_image;	/* vdso image in use */

	atomic_t perf_rdpmc_allowed;		/* nonzero if rdpmc is allowed */
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/*
	 * One bit per protection key says whether userspace can
	 * use it or not. Protected by mmap_lock.
	 */
	u16 pkey_allocation_map;
	s16 execute_only_pkey;
#endif
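
	/*
	 * Allocation check sketch: pkey N is allocated iff bit N is set,
	 * which is roughly what mm_pkey_is_allocated() in <asm/pkeys.h>
	 * boils down to:
	 *
	 *	mm->context.pkey_allocation_map & (1U << pkey)
	 */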

#ifdef CONFIG_BROADCAST_TLB_FLUSH
	/*
	 * The global ASID will be a non-zero value when the process has
	 * the same ASID across all CPUs, allowing it to make use of
	 * hardware-assisted remote TLB invalidation like AMD INVLPGB.
	 */
	u16 global_asid;

	/* The process is transitioning to a new global ASID number. */
	bool asid_transition;
#endif
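
	/*
	 * Readers pair an acquire load of global_asid with the release
	 * store made when an ASID is assigned, along the lines of
	 * mm_global_asid() (sketch; the real helper also checks for
	 * INVLPGB support):
	 *
	 *	u16 asid = smp_load_acquire(&mm->context.global_asid);
	 */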
} mm_context_t;

#define INIT_MM_CONTEXT(mm)						\
	.context = {							\
		.ctx_id = 1,						\
		.lock = __MUTEX_INITIALIZER(mm.context.lock),		\
	}
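
/*
 * Usage sketch: mm/init-mm.c pulls this into the static initializer of
 * init_mm, roughly:
 *
 *	struct mm_struct init_mm = {
 *		...
 *		INIT_MM_CONTEXT(init_mm)
 *	};
 */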

void leave_mm(void);
/* The self-#define lets generic code detect that x86 supplies its own leave_mm(). */
#define leave_mm leave_mm

#endif /* _ASM_X86_MMU_H */