1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* |
 * vmx.h: VMX architecture-related definitions
4 | * Copyright (c) 2004, Intel Corporation. |
5 | * |
6 | * A few random additions are: |
7 | * Copyright (C) 2006 Qumranet |
8 | * Avi Kivity <avi@qumranet.com> |
9 | * Yaniv Kamay <yaniv@qumranet.com> |
10 | */ |
11 | #ifndef VMX_H |
12 | #define VMX_H |
13 | |
14 | |
15 | #include <linux/bitops.h> |
16 | #include <linux/bug.h> |
17 | #include <linux/types.h> |
18 | |
19 | #include <uapi/asm/vmx.h> |
20 | #include <asm/trapnr.h> |
21 | #include <asm/vmxfeatures.h> |
22 | |
23 | #define VMCS_CONTROL_BIT(x) BIT(VMX_FEATURE_##x & 0x1f) |
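
/*
 * Illustration (editorial note, not used by the definitions below): each
 * VMX_FEATURE_x in <asm/vmxfeatures.h> is encoded as (word * 32 + bit), so
 * masking with 0x1f recovers the bit position within the feature's 32-bit
 * VMCS control field.  "Interrupt-window exiting", for example, is bit 2 of
 * the primary processor-based controls, so
 * VMCS_CONTROL_BIT(INTR_WINDOW_EXITING) expands to BIT(2), i.e. 0x4.
 */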
24 | |
25 | /* |
26 | * Definitions of Primary Processor-Based VM-Execution Controls. |
27 | */ |
28 | #define CPU_BASED_INTR_WINDOW_EXITING VMCS_CONTROL_BIT(INTR_WINDOW_EXITING) |
29 | #define CPU_BASED_USE_TSC_OFFSETTING VMCS_CONTROL_BIT(USE_TSC_OFFSETTING) |
30 | #define CPU_BASED_HLT_EXITING VMCS_CONTROL_BIT(HLT_EXITING) |
31 | #define CPU_BASED_INVLPG_EXITING VMCS_CONTROL_BIT(INVLPG_EXITING) |
32 | #define CPU_BASED_MWAIT_EXITING VMCS_CONTROL_BIT(MWAIT_EXITING) |
33 | #define CPU_BASED_RDPMC_EXITING VMCS_CONTROL_BIT(RDPMC_EXITING) |
34 | #define CPU_BASED_RDTSC_EXITING VMCS_CONTROL_BIT(RDTSC_EXITING) |
35 | #define CPU_BASED_CR3_LOAD_EXITING VMCS_CONTROL_BIT(CR3_LOAD_EXITING) |
36 | #define CPU_BASED_CR3_STORE_EXITING VMCS_CONTROL_BIT(CR3_STORE_EXITING) |
37 | #define CPU_BASED_ACTIVATE_TERTIARY_CONTROLS VMCS_CONTROL_BIT(TERTIARY_CONTROLS) |
38 | #define CPU_BASED_CR8_LOAD_EXITING VMCS_CONTROL_BIT(CR8_LOAD_EXITING) |
39 | #define CPU_BASED_CR8_STORE_EXITING VMCS_CONTROL_BIT(CR8_STORE_EXITING) |
40 | #define CPU_BASED_TPR_SHADOW VMCS_CONTROL_BIT(VIRTUAL_TPR) |
41 | #define CPU_BASED_NMI_WINDOW_EXITING VMCS_CONTROL_BIT(NMI_WINDOW_EXITING) |
42 | #define CPU_BASED_MOV_DR_EXITING VMCS_CONTROL_BIT(MOV_DR_EXITING) |
43 | #define CPU_BASED_UNCOND_IO_EXITING VMCS_CONTROL_BIT(UNCOND_IO_EXITING) |
44 | #define CPU_BASED_USE_IO_BITMAPS VMCS_CONTROL_BIT(USE_IO_BITMAPS) |
45 | #define CPU_BASED_MONITOR_TRAP_FLAG VMCS_CONTROL_BIT(MONITOR_TRAP_FLAG) |
46 | #define CPU_BASED_USE_MSR_BITMAPS VMCS_CONTROL_BIT(USE_MSR_BITMAPS) |
47 | #define CPU_BASED_MONITOR_EXITING VMCS_CONTROL_BIT(MONITOR_EXITING) |
48 | #define CPU_BASED_PAUSE_EXITING VMCS_CONTROL_BIT(PAUSE_EXITING) |
49 | #define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS VMCS_CONTROL_BIT(SEC_CONTROLS) |
50 | |
51 | #define CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x0401e172 |
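
/*
 * Note: these are the SDM's "default1" bits for the primary processor-based
 * controls, i.e. the bits that must be set when the TRUE capability MSRs
 * (advertised via VMX_BASIC_TRUE_CTLS) are not used.  The other
 * *_ALWAYSON_WITHOUT_TRUE_MSR values below follow the same pattern for their
 * respective control fields.
 */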
52 | |
53 | /* |
54 | * Definitions of Secondary Processor-Based VM-Execution Controls. |
55 | */ |
56 | #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES VMCS_CONTROL_BIT(VIRT_APIC_ACCESSES) |
57 | #define SECONDARY_EXEC_ENABLE_EPT VMCS_CONTROL_BIT(EPT) |
58 | #define SECONDARY_EXEC_DESC VMCS_CONTROL_BIT(DESC_EXITING) |
59 | #define SECONDARY_EXEC_ENABLE_RDTSCP VMCS_CONTROL_BIT(RDTSCP) |
60 | #define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE VMCS_CONTROL_BIT(VIRTUAL_X2APIC) |
61 | #define SECONDARY_EXEC_ENABLE_VPID VMCS_CONTROL_BIT(VPID) |
62 | #define SECONDARY_EXEC_WBINVD_EXITING VMCS_CONTROL_BIT(WBINVD_EXITING) |
63 | #define SECONDARY_EXEC_UNRESTRICTED_GUEST VMCS_CONTROL_BIT(UNRESTRICTED_GUEST) |
64 | #define SECONDARY_EXEC_APIC_REGISTER_VIRT VMCS_CONTROL_BIT(APIC_REGISTER_VIRT) |
65 | #define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY VMCS_CONTROL_BIT(VIRT_INTR_DELIVERY) |
66 | #define SECONDARY_EXEC_PAUSE_LOOP_EXITING VMCS_CONTROL_BIT(PAUSE_LOOP_EXITING) |
67 | #define SECONDARY_EXEC_RDRAND_EXITING VMCS_CONTROL_BIT(RDRAND_EXITING) |
68 | #define SECONDARY_EXEC_ENABLE_INVPCID VMCS_CONTROL_BIT(INVPCID) |
69 | #define SECONDARY_EXEC_ENABLE_VMFUNC VMCS_CONTROL_BIT(VMFUNC) |
70 | #define SECONDARY_EXEC_SHADOW_VMCS VMCS_CONTROL_BIT(SHADOW_VMCS) |
71 | #define SECONDARY_EXEC_ENCLS_EXITING VMCS_CONTROL_BIT(ENCLS_EXITING) |
72 | #define SECONDARY_EXEC_RDSEED_EXITING VMCS_CONTROL_BIT(RDSEED_EXITING) |
73 | #define SECONDARY_EXEC_ENABLE_PML VMCS_CONTROL_BIT(PAGE_MOD_LOGGING) |
74 | #define SECONDARY_EXEC_EPT_VIOLATION_VE VMCS_CONTROL_BIT(EPT_VIOLATION_VE) |
75 | #define SECONDARY_EXEC_PT_CONCEAL_VMX VMCS_CONTROL_BIT(PT_CONCEAL_VMX) |
76 | #define SECONDARY_EXEC_ENABLE_XSAVES VMCS_CONTROL_BIT(XSAVES) |
77 | #define SECONDARY_EXEC_MODE_BASED_EPT_EXEC VMCS_CONTROL_BIT(MODE_BASED_EPT_EXEC) |
78 | #define SECONDARY_EXEC_PT_USE_GPA VMCS_CONTROL_BIT(PT_USE_GPA) |
79 | #define SECONDARY_EXEC_TSC_SCALING VMCS_CONTROL_BIT(TSC_SCALING) |
80 | #define SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE VMCS_CONTROL_BIT(USR_WAIT_PAUSE) |
81 | #define SECONDARY_EXEC_BUS_LOCK_DETECTION VMCS_CONTROL_BIT(BUS_LOCK_DETECTION) |
82 | #define SECONDARY_EXEC_NOTIFY_VM_EXITING VMCS_CONTROL_BIT(NOTIFY_VM_EXITING) |
83 | |
84 | /* |
85 | * Definitions of Tertiary Processor-Based VM-Execution Controls. |
86 | */ |
87 | #define TERTIARY_EXEC_IPI_VIRT VMCS_CONTROL_BIT(IPI_VIRT) |
88 | |
89 | #define PIN_BASED_EXT_INTR_MASK VMCS_CONTROL_BIT(INTR_EXITING) |
90 | #define PIN_BASED_NMI_EXITING VMCS_CONTROL_BIT(NMI_EXITING) |
91 | #define PIN_BASED_VIRTUAL_NMIS VMCS_CONTROL_BIT(VIRTUAL_NMIS) |
92 | #define PIN_BASED_VMX_PREEMPTION_TIMER VMCS_CONTROL_BIT(PREEMPTION_TIMER) |
93 | #define PIN_BASED_POSTED_INTR VMCS_CONTROL_BIT(POSTED_INTR) |
94 | |
95 | #define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x00000016 |
96 | |
97 | #define VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004 |
98 | #define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200 |
99 | #define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000 |
100 | #define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 |
101 | #define VM_EXIT_SAVE_IA32_PAT 0x00040000 |
102 | #define VM_EXIT_LOAD_IA32_PAT 0x00080000 |
103 | #define VM_EXIT_SAVE_IA32_EFER 0x00100000 |
104 | #define VM_EXIT_LOAD_IA32_EFER 0x00200000 |
105 | #define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000 |
106 | #define VM_EXIT_CLEAR_BNDCFGS 0x00800000 |
107 | #define VM_EXIT_PT_CONCEAL_PIP 0x01000000 |
108 | #define VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000 |
109 | |
110 | #define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR 0x00036dff |
111 | |
112 | #define VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004 |
113 | #define VM_ENTRY_IA32E_MODE 0x00000200 |
114 | #define VM_ENTRY_SMM 0x00000400 |
115 | #define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 |
116 | #define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000 |
117 | #define VM_ENTRY_LOAD_IA32_PAT 0x00004000 |
118 | #define VM_ENTRY_LOAD_IA32_EFER 0x00008000 |
119 | #define VM_ENTRY_LOAD_BNDCFGS 0x00010000 |
120 | #define VM_ENTRY_PT_CONCEAL_PIP 0x00020000 |
121 | #define VM_ENTRY_LOAD_IA32_RTIT_CTL 0x00040000 |
122 | |
123 | #define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR 0x000011ff |
124 | |
125 | /* VMFUNC functions */ |
126 | #define VMFUNC_CONTROL_BIT(x) BIT((VMX_FEATURE_##x & 0x1f) - 28) |
127 | |
128 | #define VMX_VMFUNC_EPTP_SWITCHING VMFUNC_CONTROL_BIT(EPTP_SWITCHING) |
129 | #define VMFUNC_EPTP_ENTRIES 512 |
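
/*
 * Illustration: the VM-function feature flags sit at bit positions 28 and up
 * within their vmxfeatures.h word, so subtracting 28 maps them onto the
 * VM-function controls field.  EPTP switching is VM function 0, hence
 * VMX_VMFUNC_EPTP_SWITCHING evaluates to BIT(0).
 */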
130 | |
131 | #define VMX_BASIC_32BIT_PHYS_ADDR_ONLY BIT_ULL(48) |
132 | #define VMX_BASIC_DUAL_MONITOR_TREATMENT BIT_ULL(49) |
133 | #define VMX_BASIC_INOUT BIT_ULL(54) |
134 | #define VMX_BASIC_TRUE_CTLS BIT_ULL(55) |
135 | |
136 | static inline u32 vmx_basic_vmcs_revision_id(u64 vmx_basic) |
137 | { |
138 | return vmx_basic & GENMASK_ULL(30, 0); |
139 | } |
140 | |
141 | static inline u32 vmx_basic_vmcs_size(u64 vmx_basic) |
142 | { |
143 | return (vmx_basic & GENMASK_ULL(44, 32)) >> 32; |
144 | } |
145 | |
146 | static inline u32 vmx_basic_vmcs_mem_type(u64 vmx_basic) |
147 | { |
148 | return (vmx_basic & GENMASK_ULL(53, 50)) >> 50; |
149 | } |
150 | |
151 | static inline u64 vmx_basic_encode_vmcs_info(u32 revision, u16 size, u8 memtype) |
152 | { |
153 | return revision | ((u64)size << 32) | ((u64)memtype << 50); |
154 | } |
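
/*
 * Usage sketch (illustrative only): the accessors above simply carve up a
 * raw MSR_IA32_VMX_BASIC value, e.g.
 *
 *	u64 basic;
 *
 *	rdmsrl(MSR_IA32_VMX_BASIC, basic);
 *	pr_info("VMCS revision %u, region size %u bytes\n",
 *		vmx_basic_vmcs_revision_id(basic), vmx_basic_vmcs_size(basic));
 *
 * vmx_basic_encode_vmcs_info() is the inverse for the fields it takes:
 * revision (bits 30:0), region size (bits 44:32) and memory type (bits 53:50).
 */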
155 | |
156 | #define VMX_MISC_SAVE_EFER_LMA BIT_ULL(5) |
157 | #define VMX_MISC_ACTIVITY_HLT BIT_ULL(6) |
158 | #define VMX_MISC_ACTIVITY_SHUTDOWN BIT_ULL(7) |
159 | #define VMX_MISC_ACTIVITY_WAIT_SIPI BIT_ULL(8) |
160 | #define VMX_MISC_INTEL_PT BIT_ULL(14) |
161 | #define VMX_MISC_RDMSR_IN_SMM BIT_ULL(15) |
162 | #define VMX_MISC_VMXOFF_BLOCK_SMI BIT_ULL(28) |
163 | #define VMX_MISC_VMWRITE_SHADOW_RO_FIELDS BIT_ULL(29) |
164 | #define VMX_MISC_ZERO_LEN_INS BIT_ULL(30) |
165 | #define VMX_MISC_MSR_LIST_MULTIPLIER 512 |
166 | |
167 | static inline int vmx_misc_preemption_timer_rate(u64 vmx_misc) |
168 | { |
169 | return vmx_misc & GENMASK_ULL(4, 0); |
170 | } |
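
/*
 * The value returned above is the "X" described in the SDM: an active
 * VMX-preemption timer counts down by 1 each time bit X of the TSC toggles,
 * i.e. the timer effectively runs at (TSC frequency >> X).  A rate of 5, for
 * example, means one timer tick per 32 TSC cycles.
 */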
171 | |
172 | static inline int vmx_misc_cr3_count(u64 vmx_misc) |
173 | { |
174 | return (vmx_misc & GENMASK_ULL(24, 16)) >> 16; |
175 | } |
176 | |
177 | static inline int vmx_misc_max_msr(u64 vmx_misc) |
178 | { |
179 | return (vmx_misc & GENMASK_ULL(27, 25)) >> 25; |
180 | } |
181 | |
182 | static inline int vmx_misc_mseg_revid(u64 vmx_misc) |
183 | { |
184 | return (vmx_misc & GENMASK_ULL(63, 32)) >> 32; |
185 | } |
186 | |
187 | /* VMCS Encodings */ |
188 | enum vmcs_field { |
189 | VIRTUAL_PROCESSOR_ID = 0x00000000, |
190 | POSTED_INTR_NV = 0x00000002, |
191 | LAST_PID_POINTER_INDEX = 0x00000008, |
192 | GUEST_ES_SELECTOR = 0x00000800, |
193 | GUEST_CS_SELECTOR = 0x00000802, |
194 | GUEST_SS_SELECTOR = 0x00000804, |
195 | GUEST_DS_SELECTOR = 0x00000806, |
196 | GUEST_FS_SELECTOR = 0x00000808, |
197 | GUEST_GS_SELECTOR = 0x0000080a, |
198 | GUEST_LDTR_SELECTOR = 0x0000080c, |
199 | GUEST_TR_SELECTOR = 0x0000080e, |
200 | GUEST_INTR_STATUS = 0x00000810, |
201 | GUEST_PML_INDEX = 0x00000812, |
202 | HOST_ES_SELECTOR = 0x00000c00, |
203 | HOST_CS_SELECTOR = 0x00000c02, |
204 | HOST_SS_SELECTOR = 0x00000c04, |
205 | HOST_DS_SELECTOR = 0x00000c06, |
206 | HOST_FS_SELECTOR = 0x00000c08, |
207 | HOST_GS_SELECTOR = 0x00000c0a, |
208 | HOST_TR_SELECTOR = 0x00000c0c, |
209 | IO_BITMAP_A = 0x00002000, |
210 | IO_BITMAP_A_HIGH = 0x00002001, |
211 | IO_BITMAP_B = 0x00002002, |
212 | IO_BITMAP_B_HIGH = 0x00002003, |
213 | MSR_BITMAP = 0x00002004, |
214 | MSR_BITMAP_HIGH = 0x00002005, |
215 | VM_EXIT_MSR_STORE_ADDR = 0x00002006, |
216 | VM_EXIT_MSR_STORE_ADDR_HIGH = 0x00002007, |
217 | VM_EXIT_MSR_LOAD_ADDR = 0x00002008, |
218 | VM_EXIT_MSR_LOAD_ADDR_HIGH = 0x00002009, |
219 | VM_ENTRY_MSR_LOAD_ADDR = 0x0000200a, |
220 | VM_ENTRY_MSR_LOAD_ADDR_HIGH = 0x0000200b, |
221 | PML_ADDRESS = 0x0000200e, |
222 | PML_ADDRESS_HIGH = 0x0000200f, |
223 | TSC_OFFSET = 0x00002010, |
224 | TSC_OFFSET_HIGH = 0x00002011, |
225 | VIRTUAL_APIC_PAGE_ADDR = 0x00002012, |
226 | VIRTUAL_APIC_PAGE_ADDR_HIGH = 0x00002013, |
227 | APIC_ACCESS_ADDR = 0x00002014, |
228 | APIC_ACCESS_ADDR_HIGH = 0x00002015, |
229 | POSTED_INTR_DESC_ADDR = 0x00002016, |
230 | POSTED_INTR_DESC_ADDR_HIGH = 0x00002017, |
231 | VM_FUNCTION_CONTROL = 0x00002018, |
232 | VM_FUNCTION_CONTROL_HIGH = 0x00002019, |
233 | EPT_POINTER = 0x0000201a, |
234 | EPT_POINTER_HIGH = 0x0000201b, |
235 | EOI_EXIT_BITMAP0 = 0x0000201c, |
236 | EOI_EXIT_BITMAP0_HIGH = 0x0000201d, |
237 | EOI_EXIT_BITMAP1 = 0x0000201e, |
238 | EOI_EXIT_BITMAP1_HIGH = 0x0000201f, |
239 | EOI_EXIT_BITMAP2 = 0x00002020, |
240 | EOI_EXIT_BITMAP2_HIGH = 0x00002021, |
241 | EOI_EXIT_BITMAP3 = 0x00002022, |
242 | EOI_EXIT_BITMAP3_HIGH = 0x00002023, |
243 | EPTP_LIST_ADDRESS = 0x00002024, |
244 | EPTP_LIST_ADDRESS_HIGH = 0x00002025, |
245 | VMREAD_BITMAP = 0x00002026, |
246 | VMREAD_BITMAP_HIGH = 0x00002027, |
247 | VMWRITE_BITMAP = 0x00002028, |
248 | VMWRITE_BITMAP_HIGH = 0x00002029, |
249 | VE_INFORMATION_ADDRESS = 0x0000202A, |
250 | VE_INFORMATION_ADDRESS_HIGH = 0x0000202B, |
251 | XSS_EXIT_BITMAP = 0x0000202C, |
252 | XSS_EXIT_BITMAP_HIGH = 0x0000202D, |
253 | ENCLS_EXITING_BITMAP = 0x0000202E, |
254 | ENCLS_EXITING_BITMAP_HIGH = 0x0000202F, |
255 | TSC_MULTIPLIER = 0x00002032, |
256 | TSC_MULTIPLIER_HIGH = 0x00002033, |
257 | TERTIARY_VM_EXEC_CONTROL = 0x00002034, |
258 | TERTIARY_VM_EXEC_CONTROL_HIGH = 0x00002035, |
259 | SHARED_EPT_POINTER = 0x0000203C, |
260 | PID_POINTER_TABLE = 0x00002042, |
261 | PID_POINTER_TABLE_HIGH = 0x00002043, |
262 | GUEST_PHYSICAL_ADDRESS = 0x00002400, |
263 | GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401, |
264 | VMCS_LINK_POINTER = 0x00002800, |
265 | VMCS_LINK_POINTER_HIGH = 0x00002801, |
266 | GUEST_IA32_DEBUGCTL = 0x00002802, |
267 | GUEST_IA32_DEBUGCTL_HIGH = 0x00002803, |
268 | GUEST_IA32_PAT = 0x00002804, |
269 | GUEST_IA32_PAT_HIGH = 0x00002805, |
270 | GUEST_IA32_EFER = 0x00002806, |
271 | GUEST_IA32_EFER_HIGH = 0x00002807, |
272 | GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808, |
273 | GUEST_IA32_PERF_GLOBAL_CTRL_HIGH= 0x00002809, |
274 | GUEST_PDPTR0 = 0x0000280a, |
275 | GUEST_PDPTR0_HIGH = 0x0000280b, |
276 | GUEST_PDPTR1 = 0x0000280c, |
277 | GUEST_PDPTR1_HIGH = 0x0000280d, |
278 | GUEST_PDPTR2 = 0x0000280e, |
279 | GUEST_PDPTR2_HIGH = 0x0000280f, |
280 | GUEST_PDPTR3 = 0x00002810, |
281 | GUEST_PDPTR3_HIGH = 0x00002811, |
282 | GUEST_BNDCFGS = 0x00002812, |
283 | GUEST_BNDCFGS_HIGH = 0x00002813, |
284 | GUEST_IA32_RTIT_CTL = 0x00002814, |
285 | GUEST_IA32_RTIT_CTL_HIGH = 0x00002815, |
286 | HOST_IA32_PAT = 0x00002c00, |
287 | HOST_IA32_PAT_HIGH = 0x00002c01, |
288 | HOST_IA32_EFER = 0x00002c02, |
289 | HOST_IA32_EFER_HIGH = 0x00002c03, |
290 | HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04, |
291 | HOST_IA32_PERF_GLOBAL_CTRL_HIGH = 0x00002c05, |
292 | PIN_BASED_VM_EXEC_CONTROL = 0x00004000, |
293 | CPU_BASED_VM_EXEC_CONTROL = 0x00004002, |
294 | EXCEPTION_BITMAP = 0x00004004, |
295 | PAGE_FAULT_ERROR_CODE_MASK = 0x00004006, |
296 | PAGE_FAULT_ERROR_CODE_MATCH = 0x00004008, |
297 | CR3_TARGET_COUNT = 0x0000400a, |
298 | VM_EXIT_CONTROLS = 0x0000400c, |
299 | VM_EXIT_MSR_STORE_COUNT = 0x0000400e, |
300 | VM_EXIT_MSR_LOAD_COUNT = 0x00004010, |
301 | VM_ENTRY_CONTROLS = 0x00004012, |
302 | VM_ENTRY_MSR_LOAD_COUNT = 0x00004014, |
303 | VM_ENTRY_INTR_INFO_FIELD = 0x00004016, |
304 | VM_ENTRY_EXCEPTION_ERROR_CODE = 0x00004018, |
305 | VM_ENTRY_INSTRUCTION_LEN = 0x0000401a, |
306 | TPR_THRESHOLD = 0x0000401c, |
307 | SECONDARY_VM_EXEC_CONTROL = 0x0000401e, |
308 | PLE_GAP = 0x00004020, |
309 | PLE_WINDOW = 0x00004022, |
310 | NOTIFY_WINDOW = 0x00004024, |
311 | VM_INSTRUCTION_ERROR = 0x00004400, |
312 | VM_EXIT_REASON = 0x00004402, |
313 | VM_EXIT_INTR_INFO = 0x00004404, |
314 | VM_EXIT_INTR_ERROR_CODE = 0x00004406, |
315 | IDT_VECTORING_INFO_FIELD = 0x00004408, |
316 | IDT_VECTORING_ERROR_CODE = 0x0000440a, |
317 | VM_EXIT_INSTRUCTION_LEN = 0x0000440c, |
318 | VMX_INSTRUCTION_INFO = 0x0000440e, |
319 | GUEST_ES_LIMIT = 0x00004800, |
320 | GUEST_CS_LIMIT = 0x00004802, |
321 | GUEST_SS_LIMIT = 0x00004804, |
322 | GUEST_DS_LIMIT = 0x00004806, |
323 | GUEST_FS_LIMIT = 0x00004808, |
324 | GUEST_GS_LIMIT = 0x0000480a, |
325 | GUEST_LDTR_LIMIT = 0x0000480c, |
326 | GUEST_TR_LIMIT = 0x0000480e, |
327 | GUEST_GDTR_LIMIT = 0x00004810, |
328 | GUEST_IDTR_LIMIT = 0x00004812, |
329 | GUEST_ES_AR_BYTES = 0x00004814, |
330 | GUEST_CS_AR_BYTES = 0x00004816, |
331 | GUEST_SS_AR_BYTES = 0x00004818, |
332 | GUEST_DS_AR_BYTES = 0x0000481a, |
333 | GUEST_FS_AR_BYTES = 0x0000481c, |
334 | GUEST_GS_AR_BYTES = 0x0000481e, |
335 | GUEST_LDTR_AR_BYTES = 0x00004820, |
336 | GUEST_TR_AR_BYTES = 0x00004822, |
337 | GUEST_INTERRUPTIBILITY_INFO = 0x00004824, |
338 | GUEST_ACTIVITY_STATE = 0x00004826, |
339 | GUEST_SYSENTER_CS = 0x0000482A, |
340 | VMX_PREEMPTION_TIMER_VALUE = 0x0000482E, |
341 | HOST_IA32_SYSENTER_CS = 0x00004c00, |
342 | CR0_GUEST_HOST_MASK = 0x00006000, |
343 | CR4_GUEST_HOST_MASK = 0x00006002, |
344 | CR0_READ_SHADOW = 0x00006004, |
345 | CR4_READ_SHADOW = 0x00006006, |
346 | CR3_TARGET_VALUE0 = 0x00006008, |
347 | CR3_TARGET_VALUE1 = 0x0000600a, |
348 | CR3_TARGET_VALUE2 = 0x0000600c, |
349 | CR3_TARGET_VALUE3 = 0x0000600e, |
350 | EXIT_QUALIFICATION = 0x00006400, |
351 | GUEST_LINEAR_ADDRESS = 0x0000640a, |
352 | GUEST_CR0 = 0x00006800, |
353 | GUEST_CR3 = 0x00006802, |
354 | GUEST_CR4 = 0x00006804, |
355 | GUEST_ES_BASE = 0x00006806, |
356 | GUEST_CS_BASE = 0x00006808, |
357 | GUEST_SS_BASE = 0x0000680a, |
358 | GUEST_DS_BASE = 0x0000680c, |
359 | GUEST_FS_BASE = 0x0000680e, |
360 | GUEST_GS_BASE = 0x00006810, |
361 | GUEST_LDTR_BASE = 0x00006812, |
362 | GUEST_TR_BASE = 0x00006814, |
363 | GUEST_GDTR_BASE = 0x00006816, |
364 | GUEST_IDTR_BASE = 0x00006818, |
365 | GUEST_DR7 = 0x0000681a, |
366 | GUEST_RSP = 0x0000681c, |
367 | GUEST_RIP = 0x0000681e, |
368 | GUEST_RFLAGS = 0x00006820, |
369 | GUEST_PENDING_DBG_EXCEPTIONS = 0x00006822, |
370 | GUEST_SYSENTER_ESP = 0x00006824, |
371 | GUEST_SYSENTER_EIP = 0x00006826, |
372 | HOST_CR0 = 0x00006c00, |
373 | HOST_CR3 = 0x00006c02, |
374 | HOST_CR4 = 0x00006c04, |
375 | HOST_FS_BASE = 0x00006c06, |
376 | HOST_GS_BASE = 0x00006c08, |
377 | HOST_TR_BASE = 0x00006c0a, |
378 | HOST_GDTR_BASE = 0x00006c0c, |
379 | HOST_IDTR_BASE = 0x00006c0e, |
380 | HOST_IA32_SYSENTER_ESP = 0x00006c10, |
381 | HOST_IA32_SYSENTER_EIP = 0x00006c12, |
382 | HOST_RSP = 0x00006c14, |
383 | HOST_RIP = 0x00006c16, |
384 | }; |
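
/*
 * Editorial note on the encodings above: per the SDM, bits 14:13 of a VMCS
 * field encoding give its width (0 = 16-bit, 1 = 64-bit, 2 = 32-bit,
 * 3 = natural-width), bits 11:10 its type (0 = control, 1 = VM-exit
 * information, 2 = guest-state, 3 = host-state), and bit 0 selects the high
 * 32 bits of a 64-bit field, which is what the *_HIGH entries provide for
 * 32-bit accesses.  GUEST_CR3 (0x6802), for example, decodes as a
 * natural-width guest-state field.
 */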
385 | |
386 | /* |
387 | * Interruption-information format |
388 | */ |
389 | #define INTR_INFO_VECTOR_MASK 0xff /* 7:0 */ |
390 | #define INTR_INFO_INTR_TYPE_MASK 0x700 /* 10:8 */ |
391 | #define INTR_INFO_DELIVER_CODE_MASK 0x800 /* 11 */ |
392 | #define INTR_INFO_UNBLOCK_NMI 0x1000 /* 12 */ |
393 | #define INTR_INFO_VALID_MASK 0x80000000 /* 31 */ |
394 | #define INTR_INFO_RESVD_BITS_MASK 0x7ffff000 |
395 | |
396 | #define VECTORING_INFO_VECTOR_MASK INTR_INFO_VECTOR_MASK |
397 | #define VECTORING_INFO_TYPE_MASK INTR_INFO_INTR_TYPE_MASK |
398 | #define VECTORING_INFO_DELIVER_CODE_MASK INTR_INFO_DELIVER_CODE_MASK |
399 | #define VECTORING_INFO_VALID_MASK INTR_INFO_VALID_MASK |
400 | |
401 | #define INTR_TYPE_EXT_INTR (EVENT_TYPE_EXTINT << 8) /* external interrupt */ |
402 | #define INTR_TYPE_RESERVED (EVENT_TYPE_RESERVED << 8) /* reserved */ |
403 | #define INTR_TYPE_NMI_INTR (EVENT_TYPE_NMI << 8) /* NMI */ |
404 | #define INTR_TYPE_HARD_EXCEPTION (EVENT_TYPE_HWEXC << 8) /* processor exception */ |
405 | #define INTR_TYPE_SOFT_INTR (EVENT_TYPE_SWINT << 8) /* software interrupt */ |
406 | #define INTR_TYPE_PRIV_SW_EXCEPTION (EVENT_TYPE_PRIV_SWEXC << 8) /* ICE breakpoint */ |
407 | #define INTR_TYPE_SOFT_EXCEPTION (EVENT_TYPE_SWEXC << 8) /* software exception */ |
408 | #define INTR_TYPE_OTHER_EVENT (EVENT_TYPE_OTHER << 8) /* other event */ |
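
/*
 * Illustration (not a definition from this header): a VM_ENTRY_INTR_INFO_FIELD
 * value for injecting an event is assembled from the pieces above.  Injecting
 * a #GP with an error code would look roughly like
 *
 *	INTR_INFO_VALID_MASK | INTR_TYPE_HARD_EXCEPTION |
 *	INTR_INFO_DELIVER_CODE_MASK | X86_TRAP_GP
 *
 * with the error code itself written to VM_ENTRY_EXCEPTION_ERROR_CODE.
 */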
409 | |
410 | /* GUEST_INTERRUPTIBILITY_INFO flags. */ |
411 | #define GUEST_INTR_STATE_STI 0x00000001 |
412 | #define GUEST_INTR_STATE_MOV_SS 0x00000002 |
413 | #define GUEST_INTR_STATE_SMI 0x00000004 |
414 | #define GUEST_INTR_STATE_NMI 0x00000008 |
415 | #define GUEST_INTR_STATE_ENCLAVE_INTR 0x00000010 |
416 | |
417 | /* GUEST_ACTIVITY_STATE flags */ |
418 | #define GUEST_ACTIVITY_ACTIVE 0 |
419 | #define GUEST_ACTIVITY_HLT 1 |
420 | #define GUEST_ACTIVITY_SHUTDOWN 2 |
421 | #define GUEST_ACTIVITY_WAIT_SIPI 3 |
422 | |
423 | /* |
424 | * Exit Qualifications for MOV for Control Register Access |
425 | */ |
#define CONTROL_REG_ACCESS_NUM 0x7 /* 2:0, number of control reg. */
#define CONTROL_REG_ACCESS_TYPE 0x30 /* 5:4, access type */
#define CONTROL_REG_ACCESS_REG 0xf00 /* 11:8, general purpose reg. */
429 | #define LMSW_SOURCE_DATA_SHIFT 16 |
#define LMSW_SOURCE_DATA (0xFFFF << LMSW_SOURCE_DATA_SHIFT) /* 31:16, LMSW source data */
431 | #define REG_EAX (0 << 8) |
432 | #define REG_ECX (1 << 8) |
433 | #define REG_EDX (2 << 8) |
434 | #define REG_EBX (3 << 8) |
435 | #define REG_ESP (4 << 8) |
436 | #define REG_EBP (5 << 8) |
437 | #define REG_ESI (6 << 8) |
438 | #define REG_EDI (7 << 8) |
439 | #define REG_R8 (8 << 8) |
440 | #define REG_R9 (9 << 8) |
441 | #define REG_R10 (10 << 8) |
442 | #define REG_R11 (11 << 8) |
443 | #define REG_R12 (12 << 8) |
444 | #define REG_R13 (13 << 8) |
445 | #define REG_R14 (14 << 8) |
446 | #define REG_R15 (15 << 8) |
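
/*
 * Worked example (illustrative): a control-register-access exit qualification
 * of 0x304 decodes as CR number 4 (bits 2:0), access type 0 = MOV to CR
 * (bits 5:4) and source register 3, i.e. RBX/REG_EBX (bits 11:8).
 */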
447 | |
448 | /* |
449 | * Exit Qualifications for MOV for Debug Register Access |
450 | */ |
451 | #define DEBUG_REG_ACCESS_NUM 0x7 /* 2:0, number of debug reg. */ |
452 | #define DEBUG_REG_ACCESS_TYPE 0x10 /* 4, direction of access */ |
453 | #define TYPE_MOV_TO_DR (0 << 4) |
454 | #define TYPE_MOV_FROM_DR (1 << 4) |
455 | #define DEBUG_REG_ACCESS_REG(eq) (((eq) >> 8) & 0xf) /* 11:8, general purpose reg. */ |
456 | |
457 | |
458 | /* |
459 | * Exit Qualifications for APIC-Access |
460 | */ |
461 | #define APIC_ACCESS_OFFSET 0xfff /* 11:0, offset within the APIC page */ |
462 | #define APIC_ACCESS_TYPE 0xf000 /* 15:12, access type */ |
463 | #define TYPE_LINEAR_APIC_INST_READ (0 << 12) |
464 | #define TYPE_LINEAR_APIC_INST_WRITE (1 << 12) |
465 | #define TYPE_LINEAR_APIC_INST_FETCH (2 << 12) |
466 | #define TYPE_LINEAR_APIC_EVENT (3 << 12) |
467 | #define TYPE_PHYSICAL_APIC_EVENT (10 << 12) |
468 | #define TYPE_PHYSICAL_APIC_INST (15 << 12) |
469 | |
470 | /* segment AR in VMCS -- these are different from what LAR reports */ |
471 | #define VMX_SEGMENT_AR_L_MASK (1 << 13) |
472 | |
473 | #define VMX_AR_TYPE_ACCESSES_MASK 1 |
474 | #define VMX_AR_TYPE_READABLE_MASK (1 << 1) |
475 | #define VMX_AR_TYPE_WRITEABLE_MASK (1 << 2) |
476 | #define VMX_AR_TYPE_CODE_MASK (1 << 3) |
477 | #define VMX_AR_TYPE_MASK 0x0f |
478 | #define VMX_AR_TYPE_BUSY_64_TSS 11 |
479 | #define VMX_AR_TYPE_BUSY_32_TSS 11 |
480 | #define VMX_AR_TYPE_BUSY_16_TSS 3 |
481 | #define VMX_AR_TYPE_LDT 2 |
482 | |
483 | #define VMX_AR_UNUSABLE_MASK (1 << 16) |
484 | #define VMX_AR_S_MASK (1 << 4) |
485 | #define VMX_AR_P_MASK (1 << 7) |
486 | #define VMX_AR_L_MASK (1 << 13) |
487 | #define VMX_AR_DB_MASK (1 << 14) |
488 | #define VMX_AR_G_MASK (1 << 15) |
489 | #define VMX_AR_DPL_SHIFT 5 |
490 | #define VMX_AR_DPL(ar) (((ar) >> VMX_AR_DPL_SHIFT) & 3) |
491 | |
492 | #define VMX_AR_RESERVD_MASK 0xfffe0f00 |
493 | |
494 | #define TSS_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 0) |
495 | #define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 1) |
496 | #define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 2) |
497 | |
498 | #define VMX_NR_VPIDS (1 << 16) |
499 | #define VMX_VPID_EXTENT_INDIVIDUAL_ADDR 0 |
500 | #define VMX_VPID_EXTENT_SINGLE_CONTEXT 1 |
501 | #define VMX_VPID_EXTENT_ALL_CONTEXT 2 |
502 | #define VMX_VPID_EXTENT_SINGLE_NON_GLOBAL 3 |
503 | |
504 | #define VMX_EPT_EXTENT_CONTEXT 1 |
505 | #define VMX_EPT_EXTENT_GLOBAL 2 |
506 | #define VMX_EPT_EXTENT_SHIFT 24 |
507 | |
508 | #define VMX_EPT_EXECUTE_ONLY_BIT (1ull) |
509 | #define VMX_EPT_PAGE_WALK_4_BIT (1ull << 6) |
510 | #define VMX_EPT_PAGE_WALK_5_BIT (1ull << 7) |
511 | #define VMX_EPTP_UC_BIT (1ull << 8) |
512 | #define VMX_EPTP_WB_BIT (1ull << 14) |
513 | #define VMX_EPT_2MB_PAGE_BIT (1ull << 16) |
514 | #define VMX_EPT_1GB_PAGE_BIT (1ull << 17) |
515 | #define VMX_EPT_INVEPT_BIT (1ull << 20) |
516 | #define VMX_EPT_AD_BIT (1ull << 21) |
517 | #define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25) |
518 | #define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26) |
519 | |
520 | #define VMX_VPID_INVVPID_BIT (1ull << 0) /* (32 - 32) */ |
521 | #define VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT (1ull << 8) /* (40 - 32) */ |
522 | #define VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT (1ull << 9) /* (41 - 32) */ |
523 | #define VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT (1ull << 10) /* (42 - 32) */ |
524 | #define VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT (1ull << 11) /* (43 - 32) */ |
525 | |
526 | #define VMX_EPT_MT_EPTE_SHIFT 3 |
527 | #define VMX_EPTP_PWL_MASK 0x38ull |
528 | #define VMX_EPTP_PWL_4 0x18ull |
529 | #define VMX_EPTP_PWL_5 0x20ull |
530 | #define VMX_EPTP_AD_ENABLE_BIT (1ull << 6) |
531 | /* The EPTP memtype is encoded in bits 2:0, i.e. doesn't need to be shifted. */ |
532 | #define VMX_EPTP_MT_MASK 0x7ull |
533 | #define VMX_EPTP_MT_WB X86_MEMTYPE_WB |
534 | #define VMX_EPTP_MT_UC X86_MEMTYPE_UC |
535 | #define VMX_EPT_READABLE_MASK 0x1ull |
536 | #define VMX_EPT_WRITABLE_MASK 0x2ull |
537 | #define VMX_EPT_EXECUTABLE_MASK 0x4ull |
538 | #define VMX_EPT_IPAT_BIT (1ull << 6) |
539 | #define VMX_EPT_ACCESS_BIT (1ull << 8) |
540 | #define VMX_EPT_DIRTY_BIT (1ull << 9) |
541 | #define VMX_EPT_SUPPRESS_VE_BIT (1ull << 63) |
542 | #define VMX_EPT_RWX_MASK (VMX_EPT_READABLE_MASK | \ |
543 | VMX_EPT_WRITABLE_MASK | \ |
544 | VMX_EPT_EXECUTABLE_MASK) |
545 | #define VMX_EPT_MT_MASK (7ull << VMX_EPT_MT_EPTE_SHIFT) |
546 | |
547 | static inline u8 vmx_eptp_page_walk_level(u64 eptp) |
548 | { |
549 | u64 encoded_level = eptp & VMX_EPTP_PWL_MASK; |
550 | |
551 | if (encoded_level == VMX_EPTP_PWL_5) |
552 | return 5; |
553 | |
554 | /* @eptp must be pre-validated by the caller. */ |
555 | WARN_ON_ONCE(encoded_level != VMX_EPTP_PWL_4); |
556 | return 4; |
557 | } |
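
/*
 * Sketch of how an EPTP value is typically assembled (illustrative; the root
 * HPA below is a stand-in, not something defined here): the 4KiB-aligned EPT
 * root is OR'd with the memory type, page-walk length and, optionally, the
 * accessed/dirty enable bit, e.g.
 *
 *	eptp = root_hpa | VMX_EPTP_MT_WB | VMX_EPTP_PWL_4 | VMX_EPTP_AD_ENABLE_BIT;
 *
 * vmx_eptp_page_walk_level() then recovers 4 from such a value.
 */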
558 | |
/* Writable+executable but not readable: an illegal combination used to trigger an EPT Misconfiguration in order to track MMIO */
560 | #define VMX_EPT_MISCONFIG_WX_VALUE (VMX_EPT_WRITABLE_MASK | \ |
561 | VMX_EPT_EXECUTABLE_MASK) |
562 | |
563 | #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul |
564 | |
565 | struct vmx_msr_entry { |
566 | u32 index; |
567 | u32 reserved; |
568 | u64 value; |
569 | } __aligned(16); |
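
/*
 * The VM-entry/VM-exit MSR load and store areas referenced by the
 * VM_{ENTRY,EXIT}_MSR_{LOAD,STORE}_ADDR fields are arrays of these 16-byte
 * entries, with the corresponding *_COUNT fields giving the number of
 * entries.  The SDM's recommended maximum for that count works out to
 * (vmx_misc_max_msr(misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER.
 */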
570 | |
571 | /* |
572 | * Exit Qualifications for entry failure during or after loading guest state |
573 | */ |
574 | enum vm_entry_failure_code { |
575 | ENTRY_FAIL_DEFAULT = 0, |
576 | ENTRY_FAIL_PDPTE = 2, |
577 | ENTRY_FAIL_NMI = 3, |
578 | ENTRY_FAIL_VMCS_LINK_PTR = 4, |
579 | }; |
580 | |
581 | /* |
582 | * Exit Qualifications for EPT Violations |
583 | */ |
584 | #define EPT_VIOLATION_ACC_READ BIT(0) |
585 | #define EPT_VIOLATION_ACC_WRITE BIT(1) |
586 | #define EPT_VIOLATION_ACC_INSTR BIT(2) |
587 | #define EPT_VIOLATION_PROT_READ BIT(3) |
588 | #define EPT_VIOLATION_PROT_WRITE BIT(4) |
589 | #define EPT_VIOLATION_PROT_EXEC BIT(5) |
590 | #define EPT_VIOLATION_EXEC_FOR_RING3_LIN BIT(6) |
591 | #define EPT_VIOLATION_PROT_MASK (EPT_VIOLATION_PROT_READ | \ |
592 | EPT_VIOLATION_PROT_WRITE | \ |
593 | EPT_VIOLATION_PROT_EXEC) |
594 | #define EPT_VIOLATION_GVA_IS_VALID BIT(7) |
595 | #define EPT_VIOLATION_GVA_TRANSLATED BIT(8) |
596 | |
597 | #define EPT_VIOLATION_RWX_TO_PROT(__epte) (((__epte) & VMX_EPT_RWX_MASK) << 3) |
598 | |
599 | static_assert(EPT_VIOLATION_RWX_TO_PROT(VMX_EPT_RWX_MASK) == |
600 | (EPT_VIOLATION_PROT_READ | EPT_VIOLATION_PROT_WRITE | EPT_VIOLATION_PROT_EXEC)); |
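
/*
 * EPT_VIOLATION_RWX_TO_PROT() shifts an EPT entry's R/W/X permission bits
 * (bits 2:0) up by three so that they line up with the exit qualification's
 * "GPA was readable/writable/executable" bits (5:3), as the assertion above
 * verifies.
 */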
601 | |
602 | /* |
603 | * Exit Qualifications for NOTIFY VM EXIT |
604 | */ |
605 | #define NOTIFY_VM_CONTEXT_INVALID BIT(0) |
606 | |
607 | /* |
608 | * VM-instruction error numbers |
609 | */ |
610 | enum vm_instruction_error_number { |
611 | VMXERR_VMCALL_IN_VMX_ROOT_OPERATION = 1, |
612 | VMXERR_VMCLEAR_INVALID_ADDRESS = 2, |
613 | VMXERR_VMCLEAR_VMXON_POINTER = 3, |
614 | VMXERR_VMLAUNCH_NONCLEAR_VMCS = 4, |
615 | VMXERR_VMRESUME_NONLAUNCHED_VMCS = 5, |
616 | VMXERR_VMRESUME_AFTER_VMXOFF = 6, |
617 | VMXERR_ENTRY_INVALID_CONTROL_FIELD = 7, |
618 | VMXERR_ENTRY_INVALID_HOST_STATE_FIELD = 8, |
619 | VMXERR_VMPTRLD_INVALID_ADDRESS = 9, |
620 | VMXERR_VMPTRLD_VMXON_POINTER = 10, |
621 | VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID = 11, |
622 | VMXERR_UNSUPPORTED_VMCS_COMPONENT = 12, |
623 | VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT = 13, |
624 | VMXERR_VMXON_IN_VMX_ROOT_OPERATION = 15, |
625 | VMXERR_ENTRY_INVALID_EXECUTIVE_VMCS_POINTER = 16, |
626 | VMXERR_ENTRY_NONLAUNCHED_EXECUTIVE_VMCS = 17, |
627 | VMXERR_ENTRY_EXECUTIVE_VMCS_POINTER_NOT_VMXON_POINTER = 18, |
628 | VMXERR_VMCALL_NONCLEAR_VMCS = 19, |
629 | VMXERR_VMCALL_INVALID_VM_EXIT_CONTROL_FIELDS = 20, |
630 | VMXERR_VMCALL_INCORRECT_MSEG_REVISION_ID = 22, |
631 | VMXERR_VMXOFF_UNDER_DUAL_MONITOR_TREATMENT_OF_SMIS_AND_SMM = 23, |
632 | VMXERR_VMCALL_INVALID_SMM_MONITOR_FEATURES = 24, |
633 | VMXERR_ENTRY_INVALID_VM_EXECUTION_CONTROL_FIELDS_IN_EXECUTIVE_VMCS = 25, |
634 | VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS = 26, |
635 | VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28, |
636 | }; |
637 | |
638 | /* |
639 | * VM-instruction errors that can be encountered on VM-Enter, used to trace |
640 | * nested VM-Enter failures reported by hardware. Errors unique to VM-Enter |
641 | * from a SMI Transfer Monitor are not included as things have gone seriously |
642 | * sideways if we get one of those... |
643 | */ |
644 | #define VMX_VMENTER_INSTRUCTION_ERRORS \ |
645 | { VMXERR_VMLAUNCH_NONCLEAR_VMCS, "VMLAUNCH_NONCLEAR_VMCS" }, \ |
646 | { VMXERR_VMRESUME_NONLAUNCHED_VMCS, "VMRESUME_NONLAUNCHED_VMCS" }, \ |
647 | { VMXERR_VMRESUME_AFTER_VMXOFF, "VMRESUME_AFTER_VMXOFF" }, \ |
648 | { VMXERR_ENTRY_INVALID_CONTROL_FIELD, "VMENTRY_INVALID_CONTROL_FIELD" }, \ |
649 | { VMXERR_ENTRY_INVALID_HOST_STATE_FIELD, "VMENTRY_INVALID_HOST_STATE_FIELD" }, \ |
650 | { VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS, "VMENTRY_EVENTS_BLOCKED_BY_MOV_SS" } |
651 | |
652 | enum vmx_l1d_flush_state { |
653 | VMENTER_L1D_FLUSH_AUTO, |
654 | VMENTER_L1D_FLUSH_NEVER, |
655 | VMENTER_L1D_FLUSH_COND, |
656 | VMENTER_L1D_FLUSH_ALWAYS, |
657 | VMENTER_L1D_FLUSH_EPT_DISABLED, |
658 | VMENTER_L1D_FLUSH_NOT_REQUIRED, |
659 | }; |
660 | |
661 | extern enum vmx_l1d_flush_state l1tf_vmx_mitigation; |
662 | |
663 | struct vmx_ve_information { |
664 | u32 exit_reason; |
665 | u32 delivery; |
666 | u64 exit_qualification; |
667 | u64 guest_linear_address; |
668 | u64 guest_physical_address; |
669 | u16 eptp_index; |
670 | }; |
671 | |
#endif /* VMX_H */
673 | |