/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SVM_H
#define __SVM_H

#include <uapi/asm/svm.h>
#include <uapi/asm/kvm.h>

#include <hyperv/hvhdk.h>

/*
 * 32-bit intercept words in the VMCB Control Area, starting
 * at Byte offset 000h.
 */
enum intercept_words {
	INTERCEPT_CR = 0,
	INTERCEPT_DR,
	INTERCEPT_EXCEPTION,
	INTERCEPT_WORD3,
	INTERCEPT_WORD4,
	INTERCEPT_WORD5,
	MAX_INTERCEPT,
};

enum {
	/* Byte offset 000h (word 0) */
	INTERCEPT_CR0_READ = 0,
	INTERCEPT_CR3_READ = 3,
	INTERCEPT_CR4_READ = 4,
	INTERCEPT_CR8_READ = 8,
	INTERCEPT_CR0_WRITE = 16,
	INTERCEPT_CR3_WRITE = 16 + 3,
	INTERCEPT_CR4_WRITE = 16 + 4,
	INTERCEPT_CR8_WRITE = 16 + 8,
	/* Byte offset 004h (word 1) */
	INTERCEPT_DR0_READ = 32,
	INTERCEPT_DR1_READ,
	INTERCEPT_DR2_READ,
	INTERCEPT_DR3_READ,
	INTERCEPT_DR4_READ,
	INTERCEPT_DR5_READ,
	INTERCEPT_DR6_READ,
	INTERCEPT_DR7_READ,
	INTERCEPT_DR0_WRITE = 48,
	INTERCEPT_DR1_WRITE,
	INTERCEPT_DR2_WRITE,
	INTERCEPT_DR3_WRITE,
	INTERCEPT_DR4_WRITE,
	INTERCEPT_DR5_WRITE,
	INTERCEPT_DR6_WRITE,
	INTERCEPT_DR7_WRITE,
	/* Byte offset 008h (word 2) */
	INTERCEPT_EXCEPTION_OFFSET = 64,
	/* Byte offset 00Ch (word 3) */
	INTERCEPT_INTR = 96,
	INTERCEPT_NMI,
	INTERCEPT_SMI,
	INTERCEPT_INIT,
	INTERCEPT_VINTR,
	INTERCEPT_SELECTIVE_CR0,
	INTERCEPT_STORE_IDTR,
	INTERCEPT_STORE_GDTR,
	INTERCEPT_STORE_LDTR,
	INTERCEPT_STORE_TR,
	INTERCEPT_LOAD_IDTR,
	INTERCEPT_LOAD_GDTR,
	INTERCEPT_LOAD_LDTR,
	INTERCEPT_LOAD_TR,
	INTERCEPT_RDTSC,
	INTERCEPT_RDPMC,
	INTERCEPT_PUSHF,
	INTERCEPT_POPF,
	INTERCEPT_CPUID,
	INTERCEPT_RSM,
	INTERCEPT_IRET,
	INTERCEPT_INTn,
	INTERCEPT_INVD,
	INTERCEPT_PAUSE,
	INTERCEPT_HLT,
	INTERCEPT_INVLPG,
	INTERCEPT_INVLPGA,
	INTERCEPT_IOIO_PROT,
	INTERCEPT_MSR_PROT,
	INTERCEPT_TASK_SWITCH,
	INTERCEPT_FERR_FREEZE,
	INTERCEPT_SHUTDOWN,
	/* Byte offset 010h (word 4) */
	INTERCEPT_VMRUN = 128,
	INTERCEPT_VMMCALL,
	INTERCEPT_VMLOAD,
	INTERCEPT_VMSAVE,
	INTERCEPT_STGI,
	INTERCEPT_CLGI,
	INTERCEPT_SKINIT,
	INTERCEPT_RDTSCP,
	INTERCEPT_ICEBP,
	INTERCEPT_WBINVD,
	INTERCEPT_MONITOR,
	INTERCEPT_MWAIT,
	INTERCEPT_MWAIT_COND,
	INTERCEPT_XSETBV,
	INTERCEPT_RDPRU,
	TRAP_EFER_WRITE,
	TRAP_CR0_WRITE,
	TRAP_CR1_WRITE,
	TRAP_CR2_WRITE,
	TRAP_CR3_WRITE,
	TRAP_CR4_WRITE,
	TRAP_CR5_WRITE,
	TRAP_CR6_WRITE,
	TRAP_CR7_WRITE,
	TRAP_CR8_WRITE,
	/* Byte offset 014h (word 5) */
	INTERCEPT_INVLPGB = 160,
	INTERCEPT_INVLPGB_ILLEGAL,
	INTERCEPT_INVPCID,
	INTERCEPT_MCOMMIT,
	INTERCEPT_TLBSYNC,
	INTERCEPT_BUSLOCK,
	INTERCEPT_IDLE_HLT = 166,
};
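
/*
 * Illustrative helpers, not part of the kernel's API (KVM keeps its own
 * equivalents in arch/x86/kvm/svm/svm.h): each intercept number above is
 * a flat bit index into the intercepts[] array of the VMCB Control Area,
 * so word = nr / 32 and bit = nr % 32.
 */
static inline void example_set_intercept(u32 *intercepts, u32 nr)
{
	intercepts[nr / 32] |= BIT(nr % 32);	/* e.g. nr == INTERCEPT_CPUID */
}

static inline bool example_is_intercept_set(const u32 *intercepts, u32 nr)
{
	return intercepts[nr / 32] & BIT(nr % 32);
}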

struct __attribute__ ((__packed__)) vmcb_control_area {
	u32 intercepts[MAX_INTERCEPT];
	u32 reserved_1[15 - MAX_INTERCEPT];
	u16 pause_filter_thresh;
	u16 pause_filter_count;
	u64 iopm_base_pa;
	u64 msrpm_base_pa;
	u64 tsc_offset;
	u32 asid;
	u8 tlb_ctl;
	u8 reserved_2[3];
	u32 int_ctl;
	u32 int_vector;
	u32 int_state;
	u8 reserved_3[4];
	u32 exit_code;
	u32 exit_code_hi;
	u64 exit_info_1;
	u64 exit_info_2;
	u32 exit_int_info;
	u32 exit_int_info_err;
	u64 nested_ctl;
	u64 avic_vapic_bar;
	u64 ghcb_gpa;
	u32 event_inj;
	u32 event_inj_err;
	u64 nested_cr3;
	u64 virt_ext;
	u32 clean;
	u32 reserved_5;
	u64 next_rip;
	u8 insn_len;
	u8 insn_bytes[15];
	u64 avic_backing_page;	/* Offset 0xe0 */
	u8 reserved_6[8];	/* Offset 0xe8 */
	u64 avic_logical_id;	/* Offset 0xf0 */
	u64 avic_physical_id;	/* Offset 0xf8 */
	u8 reserved_7[8];
	u64 vmsa_pa;		/* Used for an SEV-ES guest */
	u8 reserved_8[16];
	u16 bus_lock_counter;		/* Offset 0x120 */
	u8 reserved_9[22];
	u64 allowed_sev_features;	/* Offset 0x138 */
	u64 guest_sev_features;		/* Offset 0x140 */
	u8 reserved_10[664];
	/*
	 * Offset 0x3e0, 32 bytes reserved
	 * for use by hypervisor/software.
	 */
	union {
		struct hv_vmcb_enlightenments hv_enlightenments;
		u8 reserved_sw[32];
	};
};
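
/*
 * Sketch of a minimal control-area setup, assuming iopm_pa and msrpm_pa
 * point at already-allocated permission bitmaps (hypothetical parameters;
 * the real initialization lives in KVM's init_vmcb()).
 */
static inline void example_init_control(struct vmcb_control_area *control,
					u64 iopm_pa, u64 msrpm_pa)
{
	control->iopm_base_pa = iopm_pa;	/* I/O permission bitmap */
	control->msrpm_base_pa = msrpm_pa;	/* MSR permission bitmap */
	control->asid = 1;			/* ASID 0 is reserved for the host */
}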

#define TLB_CONTROL_DO_NOTHING 0
#define TLB_CONTROL_FLUSH_ALL_ASID 1
#define TLB_CONTROL_FLUSH_ASID 3
#define TLB_CONTROL_FLUSH_ASID_LOCAL 7
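
/*
 * Sketch, not kernel API: tlb_ctl is consumed by the next VMRUN.
 * FLUSH_ASID flushes only this guest's ASID, while FLUSH_ALL_ASID is the
 * big hammer that flushes every ASID.
 */
static inline void example_request_tlb_flush(struct vmcb_control_area *control,
					     bool flush_all)
{
	control->tlb_ctl = flush_all ? TLB_CONTROL_FLUSH_ALL_ASID :
				       TLB_CONTROL_FLUSH_ASID;
}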

#define V_TPR_MASK 0x0f

#define V_IRQ_SHIFT 8
#define V_IRQ_MASK (1 << V_IRQ_SHIFT)

#define V_GIF_SHIFT 9
#define V_GIF_MASK (1 << V_GIF_SHIFT)

#define V_NMI_PENDING_SHIFT 11
#define V_NMI_PENDING_MASK (1 << V_NMI_PENDING_SHIFT)

#define V_NMI_BLOCKING_SHIFT 12
#define V_NMI_BLOCKING_MASK (1 << V_NMI_BLOCKING_SHIFT)

#define V_INTR_PRIO_SHIFT 16
#define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT)

#define V_IGN_TPR_SHIFT 20
#define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)

#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)

#define V_INTR_MASKING_SHIFT 24
#define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)

#define V_GIF_ENABLE_SHIFT 25
#define V_GIF_ENABLE_MASK (1 << V_GIF_ENABLE_SHIFT)

#define V_NMI_ENABLE_SHIFT 26
#define V_NMI_ENABLE_MASK (1 << V_NMI_ENABLE_SHIFT)

#define AVIC_ENABLE_SHIFT 31
#define AVIC_ENABLE_MASK (1 << AVIC_ENABLE_SHIFT)

#define X2APIC_MODE_SHIFT 30
#define X2APIC_MODE_MASK (1 << X2APIC_MODE_SHIFT)

#define LBR_CTL_ENABLE_MASK BIT_ULL(0)
#define VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK BIT_ULL(1)
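
/*
 * Sketch of virtual interrupt injection via int_ctl, modeled on what KVM
 * does (the helper name is illustrative): the vector goes in int_vector,
 * and V_IRQ plus a priority/IGN_TPR setting goes in int_ctl.
 */
static inline void example_inject_vintr(struct vmcb_control_area *control,
					u8 vector)
{
	control->int_vector = vector;
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	/* Highest priority + ignore TPR so the request is always taken. */
	control->int_ctl |= V_IRQ_MASK | V_IGN_TPR_MASK |
			    (0xf << V_INTR_PRIO_SHIFT);
}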

#define SVM_INTERRUPT_SHADOW_MASK BIT_ULL(0)
#define SVM_GUEST_INTERRUPT_MASK BIT_ULL(1)

#define SVM_IOIO_STR_SHIFT 2
#define SVM_IOIO_REP_SHIFT 3
#define SVM_IOIO_SIZE_SHIFT 4
#define SVM_IOIO_ASIZE_SHIFT 7

#define SVM_IOIO_TYPE_MASK 1
#define SVM_IOIO_STR_MASK (1 << SVM_IOIO_STR_SHIFT)
#define SVM_IOIO_REP_MASK (1 << SVM_IOIO_REP_SHIFT)
#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)
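
/*
 * Sketch of decoding exit_info_1 for an IOIO intercept (per the APM):
 * bit 0 is the direction (1 = IN), the size field is one-hot (1/2/4
 * bytes), and the port number lives in bits 31:16.
 */
static inline void example_decode_ioio(u64 exit_info_1, u16 *port,
				       bool *in, bool *string, bool *rep)
{
	*port = exit_info_1 >> 16;
	*in = exit_info_1 & SVM_IOIO_TYPE_MASK;
	*string = exit_info_1 & SVM_IOIO_STR_MASK;
	*rep = exit_info_1 & SVM_IOIO_REP_MASK;
}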

#define SVM_NESTED_CTL_NP_ENABLE BIT(0)
#define SVM_NESTED_CTL_SEV_ENABLE BIT(1)
#define SVM_NESTED_CTL_SEV_ES_ENABLE BIT(2)

#define SVM_TSC_RATIO_RSVD 0xffffff0000000000ULL
#define SVM_TSC_RATIO_MIN 0x0000000000000001ULL
#define SVM_TSC_RATIO_MAX 0x000000ffffffffffULL
#define SVM_TSC_RATIO_DEFAULT 0x0100000000ULL
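
/*
 * The TSC ratio MSR holds an 8.32 fixed-point multiplier, so 1.0 is
 * 0x0100000000 (SVM_TSC_RATIO_DEFAULT). Illustrative sketch, assuming
 * <linux/math64.h> for div64_u64():
 */
static inline u64 example_tsc_ratio(u64 guest_tsc_khz, u64 host_tsc_khz)
{
	u64 ratio = div64_u64(guest_tsc_khz << 32, host_tsc_khz);

	/* Bits above the 8.32 format are reserved and must be zero. */
	return (ratio & SVM_TSC_RATIO_RSVD) ? SVM_TSC_RATIO_DEFAULT : ratio;
}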

/* AVIC */
#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFFULL)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK GENMASK_ULL(11, 0)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63)
#define AVIC_PHYSICAL_ID_TABLE_SIZE_MASK (0xFFULL)

#define AVIC_DOORBELL_PHYSICAL_ID_MASK GENMASK_ULL(11, 0)

#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL

#define AVIC_UNACCEL_ACCESS_WRITE_MASK 1
#define AVIC_UNACCEL_ACCESS_OFFSET_MASK 0xFF0
#define AVIC_UNACCEL_ACCESS_VECTOR_MASK 0xFFFFFFFF
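
/*
 * Sketch of composing a physical APIC ID table entry, modeled on KVM's
 * AVIC code: the backing-page address, the host CPU the vCPU currently
 * runs on, and the running/valid flags all pack into one u64.
 */
static inline u64 example_avic_physid_entry(u64 backing_page_pa, u32 host_cpu)
{
	return (backing_page_pa & AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
	       (host_cpu & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK) |
	       AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK |
	       AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;
}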

enum avic_ipi_failure_cause {
	AVIC_IPI_FAILURE_INVALID_INT_TYPE,
	AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
	AVIC_IPI_FAILURE_INVALID_TARGET,
	AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
	AVIC_IPI_FAILURE_INVALID_IPI_VECTOR,
};

#define AVIC_PHYSICAL_MAX_INDEX_MASK GENMASK_ULL(8, 0)

/*
 * For AVIC, the max index allowed for physical APIC ID table is 0xfe (254), as
 * 0xff is a broadcast to all CPUs, i.e. can't be targeted individually.
 */
#define AVIC_MAX_PHYSICAL_ID 0xFEULL

/*
 * For x2AVIC, the max index allowed for physical APIC ID table is 0x1ff (511).
 */
#define X2AVIC_MAX_PHYSICAL_ID 0x1FFUL

static_assert((AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == AVIC_MAX_PHYSICAL_ID);
static_assert((X2AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == X2AVIC_MAX_PHYSICAL_ID);

#define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)

#define SVM_SEV_FEAT_SNP_ACTIVE BIT(0)
#define SVM_SEV_FEAT_RESTRICTED_INJECTION BIT(3)
#define SVM_SEV_FEAT_ALTERNATE_INJECTION BIT(4)
#define SVM_SEV_FEAT_DEBUG_SWAP BIT(5)

#define VMCB_ALLOWED_SEV_FEATURES_VALID BIT_ULL(63)
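
/*
 * Sketch of populating allowed_sev_features: bit 63 tells hardware the
 * field is in use, and the low bits whitelist SEV feature bits the guest
 * may have set in its VMSA. The exact policy (which bits to permit) is
 * the VMM's; this helper is purely illustrative.
 */
static inline u64 example_allowed_sev_features(u64 permitted)
{
	return VMCB_ALLOWED_SEV_FEATURES_VALID | permitted;
}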

struct vmcb_seg {
	u16 selector;
	u16 attrib;
	u32 limit;
	u64 base;
} __packed;

/* Save area definition for legacy and SEV-MEM guests */
struct vmcb_save_area {
	struct vmcb_seg es;
	struct vmcb_seg cs;
	struct vmcb_seg ss;
	struct vmcb_seg ds;
	struct vmcb_seg fs;
	struct vmcb_seg gs;
	struct vmcb_seg gdtr;
	struct vmcb_seg ldtr;
	struct vmcb_seg idtr;
	struct vmcb_seg tr;
	/* Reserved fields are named following their struct offset */
	u8 reserved_0xa0[42];
	u8 vmpl;
	u8 cpl;
	u8 reserved_0xcc[4];
	u64 efer;
	u8 reserved_0xd8[112];
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
	u64 rflags;
	u64 rip;
	u8 reserved_0x180[88];
	u64 rsp;
	u64 s_cet;
	u64 ssp;
	u64 isst_addr;
	u64 rax;
	u64 star;
	u64 lstar;
	u64 cstar;
	u64 sfmask;
	u64 kernel_gs_base;
	u64 sysenter_cs;
	u64 sysenter_esp;
	u64 sysenter_eip;
	u64 cr2;
	u8 reserved_0x248[32];
	u64 g_pat;
	u64 dbgctl;
	u64 br_from;
	u64 br_to;
	u64 last_excp_from;
	u64 last_excp_to;
	u8 reserved_0x298[72];
	u64 spec_ctrl;		/* Guest version of SPEC_CTRL at 0x2E0 */
} __packed;

/* Save area definition for SEV-ES and SEV-SNP guests */
struct sev_es_save_area {
	struct vmcb_seg es;
	struct vmcb_seg cs;
	struct vmcb_seg ss;
	struct vmcb_seg ds;
	struct vmcb_seg fs;
	struct vmcb_seg gs;
	struct vmcb_seg gdtr;
	struct vmcb_seg ldtr;
	struct vmcb_seg idtr;
	struct vmcb_seg tr;
	u64 pl0_ssp;
	u64 pl1_ssp;
	u64 pl2_ssp;
	u64 pl3_ssp;
	u64 u_cet;
	u8 reserved_0xc8[2];
	u8 vmpl;
	u8 cpl;
	u8 reserved_0xcc[4];
	u64 efer;
	u8 reserved_0xd8[104];
	u64 xss;
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
	u64 rflags;
	u64 rip;
	u64 dr0;
	u64 dr1;
	u64 dr2;
	u64 dr3;
	u64 dr0_addr_mask;
	u64 dr1_addr_mask;
	u64 dr2_addr_mask;
	u64 dr3_addr_mask;
	u8 reserved_0x1c0[24];
	u64 rsp;
	u64 s_cet;
	u64 ssp;
	u64 isst_addr;
	u64 rax;
	u64 star;
	u64 lstar;
	u64 cstar;
	u64 sfmask;
	u64 kernel_gs_base;
	u64 sysenter_cs;
	u64 sysenter_esp;
	u64 sysenter_eip;
	u64 cr2;
	u8 reserved_0x248[32];
	u64 g_pat;
	u64 dbgctl;
	u64 br_from;
	u64 br_to;
	u64 last_excp_from;
	u64 last_excp_to;
	u8 reserved_0x298[80];
	u32 pkru;
	u32 tsc_aux;
	u64 tsc_scale;
	u64 tsc_offset;
	u8 reserved_0x300[8];
	u64 rcx;
	u64 rdx;
	u64 rbx;
	u64 reserved_0x320;	/* rsp already available at 0x01d8 */
	u64 rbp;
	u64 rsi;
	u64 rdi;
	u64 r8;
	u64 r9;
	u64 r10;
	u64 r11;
	u64 r12;
	u64 r13;
	u64 r14;
	u64 r15;
	u8 reserved_0x380[16];
	u64 guest_exit_info_1;
	u64 guest_exit_info_2;
	u64 guest_exit_int_info;
	u64 guest_nrip;
	u64 sev_features;
	u64 vintr_ctrl;
	u64 guest_exit_code;
	u64 virtual_tom;
	u64 tlb_id;
	u64 pcpu_id;
	u64 event_inj;
	u64 xcr0;
	u8 reserved_0x3f0[16];

	/* Floating point area */
	u64 x87_dp;
	u32 mxcsr;
	u16 x87_ftw;
	u16 x87_fsw;
	u16 x87_fcw;
	u16 x87_fop;
	u16 x87_ds;
	u16 x87_cs;
	u64 x87_rip;
	u8 fpreg_x87[80];
	u8 fpreg_xmm[256];
	u8 fpreg_ymm[256];
} __packed;

struct ghcb_save_area {
	u8 reserved_0x0[203];
	u8 cpl;
	u8 reserved_0xcc[116];
	u64 xss;
	u8 reserved_0x148[24];
	u64 dr7;
	u8 reserved_0x168[16];
	u64 rip;
	u8 reserved_0x180[88];
	u64 rsp;
	u8 reserved_0x1e0[24];
	u64 rax;
	u8 reserved_0x200[264];
	u64 rcx;
	u64 rdx;
	u64 rbx;
	u8 reserved_0x320[8];
	u64 rbp;
	u64 rsi;
	u64 rdi;
	u64 r8;
	u64 r9;
	u64 r10;
	u64 r11;
	u64 r12;
	u64 r13;
	u64 r14;
	u64 r15;
	u8 reserved_0x380[16];
	u64 sw_exit_code;
	u64 sw_exit_info_1;
	u64 sw_exit_info_2;
	u64 sw_scratch;
	u8 reserved_0x3b0[56];
	u64 xcr0;
	u8 valid_bitmap[16];
	u64 x87_state_gpa;
} __packed;

#define GHCB_SHARED_BUF_SIZE 2032

struct ghcb {
	struct ghcb_save_area save;
	u8 reserved_save[2048 - sizeof(struct ghcb_save_area)];

	u8 shared_buffer[GHCB_SHARED_BUF_SIZE];

	u8 reserved_0xff0[10];
	u16 protocol_version;	/* negotiated SEV-ES/GHCB protocol version */
	u32 ghcb_usage;
} __packed;

struct vmcb {
	struct vmcb_control_area control;
	union {
		struct vmcb_save_area save;

		/*
		 * For SEV-ES VMs, the save area in the VMCB is used only to
		 * save/load host state. Guest state resides in a separate
		 * page, the aptly named VM Save Area (VMSA), that is encrypted
		 * with the guest's private key.
		 */
		struct sev_es_save_area host_sev_es_save;
	};
} __packed;

#define EXPECTED_VMCB_SAVE_AREA_SIZE 744
#define EXPECTED_GHCB_SAVE_AREA_SIZE 1032
#define EXPECTED_SEV_ES_SAVE_AREA_SIZE 1648
#define EXPECTED_VMCB_CONTROL_AREA_SIZE 1024
#define EXPECTED_GHCB_SIZE PAGE_SIZE

#define BUILD_BUG_RESERVED_OFFSET(x, y) \
	ASSERT_STRUCT_OFFSET(struct x, reserved ## _ ## y, y)

static inline void __unused_size_checks(void)
{
	BUILD_BUG_ON(sizeof(struct vmcb_save_area) != EXPECTED_VMCB_SAVE_AREA_SIZE);
	BUILD_BUG_ON(sizeof(struct ghcb_save_area) != EXPECTED_GHCB_SAVE_AREA_SIZE);
	BUILD_BUG_ON(sizeof(struct sev_es_save_area) != EXPECTED_SEV_ES_SAVE_AREA_SIZE);
	BUILD_BUG_ON(sizeof(struct vmcb_control_area) != EXPECTED_VMCB_CONTROL_AREA_SIZE);
	BUILD_BUG_ON(offsetof(struct vmcb, save) != EXPECTED_VMCB_CONTROL_AREA_SIZE);
	BUILD_BUG_ON(sizeof(struct ghcb) != EXPECTED_GHCB_SIZE);

	/* Check offsets of reserved fields */

	BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0xa0);
	BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0xcc);
	BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0xd8);
	BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0x180);
	BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0x248);
	BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0x298);

	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0xc8);
	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0xcc);
	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0xd8);
	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x1c0);
	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x248);
	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x298);
	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x300);
	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x320);
	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x380);
	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x3f0);

	BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x0);
	BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0xcc);
	BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x148);
	BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x168);
	BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x180);
	BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x1e0);
	BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x200);
	BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x320);
	BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x380);
	BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x3b0);

	BUILD_BUG_RESERVED_OFFSET(ghcb, 0xff0);
}

#define SVM_CPUID_FUNC 0x8000000a

#define SVM_SELECTOR_S_SHIFT 4
#define SVM_SELECTOR_DPL_SHIFT 5
#define SVM_SELECTOR_P_SHIFT 7
#define SVM_SELECTOR_AVL_SHIFT 8
#define SVM_SELECTOR_L_SHIFT 9
#define SVM_SELECTOR_DB_SHIFT 10
#define SVM_SELECTOR_G_SHIFT 11

#define SVM_SELECTOR_TYPE_MASK (0xf)
#define SVM_SELECTOR_S_MASK (1 << SVM_SELECTOR_S_SHIFT)
#define SVM_SELECTOR_DPL_MASK (3 << SVM_SELECTOR_DPL_SHIFT)
#define SVM_SELECTOR_P_MASK (1 << SVM_SELECTOR_P_SHIFT)
#define SVM_SELECTOR_AVL_MASK (1 << SVM_SELECTOR_AVL_SHIFT)
#define SVM_SELECTOR_L_MASK (1 << SVM_SELECTOR_L_SHIFT)
#define SVM_SELECTOR_DB_MASK (1 << SVM_SELECTOR_DB_SHIFT)
#define SVM_SELECTOR_G_MASK (1 << SVM_SELECTOR_G_SHIFT)

#define SVM_SELECTOR_WRITE_MASK (1 << 1)
#define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK
#define SVM_SELECTOR_CODE_MASK (1 << 3)
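
/*
 * The VMCB stores segment attributes in a packed 12-bit form (type in
 * bits 3:0, then S/DPL/P/AVL/L/DB/G per the shifts above). Sketch of
 * composing the attrib field for a flat 64-bit kernel code segment
 * (type 0xb = execute/read, accessed; L=1, DB=0):
 */
static inline u16 example_code64_attrib(void)
{
	return 0xb | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK |
	       SVM_SELECTOR_L_MASK;
}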

#define SVM_EVTINJ_VEC_MASK 0xff

#define SVM_EVTINJ_TYPE_SHIFT 8
#define SVM_EVTINJ_TYPE_MASK (7 << SVM_EVTINJ_TYPE_SHIFT)

#define SVM_EVTINJ_TYPE_INTR (0 << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_TYPE_NMI (2 << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_TYPE_EXEPT (3 << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_TYPE_SOFT (4 << SVM_EVTINJ_TYPE_SHIFT)

#define SVM_EVTINJ_VALID (1 << 31)
#define SVM_EVTINJ_VALID_ERR (1 << 11)
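
/*
 * Sketch of injecting an exception with an error code (e.g. #GP) via
 * EVENTINJ, modeled on what a hypervisor does before VMRUN: vector in
 * bits 7:0, type in bits 10:8, plus the valid bits.
 */
static inline void example_inject_exception(struct vmcb_control_area *control,
					    u8 vector, u32 error_code)
{
	control->event_inj = vector | SVM_EVTINJ_TYPE_EXEPT |
			     SVM_EVTINJ_VALID | SVM_EVTINJ_VALID_ERR;
	control->event_inj_err = error_code;
}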

#define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK
#define SVM_EXITINTINFO_TYPE_MASK SVM_EVTINJ_TYPE_MASK

#define SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR
#define SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI
#define SVM_EXITINTINFO_TYPE_EXEPT SVM_EVTINJ_TYPE_EXEPT
#define SVM_EXITINTINFO_TYPE_SOFT SVM_EVTINJ_TYPE_SOFT

#define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID
#define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR

#define SVM_EXITINFOSHIFT_TS_REASON_IRET 36
#define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
#define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44

#define SVM_EXITINFO_REG_MASK 0x0F

#define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)

/* GHCB Accessor functions */

#define GHCB_BITMAP_IDX(field) \
	(offsetof(struct ghcb_save_area, field) / sizeof(u64))

#define DEFINE_GHCB_ACCESSORS(field) \
	static __always_inline bool ghcb_##field##_is_valid(const struct ghcb *ghcb) \
	{ \
		return test_bit(GHCB_BITMAP_IDX(field), \
				(unsigned long *)&ghcb->save.valid_bitmap); \
	} \
	\
	static __always_inline u64 ghcb_get_##field(struct ghcb *ghcb) \
	{ \
		return ghcb->save.field; \
	} \
	\
	static __always_inline u64 ghcb_get_##field##_if_valid(struct ghcb *ghcb) \
	{ \
		return ghcb_##field##_is_valid(ghcb) ? ghcb->save.field : 0; \
	} \
	\
	static __always_inline void ghcb_set_##field(struct ghcb *ghcb, u64 value) \
	{ \
		__set_bit(GHCB_BITMAP_IDX(field), \
			  (unsigned long *)&ghcb->save.valid_bitmap); \
		ghcb->save.field = value; \
	}

DEFINE_GHCB_ACCESSORS(cpl)
DEFINE_GHCB_ACCESSORS(rip)
DEFINE_GHCB_ACCESSORS(rsp)
DEFINE_GHCB_ACCESSORS(rax)
DEFINE_GHCB_ACCESSORS(rcx)
DEFINE_GHCB_ACCESSORS(rdx)
DEFINE_GHCB_ACCESSORS(rbx)
DEFINE_GHCB_ACCESSORS(rbp)
DEFINE_GHCB_ACCESSORS(rsi)
DEFINE_GHCB_ACCESSORS(rdi)
DEFINE_GHCB_ACCESSORS(r8)
DEFINE_GHCB_ACCESSORS(r9)
DEFINE_GHCB_ACCESSORS(r10)
DEFINE_GHCB_ACCESSORS(r11)
DEFINE_GHCB_ACCESSORS(r12)
DEFINE_GHCB_ACCESSORS(r13)
DEFINE_GHCB_ACCESSORS(r14)
DEFINE_GHCB_ACCESSORS(r15)
DEFINE_GHCB_ACCESSORS(sw_exit_code)
DEFINE_GHCB_ACCESSORS(sw_exit_info_1)
DEFINE_GHCB_ACCESSORS(sw_exit_info_2)
DEFINE_GHCB_ACCESSORS(sw_scratch)
DEFINE_GHCB_ACCESSORS(xcr0)
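
/*
 * Sketch of a GHCB-protocol MSR read as an SEV-ES guest would stage it
 * before VMGEXIT (modeled on the kernel's #VC handler; the final
 * sev_es_ghcb_hv_call()/VMGEXIT step is elided). The valid_bitmap is
 * maintained by the ghcb_set_*() accessors above, one bit per u64 slot.
 */
static inline void example_stage_msr_read(struct ghcb *ghcb, u32 msr)
{
	ghcb_set_rcx(ghcb, msr);			/* MSR index */
	ghcb_set_sw_exit_code(ghcb, SVM_EXIT_MSR);	/* from uapi/asm/svm.h */
	ghcb_set_sw_exit_info_1(ghcb, 0);		/* 0 = RDMSR, 1 = WRMSR */
	ghcb_set_sw_exit_info_2(ghcb, 0);
}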

#endif /* __SVM_H */