// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V Isolation VM interface with paravisor and hypervisor
 *
 * Author:
 *	Tianyu Lan <Tianyu.Lan@microsoft.com>
 */

#include <linux/bitfield.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <asm/svm.h>
#include <asm/sev.h>
#include <asm/io.h>
#include <asm/coco.h>
#include <asm/mem_encrypt.h>
#include <asm/set_memory.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/mtrr.h>
#include <asm/io_apic.h>
#include <asm/realmode.h>
#include <asm/e820/api.h>
#include <asm/desc.h>
#include <asm/msr.h>
#include <uapi/asm/vmx.h>

#ifdef CONFIG_AMD_MEM_ENCRYPT

#define GHCB_USAGE_HYPERV_CALL	1

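/*
 * The GHCB page shared with the hypervisor: the standard GHCB layout is
 * overlaid with Hyper-V's hypercall-via-GHCB format. The hypercall struct
 * members total exactly one 4 KiB page (509 * 8 + 8 + 8 + 8 bytes), which
 * the BUILD_BUG_ON() in hv_ghcb_msr_read() verifies.
 */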
union hv_ghcb {
	struct ghcb ghcb;
	struct {
		u64 hypercalldata[509];
		u64 outputgpa;
		union {
			union {
				struct {
					u32 callcode : 16;
					u32 isfast : 1;
					u32 reserved1 : 14;
					u32 isnested : 1;
					u32 countofelements : 12;
					u32 reserved2 : 4;
					u32 repstartindex : 12;
					u32 reserved3 : 4;
				};
				u64 asuint64;
			} hypercallinput;
			union {
				struct {
					u16 callstatus;
					u16 reserved1;
					u32 elementsprocessed : 12;
					u32 reserved2 : 20;
				};
				u64 asuint64;
			} hypercalloutput;
		};
		u64 reserved2;
	} hypercall;
} __packed __aligned(HV_HYP_PAGE_SIZE);

/* Only used in an SNP VM with the paravisor */
static u16 hv_ghcb_version __ro_after_init;

/* Functions only used in an SNP VM with the paravisor go here. */
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;
	u64 status;

	if (!hv_ghcb_pg)
		return -EFAULT;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return -EFAULT;
	}

	hv_ghcb->ghcb.protocol_version = GHCB_PROTOCOL_MAX;
	hv_ghcb->ghcb.ghcb_usage = GHCB_USAGE_HYPERV_CALL;

	hv_ghcb->hypercall.outputgpa = (u64)output;
	hv_ghcb->hypercall.hypercallinput.asuint64 = 0;
	hv_ghcb->hypercall.hypercallinput.callcode = control;

	if (input_size)
		memcpy(hv_ghcb->hypercall.hypercalldata, input, input_size);

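	/*
	 * VMGEXIT transfers control to the hypervisor, which processes the
	 * hypercall request that was set up in the GHCB page above.
	 */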
	VMGEXIT();

	hv_ghcb->ghcb.ghcb_usage = 0xffffffff;
	memset(hv_ghcb->ghcb.save.valid_bitmap, 0,
	       sizeof(hv_ghcb->ghcb.save.valid_bitmap));

	status = hv_ghcb->hypercall.hypercalloutput.callstatus;

	local_irq_restore(flags);

	return status;
}

static inline u64 rd_ghcb_msr(void)
{
	return native_rdmsrq(MSR_AMD64_SEV_ES_GHCB);
}

static inline void wr_ghcb_msr(u64 val)
{
	native_wrmsrq(MSR_AMD64_SEV_ES_GHCB, val);
}

static enum es_result hv_ghcb_hv_call(struct ghcb *ghcb, u64 exit_code,
				      u64 exit_info_1, u64 exit_info_2)
{
	/* Fill in protocol and format specifiers */
	ghcb->protocol_version = hv_ghcb_version;
	ghcb->ghcb_usage = GHCB_DEFAULT_USAGE;

	ghcb_set_sw_exit_code(ghcb, exit_code);
	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

	VMGEXIT();

	if (ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0))
		return ES_VMM_ERROR;
	else
		return ES_OK;
}

void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason)
{
	u64 val = GHCB_MSR_TERM_REQ;

	/* Tell the hypervisor what went wrong. */
	val |= GHCB_SEV_TERM_REASON(set, reason);

	/* Request Guest Termination from Hypervisor */
	wr_ghcb_msr(val);
	VMGEXIT();

	while (true)
		asm volatile("hlt\n" : : : "memory");
}

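/*
 * Negotiate the GHCB protocol version using the GHCB MSR protocol, which
 * works before any GHCB page is registered: query the hypervisor's
 * supported [min, max] version range and pick the highest version that
 * both sides support.
 */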
bool hv_ghcb_negotiate_protocol(void)
{
	u64 ghcb_gpa;
	u64 val;

	/* Save ghcb page gpa. */
	ghcb_gpa = rd_ghcb_msr();

	/* Do the GHCB protocol version negotiation */
	wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
	VMGEXIT();
	val = rd_ghcb_msr();

	if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
		return false;

	if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN ||
	    GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX)
		return false;

	hv_ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val),
				GHCB_PROTOCOL_MAX);

	/* Write ghcb page back after negotiating protocol. */
	wr_ghcb_msr(ghcb_gpa);
	VMGEXIT();

	return true;
}

static void hv_ghcb_msr_write(u64 msr, u64 value)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;

	if (!hv_ghcb_pg)
		return;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return;
	}

	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
	ghcb_set_rax(&hv_ghcb->ghcb, lower_32_bits(value));
	ghcb_set_rdx(&hv_ghcb->ghcb, upper_32_bits(value));

	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 1, 0))
		pr_warn("Failed to write MSR %llx via GHCB.\n", msr);

	local_irq_restore(flags);
}

static void hv_ghcb_msr_read(u64 msr, u64 *value)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;

	/* Check size of union hv_ghcb here. */
	BUILD_BUG_ON(sizeof(union hv_ghcb) != HV_HYP_PAGE_SIZE);

	if (!hv_ghcb_pg)
		return;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return;
	}

	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 0, 0))
		pr_warn("Failed to read MSR %llx via GHCB.\n", msr);
	else
		*value = (u64)lower_32_bits(hv_ghcb->ghcb.save.rax)
			| ((u64)lower_32_bits(hv_ghcb->ghcb.save.rdx) << 32);
	local_irq_restore(flags);
}

/* Only used in a fully enlightened SNP VM, i.e. without the paravisor */
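/*
 * The AP startup hypercall input must be in memory that is visible to the
 * hypervisor, hence the __bss_decrypted attribute on ap_start_input_arg;
 * the AP's initial stack stays in normal encrypted guest memory.
 */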
static u8 ap_start_input_arg[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
static u8 ap_start_stack[PAGE_SIZE] __aligned(PAGE_SIZE);
static DEFINE_PER_CPU(struct sev_es_save_area *, hv_sev_vmsa);

/* Functions only used in an SNP VM without the paravisor go here. */

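/*
 * Build a VMSA segment register entry from the GDT descriptor selected by
 * seg.selector: the two attribute bytes at offset 5 of the descriptor are
 * repacked into the VMSA segment-attribute format (type/S/DPL/P in bits
 * 0-7, the AVL/L/DB/G flags nibble in bits 8-11).
 */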
#define hv_populate_vmcb_seg(seg, gdtr_base)			\
do {								\
	if (seg.selector) {					\
		seg.base = 0;					\
		seg.limit = HV_AP_SEGMENT_LIMIT;		\
		seg.attrib = *(u16 *)(gdtr_base + seg.selector + 5); \
		seg.attrib = (seg.attrib & 0xFF) | ((seg.attrib >> 4) & 0xF00); \
	}							\
} while (0)

static int snp_set_vmsa(void *va, bool vmsa)
{
	u64 attrs;

	/*
	 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
	 * using the RMPADJUST instruction. However, for the instruction to
	 * succeed it must target the permissions of a lesser privileged
	 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
	 * instruction in the AMD64 APM Volume 3).
	 */
	attrs = 1;
	if (vmsa)
		attrs |= RMPADJUST_VMSA_PAGE_BIT;

	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
}

static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
{
	int err;

	err = snp_set_vmsa(vmsa, false);
	if (err)
		pr_err("clear VMSA page failed (%u), leaking page\n", err);
	else
		free_page((unsigned long)vmsa);
}

int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip, unsigned int cpu)
{
	struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
		__get_free_page(GFP_KERNEL | __GFP_ZERO);
	struct sev_es_save_area *cur_vmsa;
	struct desc_ptr gdtr;
	u64 ret, retry = 5;
	struct hv_enable_vp_vtl *start_vp_input;
	unsigned long flags;
	int vp_index;

	if (!vmsa)
		return -ENOMEM;

	/* Find the Hyper-V VP index, which might not be the same as the APIC ID */
	vp_index = hv_apicid_to_vp_index(apic_id);
	if (vp_index < 0 || vp_index > ms_hyperv.max_vp_index) {
		free_page((unsigned long)vmsa);
		return -EINVAL;
	}

	native_store_gdt(&gdtr);

	vmsa->gdtr.base = gdtr.address;
	vmsa->gdtr.limit = gdtr.size;

	asm volatile("movl %%es, %%eax;" : "=a" (vmsa->es.selector));
	hv_populate_vmcb_seg(vmsa->es, vmsa->gdtr.base);

	asm volatile("movl %%cs, %%eax;" : "=a" (vmsa->cs.selector));
	hv_populate_vmcb_seg(vmsa->cs, vmsa->gdtr.base);

	asm volatile("movl %%ss, %%eax;" : "=a" (vmsa->ss.selector));
	hv_populate_vmcb_seg(vmsa->ss, vmsa->gdtr.base);

	asm volatile("movl %%ds, %%eax;" : "=a" (vmsa->ds.selector));
	hv_populate_vmcb_seg(vmsa->ds, vmsa->gdtr.base);

	vmsa->efer = native_read_msr(MSR_EFER);

	vmsa->cr4 = native_read_cr4();
	vmsa->cr3 = __native_read_cr3();
	vmsa->cr0 = native_read_cr0();

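	/* XCR0 bit 0 only: just legacy x87 state is enabled for the new AP */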
	vmsa->xcr0 = 1;
	vmsa->g_pat = HV_AP_INIT_GPAT_DEFAULT;
	vmsa->rip = (u64)secondary_startup_64_no_verify;
	vmsa->rsp = (u64)&ap_start_stack[PAGE_SIZE];

	/*
	 * Set the SNP-specific fields for this VMSA:
	 *   VMPL level
	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
	 */
	vmsa->vmpl = 0;
	vmsa->sev_features = sev_status >> 2;

	ret = snp_set_vmsa(vmsa, true);
	if (ret) {
		pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
		free_page((u64)vmsa);
		return ret;
	}

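	/*
	 * The hypercall input is placed in the shared (decrypted)
	 * ap_start_input_arg page; keep interrupts off while it is in use.
	 */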
	local_irq_save(flags);
	start_vp_input = (struct hv_enable_vp_vtl *)ap_start_input_arg;
	memset(start_vp_input, 0, sizeof(*start_vp_input));
	start_vp_input->partition_id = -1;
	start_vp_input->vp_index = vp_index;
	start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
	*(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;

	do {
		ret = hv_do_hypercall(HVCALL_START_VP,
				      start_vp_input, NULL);
	} while (hv_result(ret) == HV_STATUS_TIME_OUT && retry--);

	local_irq_restore(flags);

	if (!hv_result_success(ret)) {
		pr_err("HvCallStartVirtualProcessor failed: %llx\n", ret);
		snp_cleanup_vmsa(vmsa);
		vmsa = NULL;
	}

	cur_vmsa = per_cpu(hv_sev_vmsa, cpu);
	/* Free up any previous VMSA page */
	if (cur_vmsa)
		snp_cleanup_vmsa(cur_vmsa);

	/* Record the current VMSA page */
	per_cpu(hv_sev_vmsa, cpu) = vmsa;

	return ret;
}

#else
static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
#endif /* CONFIG_AMD_MEM_ENCRYPT */

#ifdef CONFIG_INTEL_TDX_GUEST
static void hv_tdx_msr_write(u64 msr, u64 val)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = EXIT_REASON_MSR_WRITE,
		.r12 = msr,
		.r13 = val,
	};

	u64 ret = __tdx_hypercall(&args);

	WARN_ONCE(ret, "Failed to emulate MSR write: %lld\n", ret);
}

static void hv_tdx_msr_read(u64 msr, u64 *val)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = EXIT_REASON_MSR_READ,
		.r12 = msr,
	};

	u64 ret = __tdx_hypercall(&args);

	if (WARN_ONCE(ret, "Failed to emulate MSR read: %lld\n", ret))
		*val = 0;
	else
		*val = args.r11;
}

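/*
 * A non-zero R10 marks a TDVMCALL as vendor-specific: Hyper-V passes the
 * hypercall control word in R10, the two hypercall parameters (typically
 * the input and output GPAs) in RDX and R8, and gets the hypercall status
 * back in R11.
 */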
u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
{
	struct tdx_module_args args = { };

	args.r10 = control;
	args.rdx = param1;
	args.r8 = param2;

	(void)__tdx_hypercall(&args);

	return args.r11;
}

#else
static inline void hv_tdx_msr_write(u64 msr, u64 value) {}
static inline void hv_tdx_msr_read(u64 msr, u64 *value) {}
#endif /* CONFIG_INTEL_TDX_GUEST */

#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
void hv_ivm_msr_write(u64 msr, u64 value)
{
	if (!ms_hyperv.paravisor_present)
		return;

	if (hv_isolation_type_tdx())
		hv_tdx_msr_write(msr, value);
	else if (hv_isolation_type_snp())
		hv_ghcb_msr_write(msr, value);
}

void hv_ivm_msr_read(u64 msr, u64 *value)
{
	if (!ms_hyperv.paravisor_present)
		return;

	if (hv_isolation_type_tdx())
		hv_tdx_msr_read(msr, value);
	else if (hv_isolation_type_snp())
		hv_ghcb_msr_read(msr, value);
}

/*
 * hv_mark_gpa_visibility - Set pages visible to the host via hvcall.
 *
 * In an Isolation VM, all guest memory is encrypted and invisible to the
 * host. The guest must make memory visible to the host via a hypercall
 * before sharing it.
 */
static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
				  enum hv_mem_host_visibility visibility)
{
	struct hv_gpa_range_for_visibility *input;
	u64 hv_status;
	unsigned long flags;

	/* no-op if partition isolation is not enabled */
	if (!hv_is_isolation_supported())
		return 0;

	if (count > HV_MAX_MODIFY_GPA_REP_COUNT) {
		pr_err("Hyper-V: GPA count:%d exceeds supported:%lu\n", count,
		       HV_MAX_MODIFY_GPA_REP_COUNT);
		return -EINVAL;
	}

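	/*
	 * Disable interrupts so the per-CPU hypercall input page can't be
	 * reused by an interrupt on this CPU while it holds our data.
	 */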
	local_irq_save(flags);
	input = *this_cpu_ptr(hyperv_pcpu_input_arg);

	if (unlikely(!input)) {
		local_irq_restore(flags);
		return -EINVAL;
	}

	input->partition_id = HV_PARTITION_ID_SELF;
	input->host_visibility = visibility;
	input->reserved0 = 0;
	input->reserved1 = 0;
	memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
	hv_status = hv_do_rep_hypercall(
			HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count,
			0, input, NULL);
	local_irq_restore(flags);

	if (hv_result_success(hv_status))
		return 0;
	else
		return -EFAULT;
}

/*
 * When transitioning memory between encrypted and decrypted, the caller
 * of set_memory_encrypted() or set_memory_decrypted() is responsible for
 * ensuring that the memory isn't in use and isn't referenced while the
 * transition is in progress. The transition has multiple steps, and the
 * memory is in an inconsistent state until all steps are complete. A
 * reference while the state is inconsistent could result in an exception
 * that can't be cleanly fixed up.
 *
 * But the Linux kernel load_unaligned_zeropad() mechanism could cause a
 * stray reference that can't be prevented by the caller, so Linux has
 * specific code to handle this case. But when the #VC and #VE exceptions
 * are routed to a paravisor, the specific code doesn't work. To avoid this
 * problem, mark the pages as "not present" while the transition is in
 * progress. If load_unaligned_zeropad() causes a stray reference, a normal
 * page fault is generated instead of #VC or #VE, and the page-fault-based
 * handlers for load_unaligned_zeropad() resolve the reference. When the
 * transition is complete, hv_vtom_set_host_visibility() marks the pages
 * as "present" again.
 */
static int hv_vtom_clear_present(unsigned long kbuffer, int pagecount, bool enc)
{
	return set_memory_np(kbuffer, pagecount);
}

/*
 * hv_vtom_set_host_visibility - Set specified memory visible to the host.
 *
 * In an Isolation VM, all guest memory is encrypted and invisible to the
 * host. The guest must make memory visible to the host via a hypercall
 * before sharing it. This function is a wrapper around
 * hv_mark_gpa_visibility() that takes a memory base and size.
 */
static int hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
{
	enum hv_mem_host_visibility visibility = enc ?
			VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
	u64 *pfn_array;
	phys_addr_t paddr;
	int i, pfn, err;
	void *vaddr;
	int ret = 0;

	pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
	if (!pfn_array) {
		ret = -ENOMEM;
		goto err_set_memory_p;
	}

	for (i = 0, pfn = 0; i < pagecount; i++) {
		/*
		 * Use slow_virt_to_phys() because the PRESENT bit has been
		 * temporarily cleared in the PTEs. slow_virt_to_phys() works
		 * without the PRESENT bit while virt_to_hvpfn() or similar
		 * does not.
		 */
		vaddr = (void *)kbuffer + (i * HV_HYP_PAGE_SIZE);
		paddr = slow_virt_to_phys(vaddr);
		pfn_array[pfn] = paddr >> HV_HYP_PAGE_SHIFT;
		pfn++;

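		/* Flush a batch when the PFN array fills or at the last page. */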
		if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
			ret = hv_mark_gpa_visibility(pfn, pfn_array,
						     visibility);
			if (ret)
				goto err_free_pfn_array;
			pfn = 0;
		}
	}

err_free_pfn_array:
	kfree(pfn_array);

err_set_memory_p:
	/*
	 * Set the PTE PRESENT bits again to revert what hv_vtom_clear_present()
	 * did. Do this even if there is an error earlier in this function in
	 * order to avoid leaving the memory range in a "broken" state. Setting
	 * the PRESENT bits shouldn't fail, but return an error if it does.
	 */
	err = set_memory_p(kbuffer, pagecount);
	if (err && !ret)
		ret = err;

	return ret;
}

static bool hv_vtom_tlb_flush_required(bool private)
{
	/*
	 * Since hv_vtom_clear_present() marks the PTEs as "not present"
	 * and flushes the TLB, they can't be in the TLB. That makes the
	 * flush controlled by this function redundant, so return "false".
	 */
	return false;
}

static bool hv_vtom_cache_flush_required(void)
{
	return false;
}

static bool hv_is_private_mmio(u64 addr)
{
	/*
	 * Hyper-V always provides a single IO-APIC in a guest VM.
	 * When a paravisor is used, it is emulated by the paravisor
	 * in the guest context and must be mapped private.
	 */
	if (addr >= HV_IOAPIC_BASE_ADDRESS &&
	    addr < (HV_IOAPIC_BASE_ADDRESS + PAGE_SIZE))
		return true;

	/* Same with a vTPM */
	if (addr >= VTPM_BASE_ADDRESS &&
	    addr < (VTPM_BASE_ADDRESS + PAGE_SIZE))
		return true;

	return false;
}

void __init hv_vtom_init(void)
{
	enum hv_isolation_type type = hv_get_isolation_type();

	switch (type) {
	case HV_ISOLATION_TYPE_VBS:
		fallthrough;
	/*
	 * By design, a VM using vTOM doesn't see the SEV setting,
	 * so SEV initialization is bypassed and sev_status isn't set.
	 * Set it here to indicate a vTOM VM.
	 *
	 * Note: if CONFIG_AMD_MEM_ENCRYPT is not set, sev_status is
	 * defined as 0ULL, to which we can't assign a value.
	 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
	case HV_ISOLATION_TYPE_SNP:
		sev_status = MSR_AMD64_SNP_VTOM;
		cc_vendor = CC_VENDOR_AMD;
		break;
#endif

	case HV_ISOLATION_TYPE_TDX:
		cc_vendor = CC_VENDOR_INTEL;
		break;

	default:
		panic("hv_vtom_init: unsupported isolation type %d\n", type);
	}

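	/*
	 * shared_gpa_boundary is the vTOM boundary: guest physical addresses
	 * at or above it are shared with the host. Use it as the CoCo
	 * "decrypted" mask and trim it out of the physical address mask so
	 * it isn't treated as an address bit.
	 */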
	cc_set_mask(ms_hyperv.shared_gpa_boundary);
	physical_mask &= ms_hyperv.shared_gpa_boundary - 1;

	x86_platform.hyper.is_private_mmio = hv_is_private_mmio;
	x86_platform.guest.enc_cache_flush_required = hv_vtom_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required = hv_vtom_tlb_flush_required;
	x86_platform.guest.enc_status_change_prepare = hv_vtom_clear_present;
	x86_platform.guest.enc_status_change_finish = hv_vtom_set_host_visibility;

	/* Set WB as the default cache mode. */
	guest_force_mtrr_state(NULL, 0, MTRR_TYPE_WRBACK);
}

#endif /* defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST) */


enum hv_isolation_type hv_get_isolation_type(void)
{
	if (!(ms_hyperv.priv_high & HV_ISOLATION))
		return HV_ISOLATION_TYPE_NONE;
	return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
}
EXPORT_SYMBOL_GPL(hv_get_isolation_type);

/*
 * hv_is_isolation_supported - Check if the system runs in a Hyper-V
 * Isolation VM.
 */
bool hv_is_isolation_supported(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return false;

	if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
		return false;

	return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
}

DEFINE_STATIC_KEY_FALSE(isolation_type_snp);

/*
 * hv_isolation_type_snp - Check if the system runs in an AMD SEV-SNP based
 * isolation VM.
 */
bool hv_isolation_type_snp(void)
{
	return static_branch_unlikely(&isolation_type_snp);
}

DEFINE_STATIC_KEY_FALSE(isolation_type_tdx);
/*
 * hv_isolation_type_tdx - Check if the system runs in an Intel TDX based
 * isolation VM.
 */
bool hv_isolation_type_tdx(void)
{
	return static_branch_unlikely(&isolation_type_tdx);
}