/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_KVM_PARA_H
#define _ASM_X86_KVM_PARA_H

#include <asm/processor.h>
#include <asm/alternative.h>
#include <linux/interrupt.h>
#include <uapi/asm/kvm_para.h>

#include <asm/tdx.h>

#ifdef CONFIG_KVM_GUEST
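/*
 * Returns true if the host stopped this guest (kvmclock's
 * PVCLOCK_GUEST_STOPPED flag was set) and clears the flag, letting callers
 * such as the watchdog code suppress spurious lockup reports after a pause.
 */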
bool kvm_check_and_clear_guest_paused(void);
#else
static inline bool kvm_check_and_clear_guest_paused(void)
{
	return false;
}
#endif /* CONFIG_KVM_GUEST */

#define KVM_HYPERCALL \
	ALTERNATIVE("vmcall", "vmmcall", X86_FEATURE_VMMCALL)

/* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall
 * instruction. The hypervisor may replace it with something else but only the
 * instructions are guaranteed to be supported.
 *
 * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
 * The hypercall number should be placed in rax and the return value will be
 * placed in rax. No other registers will be clobbered unless explicitly
 * noted by the particular hypercall.
 */
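/*
 * For example, the PV spinlock code kicks a halted vCPU with a two-argument
 * hypercall, roughly as sketched here (KVM_HC_KICK_CPU comes from
 * uapi/linux/kvm_para.h; flags and apicid are illustrative values):
 *
 *	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
 */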
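/*
 * A TDX guest cannot issue VMCALL directly (it would raise a #VE), so each
 * helper below tunnels the request through the TDX module via
 * tdx_kvm_hypercall() when X86_FEATURE_TDX_GUEST is set.
 */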
static inline long kvm_hypercall0(unsigned int nr)
{
	long ret;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, 0, 0, 0, 0);

	asm volatile(KVM_HYPERCALL
		     : "=a" (ret)
		     : "a" (nr)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
{
	long ret;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, p1, 0, 0, 0);

	asm volatile(KVM_HYPERCALL
		     : "=a" (ret)
		     : "a" (nr), "b" (p1)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
				  unsigned long p2)
{
	long ret;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, p1, p2, 0, 0);

	asm volatile(KVM_HYPERCALL
		     : "=a" (ret)
		     : "a" (nr), "b" (p1), "c" (p2)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3)
{
	long ret;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, p1, p2, p3, 0);

	asm volatile(KVM_HYPERCALL
		     : "=a" (ret)
		     : "a" (nr), "b" (p1), "c" (p2), "d" (p3)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3,
				  unsigned long p4)
{
	long ret;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, p1, p2, p3, p4);

	asm volatile(KVM_HYPERCALL
		     : "=a" (ret)
		     : "a" (nr), "b" (p1), "c" (p2), "d" (p3), "S" (p4)
		     : "memory");
	return ret;
}
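/*
 * SEV guests only ever run on AMD hardware, so VMMCALL is always the right
 * instruction and no ALTERNATIVE patching is needed; this also keeps the
 * helper usable early in boot, before alternatives are applied. A hedged
 * sketch of a caller, notifying the host of a page encryption status change
 * (KVM_HC_MAP_GPA_RANGE and the flags come from the kvm_para.h UAPI headers;
 * gpa and npages are illustrative):
 *
 *	kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, gpa, npages,
 *			   KVM_MAP_GPA_RANGE_ENCRYPTED | KVM_MAP_GPA_RANGE_PAGE_SZ_4K);
 */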
static inline long kvm_sev_hypercall3(unsigned int nr, unsigned long p1,
				      unsigned long p2, unsigned long p3)
{
	long ret;

	asm volatile("vmmcall"
		     : "=a" (ret)
		     : "a" (nr), "b" (p1), "c" (p2), "d" (p3)
		     : "memory");
	return ret;
}
#ifdef CONFIG_KVM_GUEST
void kvmclock_init(void);
void kvmclock_disable(void);
bool kvm_para_available(void);
unsigned int kvm_arch_para_features(void);
unsigned int kvm_arch_para_hints(void);
void kvm_async_pf_task_wait_schedule(u32 token);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_apf_flags(void);
bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token);

DECLARE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
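/*
 * Called from the page-fault path; the static key keeps the common case
 * (async page faults not enabled) to a single patched branch, so guests
 * that never negotiate async PF pay almost nothing here.
 */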
static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
	if (static_branch_unlikely(&kvm_async_pf_enabled))
		return __kvm_handle_async_pf(regs, token);
	else
		return false;
}

#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init kvm_spinlock_init(void);
#else /* !CONFIG_PARAVIRT_SPINLOCKS */
static inline void kvm_spinlock_init(void)
{
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

#else /* CONFIG_KVM_GUEST */
#define kvm_async_pf_task_wait_schedule(T) do {} while(0)
#define kvm_async_pf_task_wake(T) do {} while(0)

static inline bool kvm_para_available(void)
{
	return false;
}

static inline unsigned int kvm_arch_para_features(void)
{
	return 0;
}

static inline unsigned int kvm_arch_para_hints(void)
{
	return 0;
}

static inline u32 kvm_read_and_reset_apf_flags(void)
{
	return 0;
}

static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
	return false;
}
#endif /* CONFIG_KVM_GUEST */

#endif /* _ASM_X86_KVM_PARA_H */