/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include "reverse_cpuid.h"
#include <asm/cpu.h>
#include <asm/processor.h>
#include <uapi/asm/kvm_para.h>

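/*
 * KVM's adjusted view of the host's CPUID-based capabilities: one u32 per
 * CPUID output register tracked by the reverse-CPUID machinery (hence
 * NR_KVM_CPU_CAPS entries).  Populated by kvm_set_cpu_caps() and treated
 * as read-mostly thereafter.
 */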
extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
void kvm_set_cpu_caps(void);

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
                                                    u32 function, u32 index);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
                                              u32 function);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
                            struct kvm_cpuid_entry2 __user *entries,
                            unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                             struct kvm_cpuid *cpuid,
                             struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
               u32 *ecx, u32 *edx, bool exact_only);

u32 xstate_required_size(u64 xstate_bv, bool compacted);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu);

static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.maxphyaddr;
}

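/*
 * A GPA is legal if and only if it doesn't set any bits that are reserved
 * given the vCPU's MAXPHYADDR, i.e. any bits in the cached reserved_gpa_bits
 * mask (see also kvm_vcpu_reserved_gpa_bits_raw()).
 */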
static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        return !(gpa & vcpu->arch.reserved_gpa_bits);
}

static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu,
                                                 gpa_t gpa, gpa_t alignment)
{
        return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa);
}

static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE);
}

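/*
 * Overwrite the target register of a CPUID entry with KVM's capabilities.
 * X86 feature numbers are encoded as (leaf * 32) + bit, where each "leaf"
 * is one 32-bit feature word mapped to a CPUID register, so 'leaf * 32' is
 * the feature number of bit 0 in that word and lets cpuid_entry_get_reg()
 * locate the matching register, e.g. cpuid_entry_override(entry,
 * CPUID_7_0_EBX) for the CPUID.0x7.0 EBX output.
 */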
static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
                                                 unsigned int leaf)
{
        u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);

        BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
        *reg = kvm_cpu_caps[leaf];
}

static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
                                                     unsigned int x86_feature)
{
        const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
        struct kvm_cpuid_entry2 *entry;

        entry = kvm_find_cpuid_entry_index(vcpu, cpuid.function, cpuid.index);
        if (!entry)
                return NULL;

        return __cpuid_entry_get_reg(entry, cpuid.reg);
}

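/*
 * Query the *guest's* CPUID, i.e. the feature set userspace has exposed to
 * the vCPU, as opposed to kvm_cpu_cap_has(), which queries what KVM itself
 * supports.  Returns false if the relevant CPUID entry doesn't exist.
 */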
static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
                                            unsigned int x86_feature)
{
        u32 *reg;

        reg = guest_cpuid_get_register(vcpu, x86_feature);
        if (!reg)
                return false;

        return *reg & __feature_bit(x86_feature);
}

static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
                                              unsigned int x86_feature)
{
        u32 *reg;

        reg = guest_cpuid_get_register(vcpu, x86_feature);
        if (reg)
                *reg &= ~__feature_bit(x86_feature);
}

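/*
 * The guest's vendor is identified from the CPUID.0x0 signature string held
 * in EBX/EDX/ECX, e.g. "GenuineIntel", "AuthenticAMD" or "HygonGenuine".
 */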
static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0);
        return best &&
               (is_guest_vendor_amd(best->ebx, best->ecx, best->edx) ||
                is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
}

static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0);
        return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
}

static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.is_amd_compatible;
}

static inline bool guest_cpuid_is_intel_compatible(struct kvm_vcpu *vcpu)
{
        return !guest_cpuid_is_amd_compatible(vcpu);
}

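/*
 * Family, model and stepping are decoded from the guest's CPUID.0x1 EAX
 * (the processor signature); each helper returns -1 if the leaf is absent.
 */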
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x1);
        if (!best)
                return -1;

        return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x1);
        if (!best)
                return -1;

        return x86_model(best->eax);
}

static inline bool cpuid_model_is_consistent(struct kvm_vcpu *vcpu)
{
        return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x1);
        if (!best)
                return -1;

        return x86_stepping(best->eax);
}

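/*
 * The speculation-control MSRs are visible to the guest if any one of
 * several feature bits enumerates them: Intel's SPEC_CTRL covers both MSRs,
 * while AMD defines standalone bits (AMD_STIBP/AMD_IBRS/AMD_SSBD for
 * MSR_IA32_SPEC_CTRL, AMD_IBPB/SBPB for MSR_IA32_PRED_CMD).
 */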
static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
{
        return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
                guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
                guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
                guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
}

static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
{
        return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
                guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB) ||
                guest_cpuid_has(vcpu, X86_FEATURE_SBPB));
}

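/*
 * CPUID faulting: supports_cpuid_fault() reports whether userspace has
 * advertised the capability via MSR_PLATFORM_INFO, while
 * cpuid_fault_enabled() reports whether the guest has actually turned it
 * on, in which case CPUID executed at CPL > 0 raises a #GP.
 */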
static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}

static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.msr_misc_features_enables &
               MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

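/*
 * Accessors for kvm_cpu_caps.  These must be __always_inline with a
 * compile-time constant feature so that reverse_cpuid_check() can reject
 * untracked feature words at build time.
 */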
static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
{
        unsigned int x86_leaf = __feature_leaf(x86_feature);

        reverse_cpuid_check(x86_leaf);
        kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
}

static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
{
        unsigned int x86_leaf = __feature_leaf(x86_feature);

        reverse_cpuid_check(x86_leaf);
        kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
}

static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
{
        unsigned int x86_leaf = __feature_leaf(x86_feature);

        reverse_cpuid_check(x86_leaf);
        return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
}

static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
{
        return !!kvm_cpu_cap_get(x86_feature);
}

static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
{
        if (boot_cpu_has(x86_feature))
                kvm_cpu_cap_set(x86_feature);
}

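/*
 * Paravirtual features default to "available" unless userspace has opted
 * in to enforcement (KVM_CAP_ENFORCE_PV_FEATURE_CPUID), in which case only
 * the KVM_FEATURE_* bits present in the guest's CPUID are honored.
 */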
static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
                                         unsigned int kvm_feature)
{
        if (!vcpu->arch.pv_cpuid.enforce)
                return true;

        return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
}

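/*
 * "Governed" features are a small set of features (defined by the X-macro
 * list in governed_features.h, e.g. X86_FEATURE_LAM as used below) whose
 * "KVM supports it AND the guest has it" state is cached in a per-vCPU
 * bitmap so that hot paths can query it without walking guest CPUID.
 */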
enum kvm_governed_features {
#define KVM_GOVERNED_FEATURE(x) KVM_GOVERNED_##x,
#include "governed_features.h"
        KVM_NR_GOVERNED_FEATURES
};

static __always_inline int kvm_governed_feature_index(unsigned int x86_feature)
{
        switch (x86_feature) {
#define KVM_GOVERNED_FEATURE(x) case x: return KVM_GOVERNED_##x;
#include "governed_features.h"
        default:
                return -1;
        }
}

static __always_inline bool kvm_is_governed_feature(unsigned int x86_feature)
{
        return kvm_governed_feature_index(x86_feature) >= 0;
}

static __always_inline void kvm_governed_feature_set(struct kvm_vcpu *vcpu,
                                                     unsigned int x86_feature)
{
        BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));

        __set_bit(kvm_governed_feature_index(x86_feature),
                  vcpu->arch.governed_features.enabled);
}

static __always_inline void kvm_governed_feature_check_and_set(struct kvm_vcpu *vcpu,
                                                               unsigned int x86_feature)
{
        if (kvm_cpu_cap_has(x86_feature) && guest_cpuid_has(vcpu, x86_feature))
                kvm_governed_feature_set(vcpu, x86_feature);
}

static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu,
                                          unsigned int x86_feature)
{
        BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));

        return test_bit(kvm_governed_feature_index(x86_feature),
                        vcpu->arch.governed_features.enabled);
}

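/*
 * CR3's LAM bits (X86_CR3_LAM_U48/X86_CR3_LAM_U57) are not part of the
 * physical address and must be ignored when checking against the
 * reserved-GPA mask, but only if the guest can actually use LAM.
 */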
static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        if (guest_can_use(vcpu, X86_FEATURE_LAM))
                cr3 &= ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57);

        return kvm_vcpu_is_legal_gpa(vcpu, cr3);
}

#endif