1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Kernel-based Virtual Machine driver for Linux |
4 | * cpuid support routines |
5 | * |
6 | * derived from arch/x86/kvm/x86.c |
7 | * |
8 | * Copyright 2011 Red Hat, Inc. and/or its affiliates. |
9 | * Copyright IBM Corporation, 2008 |
10 | */ |
11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
12 | |
13 | #include <linux/kvm_host.h> |
14 | #include "linux/lockdep.h" |
15 | #include <linux/export.h> |
16 | #include <linux/vmalloc.h> |
17 | #include <linux/uaccess.h> |
18 | #include <linux/sched/stat.h> |
19 | |
20 | #include <asm/processor.h> |
21 | #include <asm/user.h> |
22 | #include <asm/fpu/xstate.h> |
23 | #include <asm/sgx.h> |
24 | #include <asm/cpuid.h> |
25 | #include "cpuid.h" |
26 | #include "lapic.h" |
27 | #include "mmu.h" |
28 | #include "trace.h" |
29 | #include "pmu.h" |
30 | #include "xen.h" |
31 | |
32 | /* |
33 | * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be |
34 | * aligned to sizeof(unsigned long) because it's not accessed via bitops. |
35 | */ |
36 | u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly; |
37 | EXPORT_SYMBOL_GPL(kvm_cpu_caps); |
38 | |
39 | u32 xstate_required_size(u64 xstate_bv, bool compacted) |
40 | { |
41 | int feature_bit = 0; |
42 | u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; |
43 | |
44 | xstate_bv &= XFEATURE_MASK_EXTEND; |
45 | while (xstate_bv) { |
46 | if (xstate_bv & 0x1) { |
47 | u32 eax, ebx, ecx, edx, offset; |
			cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
49 | /* ECX[1]: 64B alignment in compacted form */ |
50 | if (compacted) |
51 | offset = (ecx & 0x2) ? ALIGN(ret, 64) : ret; |
52 | else |
53 | offset = ebx; |
54 | ret = max(ret, offset + eax); |
55 | } |
56 | |
57 | xstate_bv >>= 1; |
58 | feature_bit++; |
59 | } |
60 | |
61 | return ret; |
62 | } |
63 | |
64 | #define F feature_bit |
65 | |
66 | /* Scattered Flag - For features that are scattered by cpufeatures.h. */ |
67 | #define SF(name) \ |
68 | ({ \ |
69 | BUILD_BUG_ON(X86_FEATURE_##name >= MAX_CPU_FEATURES); \ |
70 | (boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0); \ |
71 | }) |
72 | |
73 | /* |
74 | * Magic value used by KVM when querying userspace-provided CPUID entries and |
 * doesn't care about the CPUID index because the index of the function in
76 | * question is not significant. Note, this magic value must have at least one |
77 | * bit set in bits[63:32] and must be consumed as a u64 by cpuid_entry2_find() |
78 | * to avoid false positives when processing guest CPUID input. |
79 | */ |
80 | #define KVM_CPUID_INDEX_NOT_SIGNIFICANT -1ull |
81 | |
82 | static inline struct kvm_cpuid_entry2 *cpuid_entry2_find( |
83 | struct kvm_cpuid_entry2 *entries, int nent, u32 function, u64 index) |
84 | { |
85 | struct kvm_cpuid_entry2 *e; |
86 | int i; |
87 | |
88 | /* |
89 | * KVM has a semi-arbitrary rule that querying the guest's CPUID model |
90 | * with IRQs disabled is disallowed. The CPUID model can legitimately |
91 | * have over one hundred entries, i.e. the lookup is slow, and IRQs are |
92 | * typically disabled in KVM only when KVM is in a performance critical |
93 | * path, e.g. the core VM-Enter/VM-Exit run loop. Nothing will break |
94 | * if this rule is violated, this assertion is purely to flag potential |
95 | * performance issues. If this fires, consider moving the lookup out |
96 | * of the hotpath, e.g. by caching information during CPUID updates. |
97 | */ |
98 | lockdep_assert_irqs_enabled(); |
99 | |
100 | for (i = 0; i < nent; i++) { |
101 | e = &entries[i]; |
102 | |
103 | if (e->function != function) |
104 | continue; |
105 | |
106 | /* |
107 | * If the index isn't significant, use the first entry with a |
108 | * matching function. It's userspace's responsibility to not |
109 | * provide "duplicate" entries in all cases. |
110 | */ |
111 | if (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index) |
			return e;

		/*
116 | * Similarly, use the first matching entry if KVM is doing a |
117 | * lookup (as opposed to emulating CPUID) for a function that's |
118 | * architecturally defined as not having a significant index. |
119 | */ |
120 | if (index == KVM_CPUID_INDEX_NOT_SIGNIFICANT) { |
121 | /* |
122 | * Direct lookups from KVM should not diverge from what |
123 | * KVM defines internally (the architectural behavior). |
124 | */ |
125 | WARN_ON_ONCE(cpuid_function_is_indexed(function)); |
126 | return e; |
127 | } |
128 | } |
129 | |
130 | return NULL; |
131 | } |
132 | |
133 | static int kvm_check_cpuid(struct kvm_vcpu *vcpu, |
134 | struct kvm_cpuid_entry2 *entries, |
135 | int nent) |
136 | { |
137 | struct kvm_cpuid_entry2 *best; |
138 | u64 xfeatures; |
139 | |
140 | /* |
141 | * The existing code assumes virtual address is 48-bit or 57-bit in the |
142 | * canonical address checks; exit if it is ever changed. |
143 | */ |
	best = cpuid_entry2_find(entries, nent, 0x80000008,
145 | KVM_CPUID_INDEX_NOT_SIGNIFICANT); |
146 | if (best) { |
147 | int vaddr_bits = (best->eax & 0xff00) >> 8; |
148 | |
149 | if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0) |
150 | return -EINVAL; |
151 | } |
152 | |
153 | /* |
154 | * Exposing dynamic xfeatures to the guest requires additional |
155 | * enabling in the FPU, e.g. to expand the guest XSAVE state size. |
156 | */ |
	best = cpuid_entry2_find(entries, nent, 0xd, 0);
158 | if (!best) |
159 | return 0; |
160 | |
161 | xfeatures = best->eax | ((u64)best->edx << 32); |
162 | xfeatures &= XFEATURE_MASK_USER_DYNAMIC; |
163 | if (!xfeatures) |
164 | return 0; |
165 | |
	return fpu_enable_guest_xfd_features(&vcpu->arch.guest_fpu, xfeatures);
167 | } |
168 | |
169 | /* Check whether the supplied CPUID data is equal to what is already set for the vCPU. */ |
170 | static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2, |
171 | int nent) |
172 | { |
173 | struct kvm_cpuid_entry2 *orig; |
174 | int i; |
175 | |
176 | if (nent != vcpu->arch.cpuid_nent) |
177 | return -EINVAL; |
178 | |
179 | for (i = 0; i < nent; i++) { |
180 | orig = &vcpu->arch.cpuid_entries[i]; |
181 | if (e2[i].function != orig->function || |
182 | e2[i].index != orig->index || |
183 | e2[i].flags != orig->flags || |
184 | e2[i].eax != orig->eax || e2[i].ebx != orig->ebx || |
185 | e2[i].ecx != orig->ecx || e2[i].edx != orig->edx) |
186 | return -EINVAL; |
187 | } |
188 | |
189 | return 0; |
190 | } |
191 | |
192 | static struct kvm_hypervisor_cpuid __kvm_get_hypervisor_cpuid(struct kvm_cpuid_entry2 *entries, |
193 | int nent, const char *sig) |
194 | { |
195 | struct kvm_hypervisor_cpuid cpuid = {}; |
196 | struct kvm_cpuid_entry2 *entry; |
197 | u32 base; |
198 | |
199 | for_each_possible_hypervisor_cpuid_base(base) { |
		entry = cpuid_entry2_find(entries, nent, base, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
201 | |
202 | if (entry) { |
203 | u32 signature[3]; |
204 | |
205 | signature[0] = entry->ebx; |
206 | signature[1] = entry->ecx; |
207 | signature[2] = entry->edx; |
208 | |
			if (!memcmp(signature, sig, sizeof(signature))) {
210 | cpuid.base = base; |
211 | cpuid.limit = entry->eax; |
212 | break; |
213 | } |
214 | } |
215 | } |
216 | |
217 | return cpuid; |
218 | } |
219 | |
220 | static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcpu, |
221 | const char *sig) |
222 | { |
	return __kvm_get_hypervisor_cpuid(vcpu->arch.cpuid_entries,
					  vcpu->arch.cpuid_nent, sig);
225 | } |
226 | |
227 | static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_cpuid_entry2 *entries, |
228 | int nent, u32 kvm_cpuid_base) |
229 | { |
	return cpuid_entry2_find(entries, nent, kvm_cpuid_base | KVM_CPUID_FEATURES,
231 | KVM_CPUID_INDEX_NOT_SIGNIFICANT); |
232 | } |
233 | |
234 | static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu) |
235 | { |
236 | u32 base = vcpu->arch.kvm_cpuid.base; |
237 | |
238 | if (!base) |
239 | return NULL; |
240 | |
	return __kvm_find_kvm_cpuid_features(vcpu->arch.cpuid_entries,
					     vcpu->arch.cpuid_nent, base);
243 | } |
244 | |
245 | void kvm_update_pv_runtime(struct kvm_vcpu *vcpu) |
246 | { |
247 | struct kvm_cpuid_entry2 *best = kvm_find_kvm_cpuid_features(vcpu); |
248 | |
249 | /* |
250 | * save the feature bitmap to avoid cpuid lookup for every PV |
251 | * operation |
252 | */ |
253 | if (best) |
254 | vcpu->arch.pv_cpuid.features = best->eax; |
255 | } |
256 | |
257 | /* |
 * Calculate guest's supported XCR0 taking into account guest CPUID data and
 * KVM's supported XCR0 (the host's XCR0 intersected with KVM_SUPPORTED_XCR0).
260 | */ |
261 | static u64 cpuid_get_supported_xcr0(struct kvm_cpuid_entry2 *entries, int nent) |
262 | { |
263 | struct kvm_cpuid_entry2 *best; |
264 | |
	best = cpuid_entry2_find(entries, nent, 0xd, 0);
266 | if (!best) |
267 | return 0; |
268 | |
269 | return (best->eax | ((u64)best->edx << 32)) & kvm_caps.supported_xcr0; |
270 | } |
271 | |
272 | static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *entries, |
273 | int nent) |
274 | { |
275 | struct kvm_cpuid_entry2 *best; |
276 | struct kvm_hypervisor_cpuid kvm_cpuid; |
277 | |
	best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
	if (best) {
		/* Update OSXSAVE bit */
		if (boot_cpu_has(X86_FEATURE_XSAVE))
			cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
					   kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE));

		cpuid_entry_change(best, X86_FEATURE_APIC,
				   vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
	}

	best = cpuid_entry2_find(entries, nent, 7, 0);
	if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
		cpuid_entry_change(best, X86_FEATURE_OSPKE,
				   kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE));

	best = cpuid_entry2_find(entries, nent, 0xD, 0);
	if (best)
		best->ebx = xstate_required_size(vcpu->arch.xcr0, false);

	best = cpuid_entry2_find(entries, nent, 0xD, 1);
	if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
		     cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

	kvm_cpuid = __kvm_get_hypervisor_cpuid(entries, nent, KVM_SIGNATURE);
	if (kvm_cpuid.base) {
		best = __kvm_find_kvm_cpuid_features(entries, nent, kvm_cpuid.base);
		if (kvm_hlt_in_guest(vcpu->kvm) && best)
			best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
	}

	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
		best = cpuid_entry2_find(entries, nent, 0x1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
		if (best)
			cpuid_entry_change(best, X86_FEATURE_MWAIT,
					   vcpu->arch.ia32_misc_enable_msr &
					   MSR_IA32_MISC_ENABLE_MWAIT);
	}
317 | } |
318 | |
319 | void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu) |
320 | { |
	__kvm_update_cpuid_runtime(vcpu, vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
322 | } |
323 | EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime); |
324 | |
325 | static bool kvm_cpuid_has_hyperv(struct kvm_cpuid_entry2 *entries, int nent) |
326 | { |
327 | #ifdef CONFIG_KVM_HYPERV |
328 | struct kvm_cpuid_entry2 *entry; |
329 | |
330 | entry = cpuid_entry2_find(entries, nent, HYPERV_CPUID_INTERFACE, |
331 | KVM_CPUID_INDEX_NOT_SIGNIFICANT); |
332 | return entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX; |
333 | #else |
334 | return false; |
335 | #endif |
336 | } |
337 | |
338 | static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) |
339 | { |
340 | struct kvm_lapic *apic = vcpu->arch.apic; |
341 | struct kvm_cpuid_entry2 *best; |
342 | bool allow_gbpages; |
343 | |
344 | BUILD_BUG_ON(KVM_NR_GOVERNED_FEATURES > KVM_MAX_NR_GOVERNED_FEATURES); |
	bitmap_zero(vcpu->arch.governed_features.enabled,
346 | KVM_MAX_NR_GOVERNED_FEATURES); |
347 | |
348 | /* |
349 | * If TDP is enabled, let the guest use GBPAGES if they're supported in |
350 | * hardware. The hardware page walker doesn't let KVM disable GBPAGES, |
351 | * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA |
352 | * walk for performance and complexity reasons. Not to mention KVM |
353 | * _can't_ solve the problem because GVA->GPA walks aren't visible to |
354 | * KVM once a TDP translation is installed. Mimic hardware behavior so |
355 | * that KVM's is at least consistent, i.e. doesn't randomly inject #PF. |
356 | * If TDP is disabled, honor *only* guest CPUID as KVM has full control |
357 | * and can install smaller shadow pages if the host lacks 1GiB support. |
358 | */ |
359 | allow_gbpages = tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) : |
360 | guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES); |
361 | if (allow_gbpages) |
362 | kvm_governed_feature_set(vcpu, X86_FEATURE_GBPAGES); |
363 | |
	best = kvm_find_cpuid_entry(vcpu, 1);
	if (best && apic) {
		if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
367 | apic->lapic_timer.timer_mode_mask = 3 << 17; |
368 | else |
369 | apic->lapic_timer.timer_mode_mask = 1 << 17; |
370 | |
371 | kvm_apic_set_version(vcpu); |
372 | } |
373 | |
	vcpu->arch.guest_supported_xcr0 =
		cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
376 | |
377 | kvm_update_pv_runtime(vcpu); |
378 | |
379 | vcpu->arch.is_amd_compatible = guest_cpuid_is_amd_or_hygon(vcpu); |
380 | vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); |
381 | vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu); |
382 | |
383 | kvm_pmu_refresh(vcpu); |
384 | vcpu->arch.cr4_guest_rsvd_bits = |
385 | __cr4_reserved_bits(guest_cpuid_has, vcpu); |
386 | |
	kvm_hv_set_cpuid(vcpu, kvm_cpuid_has_hyperv(vcpu->arch.cpuid_entries,
						    vcpu->arch.cpuid_nent));
389 | |
390 | /* Invoke the vendor callback only after the above state is updated. */ |
391 | static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu); |
392 | |
393 | /* |
394 | * Except for the MMU, which needs to do its thing any vendor specific |
395 | * adjustments to the reserved GPA bits. |
396 | */ |
397 | kvm_mmu_after_set_cpuid(vcpu); |
398 | } |
399 | |
400 | int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu) |
401 | { |
402 | struct kvm_cpuid_entry2 *best; |
403 | |
	best = kvm_find_cpuid_entry(vcpu, 0x80000000);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008);
408 | if (best) |
409 | return best->eax & 0xff; |
410 | not_found: |
411 | return 36; |
412 | } |
413 | |
414 | /* |
415 | * This "raw" version returns the reserved GPA bits without any adjustments for |
416 | * encryption technologies that usurp bits. The raw mask should be used if and |
417 | * only if hardware does _not_ strip the usurped bits, e.g. in virtual MTRRs. |
418 | */ |
419 | u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu) |
420 | { |
	return rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
422 | } |
423 | |
424 | static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2, |
425 | int nent) |
426 | { |
427 | int r; |
428 | |
	__kvm_update_cpuid_runtime(vcpu, e2, nent);
430 | |
431 | /* |
432 | * KVM does not correctly handle changing guest CPUID after KVM_RUN, as |
	 * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc. aren't
434 | * tracked in kvm_mmu_page_role. As a result, KVM may miss guest page |
435 | * faults due to reusing SPs/SPTEs. In practice no sane VMM mucks with |
436 | * the core vCPU model on the fly. It would've been better to forbid any |
437 | * KVM_SET_CPUID{,2} calls after KVM_RUN altogether but unfortunately |
438 | * some VMMs (e.g. QEMU) reuse vCPU fds for CPU hotplug/unplug and do |
439 | * KVM_SET_CPUID{,2} again. To support this legacy behavior, check |
440 | * whether the supplied CPUID data is equal to what's already set. |
441 | */ |
442 | if (kvm_vcpu_has_run(vcpu)) { |
443 | r = kvm_cpuid_check_equal(vcpu, e2, nent); |
444 | if (r) |
445 | return r; |
446 | |
		kvfree(e2);
448 | return 0; |
449 | } |
450 | |
451 | #ifdef CONFIG_KVM_HYPERV |
	if (kvm_cpuid_has_hyperv(e2, nent)) {
453 | r = kvm_hv_vcpu_init(vcpu); |
454 | if (r) |
455 | return r; |
456 | } |
457 | #endif |
458 | |
	r = kvm_check_cpuid(vcpu, e2, nent);
460 | if (r) |
461 | return r; |
462 | |
	kvfree(vcpu->arch.cpuid_entries);
464 | vcpu->arch.cpuid_entries = e2; |
465 | vcpu->arch.cpuid_nent = nent; |
466 | |
467 | vcpu->arch.kvm_cpuid = kvm_get_hypervisor_cpuid(vcpu, KVM_SIGNATURE); |
468 | #ifdef CONFIG_KVM_XEN |
469 | vcpu->arch.xen.cpuid = kvm_get_hypervisor_cpuid(vcpu, XEN_SIGNATURE); |
470 | #endif |
471 | kvm_vcpu_after_set_cpuid(vcpu); |
472 | |
473 | return 0; |
474 | } |
475 | |
/* Legacy KVM_SET_CPUID path: old userspace supplies struct kvm_cpuid_entry. */
477 | int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, |
478 | struct kvm_cpuid *cpuid, |
479 | struct kvm_cpuid_entry __user *entries) |
480 | { |
481 | int r, i; |
482 | struct kvm_cpuid_entry *e = NULL; |
483 | struct kvm_cpuid_entry2 *e2 = NULL; |
484 | |
485 | if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) |
486 | return -E2BIG; |
487 | |
488 | if (cpuid->nent) { |
		e = vmemdup_array_user(entries, cpuid->nent, sizeof(*e));
		if (IS_ERR(e))
			return PTR_ERR(e);

		e2 = kvmalloc_array(cpuid->nent, sizeof(*e2), GFP_KERNEL_ACCOUNT);
494 | if (!e2) { |
495 | r = -ENOMEM; |
496 | goto out_free_cpuid; |
497 | } |
498 | } |
499 | for (i = 0; i < cpuid->nent; i++) { |
500 | e2[i].function = e[i].function; |
501 | e2[i].eax = e[i].eax; |
502 | e2[i].ebx = e[i].ebx; |
503 | e2[i].ecx = e[i].ecx; |
504 | e2[i].edx = e[i].edx; |
505 | e2[i].index = 0; |
506 | e2[i].flags = 0; |
507 | e2[i].padding[0] = 0; |
508 | e2[i].padding[1] = 0; |
509 | e2[i].padding[2] = 0; |
510 | } |
511 | |
	r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
	if (r)
		kvfree(e2);

out_free_cpuid:
	kvfree(e);
518 | |
519 | return r; |
520 | } |
521 | |
522 | int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, |
523 | struct kvm_cpuid2 *cpuid, |
524 | struct kvm_cpuid_entry2 __user *entries) |
525 | { |
526 | struct kvm_cpuid_entry2 *e2 = NULL; |
527 | int r; |
528 | |
529 | if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) |
530 | return -E2BIG; |
531 | |
532 | if (cpuid->nent) { |
		e2 = vmemdup_array_user(entries, cpuid->nent, sizeof(*e2));
		if (IS_ERR(e2))
			return PTR_ERR(e2);
	}

	r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
	if (r)
		kvfree(e2);
541 | |
542 | return r; |
543 | } |
544 | |
545 | int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, |
546 | struct kvm_cpuid2 *cpuid, |
547 | struct kvm_cpuid_entry2 __user *entries) |
548 | { |
549 | if (cpuid->nent < vcpu->arch.cpuid_nent) |
550 | return -E2BIG; |
551 | |
	if (copy_to_user(entries, vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
554 | return -EFAULT; |
555 | |
556 | cpuid->nent = vcpu->arch.cpuid_nent; |
557 | return 0; |
558 | } |
559 | |
560 | /* Mask kvm_cpu_caps for @leaf with the raw CPUID capabilities of this CPU. */ |
561 | static __always_inline void __kvm_cpu_cap_mask(unsigned int leaf) |
562 | { |
	const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32);
	struct kvm_cpuid_entry2 entry;

	reverse_cpuid_check(leaf);

	cpuid_count(cpuid.function, cpuid.index,
		    &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);

	kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg);
572 | } |
573 | |
574 | static __always_inline |
575 | void kvm_cpu_cap_init_kvm_defined(enum kvm_only_cpuid_leafs leaf, u32 mask) |
576 | { |
577 | /* Use kvm_cpu_cap_mask for leafs that aren't KVM-only. */ |
578 | BUILD_BUG_ON(leaf < NCAPINTS); |
579 | |
580 | kvm_cpu_caps[leaf] = mask; |
581 | |
582 | __kvm_cpu_cap_mask(leaf); |
583 | } |
584 | |
585 | static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask) |
586 | { |
587 | /* Use kvm_cpu_cap_init_kvm_defined for KVM-only leafs. */ |
588 | BUILD_BUG_ON(leaf >= NCAPINTS); |
589 | |
590 | kvm_cpu_caps[leaf] &= mask; |
591 | |
592 | __kvm_cpu_cap_mask(leaf); |
593 | } |
594 | |
595 | void kvm_set_cpu_caps(void) |
596 | { |
597 | #ifdef CONFIG_X86_64 |
598 | unsigned int f_gbpages = F(GBPAGES); |
599 | unsigned int f_lm = F(LM); |
600 | unsigned int f_xfd = F(XFD); |
601 | #else |
602 | unsigned int f_gbpages = 0; |
603 | unsigned int f_lm = 0; |
604 | unsigned int f_xfd = 0; |
605 | #endif |
606 | memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps)); |
607 | |
608 | BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) > |
609 | sizeof(boot_cpu_data.x86_capability)); |
610 | |
611 | memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability, |
612 | sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps))); |
613 | |
	kvm_cpu_cap_mask(CPUID_1_ECX,
615 | /* |
616 | * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not* |
617 | * advertised to guests via CPUID! |
618 | */ |
619 | F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ | |
620 | 0 /* DS-CPL, VMX, SMX, EST */ | |
621 | 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ | |
622 | F(FMA) | F(CX16) | 0 /* xTPR Update */ | F(PDCM) | |
623 | F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) | |
624 | F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) | |
625 | 0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) | |
626 | F(F16C) | F(RDRAND) |
627 | ); |
628 | /* KVM emulates x2apic in software irrespective of host support. */ |
629 | kvm_cpu_cap_set(X86_FEATURE_X2APIC); |
630 | |
	kvm_cpu_cap_mask(CPUID_1_EDX,
632 | F(FPU) | F(VME) | F(DE) | F(PSE) | |
633 | F(TSC) | F(MSR) | F(PAE) | F(MCE) | |
634 | F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) | |
635 | F(MTRR) | F(PGE) | F(MCA) | F(CMOV) | |
636 | F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) | |
637 | 0 /* Reserved, DS, ACPI */ | F(MMX) | |
638 | F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) | |
639 | 0 /* HTT, TM, Reserved, PBE */ |
640 | ); |
641 | |
	kvm_cpu_cap_mask(CPUID_7_0_EBX,
643 | F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) | |
644 | F(FDP_EXCPTN_ONLY) | F(SMEP) | F(BMI2) | F(ERMS) | F(INVPCID) | |
645 | F(RTM) | F(ZERO_FCS_FDS) | 0 /*MPX*/ | F(AVX512F) | |
646 | F(AVX512DQ) | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) | |
647 | F(CLFLUSHOPT) | F(CLWB) | 0 /*INTEL_PT*/ | F(AVX512PF) | |
648 | F(AVX512ER) | F(AVX512CD) | F(SHA_NI) | F(AVX512BW) | |
649 | F(AVX512VL)); |
650 | |
	kvm_cpu_cap_mask(CPUID_7_ECX,
652 | F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) | |
653 | F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) | |
654 | F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) | |
655 | F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/ | |
656 | F(SGX_LC) | F(BUS_LOCK_DETECT) |
657 | ); |
658 | /* Set LA57 based on hardware capability. */ |
	if (cpuid_ecx(7) & F(LA57))
660 | kvm_cpu_cap_set(X86_FEATURE_LA57); |
661 | |
662 | /* |
	 * PKU is not yet implemented for shadow paging and requires OSPKE
	 * to be set on the host.  Clear it if that is not the case.
665 | */ |
666 | if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) |
667 | kvm_cpu_cap_clear(X86_FEATURE_PKU); |
668 | |
	kvm_cpu_cap_mask(CPUID_7_EDX,
670 | F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | |
671 | F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) | |
672 | F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) | |
673 | F(SERIALIZE) | F(TSXLDTRK) | F(AVX512_FP16) | |
674 | F(AMX_TILE) | F(AMX_INT8) | F(AMX_BF16) | F(FLUSH_L1D) |
675 | ); |
676 | |
677 | /* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */ |
678 | kvm_cpu_cap_set(X86_FEATURE_TSC_ADJUST); |
679 | kvm_cpu_cap_set(X86_FEATURE_ARCH_CAPABILITIES); |
680 | |
681 | if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS)) |
682 | kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL); |
683 | if (boot_cpu_has(X86_FEATURE_STIBP)) |
684 | kvm_cpu_cap_set(X86_FEATURE_INTEL_STIBP); |
685 | if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) |
686 | kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD); |
687 | |
	kvm_cpu_cap_mask(CPUID_7_1_EAX,
689 | F(AVX_VNNI) | F(AVX512_BF16) | F(CMPCCXADD) | |
690 | F(FZRM) | F(FSRS) | F(FSRC) | |
691 | F(AMX_FP16) | F(AVX_IFMA) | F(LAM) |
692 | ); |
693 | |
	kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX,
695 | F(AVX_VNNI_INT8) | F(AVX_NE_CONVERT) | F(PREFETCHITI) | |
696 | F(AMX_COMPLEX) |
697 | ); |
698 | |
	kvm_cpu_cap_init_kvm_defined(CPUID_7_2_EDX,
700 | F(INTEL_PSFD) | F(IPRED_CTRL) | F(RRSBA_CTRL) | F(DDPD_U) | |
701 | F(BHI_CTRL) | F(MCDT_NO) |
702 | ); |
703 | |
	kvm_cpu_cap_mask(CPUID_D_1_EAX,
705 | F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd |
706 | ); |
707 | |
	kvm_cpu_cap_init_kvm_defined(CPUID_12_EAX,
709 | SF(SGX1) | SF(SGX2) | SF(SGX_EDECCSSA) |
710 | ); |
711 | |
	kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
713 | F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ | |
714 | F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) | |
715 | F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) | |
716 | 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) | |
717 | F(TOPOEXT) | 0 /* PERFCTR_CORE */ |
718 | ); |
719 | |
	kvm_cpu_cap_mask(CPUID_8000_0001_EDX,
721 | F(FPU) | F(VME) | F(DE) | F(PSE) | |
722 | F(TSC) | F(MSR) | F(PAE) | F(MCE) | |
723 | F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) | |
724 | F(MTRR) | F(PGE) | F(MCA) | F(CMOV) | |
725 | F(PAT) | F(PSE36) | 0 /* Reserved */ | |
726 | F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) | |
727 | F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) | |
728 | 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW) |
729 | ); |
730 | |
731 | if (!tdp_enabled && IS_ENABLED(CONFIG_X86_64)) |
732 | kvm_cpu_cap_set(X86_FEATURE_GBPAGES); |
733 | |
	kvm_cpu_cap_init_kvm_defined(CPUID_8000_0007_EDX,
735 | SF(CONSTANT_TSC) |
736 | ); |
737 | |
	kvm_cpu_cap_mask(CPUID_8000_0008_EBX,
739 | F(CLZERO) | F(XSAVEERPTR) | |
740 | F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) | |
741 | F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) | |
742 | F(AMD_PSFD) |
743 | ); |
744 | |
745 | /* |
746 | * AMD has separate bits for each SPEC_CTRL bit. |
747 | * arch/x86/kernel/cpu/bugs.c is kind enough to |
748 | * record that in cpufeatures so use them. |
749 | */ |
750 | if (boot_cpu_has(X86_FEATURE_IBPB)) |
751 | kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB); |
752 | if (boot_cpu_has(X86_FEATURE_IBRS)) |
753 | kvm_cpu_cap_set(X86_FEATURE_AMD_IBRS); |
754 | if (boot_cpu_has(X86_FEATURE_STIBP)) |
755 | kvm_cpu_cap_set(X86_FEATURE_AMD_STIBP); |
756 | if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD)) |
757 | kvm_cpu_cap_set(X86_FEATURE_AMD_SSBD); |
758 | if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) |
759 | kvm_cpu_cap_set(X86_FEATURE_AMD_SSB_NO); |
760 | /* |
761 | * The preference is to use SPEC CTRL MSR instead of the |
762 | * VIRT_SPEC MSR. |
763 | */ |
764 | if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) && |
765 | !boot_cpu_has(X86_FEATURE_AMD_SSBD)) |
766 | kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD); |
767 | |
768 | /* |
769 | * Hide all SVM features by default, SVM will set the cap bits for |
770 | * features it emulates and/or exposes for L1. |
771 | */ |
	kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);
773 | |
	kvm_cpu_cap_mask(CPUID_8000_001F_EAX,
		0 /* SME */ | F(SEV) | 0 /* VM_PAGE_FLUSH */ | F(SEV_ES) |
		F(SME_COHERENT));
777 | |
	kvm_cpu_cap_mask(CPUID_8000_0021_EAX,
779 | F(NO_NESTED_DATA_BP) | F(LFENCE_RDTSC) | 0 /* SmmPgCfgLock */ | |
780 | F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */ | |
781 | F(WRMSR_XX_BASE_NS) |
782 | ); |
783 | |
784 | kvm_cpu_cap_check_and_set(X86_FEATURE_SBPB); |
785 | kvm_cpu_cap_check_and_set(X86_FEATURE_IBPB_BRTYPE); |
786 | kvm_cpu_cap_check_and_set(X86_FEATURE_SRSO_NO); |
787 | |
	kvm_cpu_cap_init_kvm_defined(CPUID_8000_0022_EAX,
789 | F(PERFMON_V2) |
790 | ); |
791 | |
792 | /* |
793 | * Synthesize "LFENCE is serializing" into the AMD-defined entry in |
794 | * KVM's supported CPUID if the feature is reported as supported by the |
795 | * kernel. LFENCE_RDTSC was a Linux-defined synthetic feature long |
796 | * before AMD joined the bandwagon, e.g. LFENCE is serializing on most |
797 | * CPUs that support SSE2. On CPUs that don't support AMD's leaf, |
798 | * kvm_cpu_cap_mask() will unfortunately drop the flag due to ANDing |
799 | * the mask with the raw host CPUID, and reporting support in AMD's |
800 | * leaf can make it easier for userspace to detect the feature. |
801 | */ |
802 | if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC)) |
803 | kvm_cpu_cap_set(X86_FEATURE_LFENCE_RDTSC); |
804 | if (!static_cpu_has_bug(X86_BUG_NULL_SEG)) |
805 | kvm_cpu_cap_set(X86_FEATURE_NULL_SEL_CLR_BASE); |
806 | kvm_cpu_cap_set(X86_FEATURE_NO_SMM_CTL_MSR); |
807 | |
	kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
809 | F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) | |
810 | F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) | |
811 | F(PMM) | F(PMM_EN) |
812 | ); |
813 | |
814 | /* |
815 | * Hide RDTSCP and RDPID if either feature is reported as supported but |
816 | * probing MSR_TSC_AUX failed. This is purely a sanity check and |
817 | * should never happen, but the guest will likely crash if RDTSCP or |
818 | * RDPID is misreported, and KVM has botched MSR_TSC_AUX emulation in |
819 | * the past. For example, the sanity check may fire if this instance of |
820 | * KVM is running as L1 on top of an older, broken KVM. |
821 | */ |
822 | if (WARN_ON((kvm_cpu_cap_has(X86_FEATURE_RDTSCP) || |
823 | kvm_cpu_cap_has(X86_FEATURE_RDPID)) && |
824 | !kvm_is_supported_user_return_msr(MSR_TSC_AUX))) { |
825 | kvm_cpu_cap_clear(X86_FEATURE_RDTSCP); |
826 | kvm_cpu_cap_clear(X86_FEATURE_RDPID); |
827 | } |
828 | } |
829 | EXPORT_SYMBOL_GPL(kvm_set_cpu_caps); |
830 | |
831 | struct kvm_cpuid_array { |
832 | struct kvm_cpuid_entry2 *entries; |
833 | int maxnent; |
834 | int nent; |
835 | }; |
836 | |
837 | static struct kvm_cpuid_entry2 *get_next_cpuid(struct kvm_cpuid_array *array) |
838 | { |
839 | if (array->nent >= array->maxnent) |
840 | return NULL; |
841 | |
842 | return &array->entries[array->nent++]; |
843 | } |
844 | |
845 | static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array, |
846 | u32 function, u32 index) |
847 | { |
848 | struct kvm_cpuid_entry2 *entry = get_next_cpuid(array); |
849 | |
850 | if (!entry) |
851 | return NULL; |
852 | |
853 | memset(entry, 0, sizeof(*entry)); |
854 | entry->function = function; |
855 | entry->index = index; |
856 | switch (function & 0xC0000000) { |
857 | case 0x40000000: |
858 | /* Hypervisor leaves are always synthesized by __do_cpuid_func. */ |
859 | return entry; |
860 | |
861 | case 0x80000000: |
862 | /* |
863 | * 0x80000021 is sometimes synthesized by __do_cpuid_func, which |
864 | * would result in out-of-bounds calls to do_host_cpuid. |
865 | */ |
866 | { |
867 | static int max_cpuid_80000000; |
868 | if (!READ_ONCE(max_cpuid_80000000)) |
869 | WRITE_ONCE(max_cpuid_80000000, cpuid_eax(0x80000000)); |
870 | if (function > READ_ONCE(max_cpuid_80000000)) |
871 | return entry; |
872 | } |
873 | break; |
874 | |
875 | default: |
876 | break; |
877 | } |
878 | |
	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
881 | |
882 | if (cpuid_function_is_indexed(function)) |
883 | entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; |
884 | |
885 | return entry; |
886 | } |
887 | |
888 | static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func) |
889 | { |
890 | struct kvm_cpuid_entry2 *entry; |
891 | |
892 | if (array->nent >= array->maxnent) |
893 | return -E2BIG; |
894 | |
895 | entry = &array->entries[array->nent]; |
896 | entry->function = func; |
897 | entry->index = 0; |
898 | entry->flags = 0; |
899 | |
900 | switch (func) { |
901 | case 0: |
902 | entry->eax = 7; |
903 | ++array->nent; |
904 | break; |
905 | case 1: |
906 | entry->ecx = F(MOVBE); |
907 | ++array->nent; |
908 | break; |
909 | case 7: |
910 | entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; |
911 | entry->eax = 0; |
912 | if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP)) |
913 | entry->ecx = F(RDPID); |
914 | ++array->nent; |
915 | break; |
916 | default: |
917 | break; |
918 | } |
919 | |
920 | return 0; |
921 | } |
922 | |
923 | static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) |
924 | { |
925 | struct kvm_cpuid_entry2 *entry; |
926 | int r, i, max_idx; |
927 | |
928 | /* all calls to cpuid_count() should be made on the same cpu */ |
929 | get_cpu(); |
930 | |
931 | r = -E2BIG; |
932 | |
	entry = do_host_cpuid(array, function, 0);
934 | if (!entry) |
935 | goto out; |
936 | |
937 | switch (function) { |
938 | case 0: |
939 | /* Limited to the highest leaf implemented in KVM. */ |
940 | entry->eax = min(entry->eax, 0x1fU); |
941 | break; |
942 | case 1: |
		cpuid_entry_override(entry, CPUID_1_EDX);
		cpuid_entry_override(entry, CPUID_1_ECX);
945 | break; |
946 | case 2: |
947 | /* |
948 | * On ancient CPUs, function 2 entries are STATEFUL. That is, |
949 | * CPUID(function=2, index=0) may return different results each |
950 | * time, with the least-significant byte in EAX enumerating the |
951 | * number of times software should do CPUID(2, 0). |
952 | * |
953 | * Modern CPUs, i.e. every CPU KVM has *ever* run on are less |
954 | * idiotic. Intel's SDM states that EAX & 0xff "will always |
955 | * return 01H. Software should ignore this value and not |
956 | * interpret it as an informational descriptor", while AMD's |
957 | * APM states that CPUID(2) is reserved. |
958 | * |
959 | * WARN if a frankenstein CPU that supports virtualization and |
960 | * a stateful CPUID.0x2 is encountered. |
961 | */ |
962 | WARN_ON_ONCE((entry->eax & 0xff) > 1); |
963 | break; |
964 | /* functions 4 and 0x8000001d have additional index. */ |
965 | case 4: |
966 | case 0x8000001d: |
967 | /* |
968 | * Read entries until the cache type in the previous entry is |
969 | * zero, i.e. indicates an invalid entry. |
970 | */ |
971 | for (i = 1; entry->eax & 0x1f; ++i) { |
			entry = do_host_cpuid(array, function, i);
973 | if (!entry) |
974 | goto out; |
975 | } |
976 | break; |
977 | case 6: /* Thermal management */ |
978 | entry->eax = 0x4; /* allow ARAT */ |
979 | entry->ebx = 0; |
980 | entry->ecx = 0; |
981 | entry->edx = 0; |
982 | break; |
983 | /* function 7 has additional index. */ |
984 | case 7: |
985 | max_idx = entry->eax = min(entry->eax, 2u); |
		cpuid_entry_override(entry, CPUID_7_0_EBX);
		cpuid_entry_override(entry, CPUID_7_ECX);
		cpuid_entry_override(entry, CPUID_7_EDX);
989 | |
990 | /* KVM only supports up to 0x7.2, capped above via min(). */ |
991 | if (max_idx >= 1) { |
			entry = do_host_cpuid(array, function, 1);
993 | if (!entry) |
994 | goto out; |
995 | |
			cpuid_entry_override(entry, CPUID_7_1_EAX);
			cpuid_entry_override(entry, CPUID_7_1_EDX);
998 | entry->ebx = 0; |
999 | entry->ecx = 0; |
1000 | } |
1001 | if (max_idx >= 2) { |
			entry = do_host_cpuid(array, function, 2);
1003 | if (!entry) |
1004 | goto out; |
1005 | |
			cpuid_entry_override(entry, CPUID_7_2_EDX);
1007 | entry->ecx = 0; |
1008 | entry->ebx = 0; |
1009 | entry->eax = 0; |
1010 | } |
1011 | break; |
1012 | case 0xa: { /* Architectural Performance Monitoring */ |
1013 | union cpuid10_eax eax; |
1014 | union cpuid10_edx edx; |
1015 | |
1016 | if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) { |
1017 | entry->eax = entry->ebx = entry->ecx = entry->edx = 0; |
1018 | break; |
1019 | } |
1020 | |
1021 | eax.split.version_id = kvm_pmu_cap.version; |
1022 | eax.split.num_counters = kvm_pmu_cap.num_counters_gp; |
1023 | eax.split.bit_width = kvm_pmu_cap.bit_width_gp; |
1024 | eax.split.mask_length = kvm_pmu_cap.events_mask_len; |
1025 | edx.split.num_counters_fixed = kvm_pmu_cap.num_counters_fixed; |
1026 | edx.split.bit_width_fixed = kvm_pmu_cap.bit_width_fixed; |
1027 | |
1028 | if (kvm_pmu_cap.version) |
1029 | edx.split.anythread_deprecated = 1; |
1030 | edx.split.reserved1 = 0; |
1031 | edx.split.reserved2 = 0; |
1032 | |
1033 | entry->eax = eax.full; |
1034 | entry->ebx = kvm_pmu_cap.events_mask; |
1035 | entry->ecx = 0; |
1036 | entry->edx = edx.full; |
1037 | break; |
1038 | } |
1039 | case 0x1f: |
1040 | case 0xb: |
1041 | /* |
1042 | * No topology; a valid topology is indicated by the presence |
1043 | * of subleaf 1. |
1044 | */ |
1045 | entry->eax = entry->ebx = entry->ecx = 0; |
1046 | break; |
1047 | case 0xd: { |
1048 | u64 permitted_xcr0 = kvm_get_filtered_xcr0(); |
1049 | u64 permitted_xss = kvm_caps.supported_xss; |
1050 | |
1051 | entry->eax &= permitted_xcr0; |
		entry->ebx = xstate_required_size(permitted_xcr0, false);
1053 | entry->ecx = entry->ebx; |
1054 | entry->edx &= permitted_xcr0 >> 32; |
1055 | if (!permitted_xcr0) |
1056 | break; |
1057 | |
		entry = do_host_cpuid(array, function, 1);
1059 | if (!entry) |
1060 | goto out; |
1061 | |
		cpuid_entry_override(entry, CPUID_D_1_EAX);
		if (entry->eax & (F(XSAVES)|F(XSAVEC)))
			entry->ebx = xstate_required_size(permitted_xcr0 | permitted_xss,
							  true);
1066 | else { |
1067 | WARN_ON_ONCE(permitted_xss != 0); |
1068 | entry->ebx = 0; |
1069 | } |
1070 | entry->ecx &= permitted_xss; |
1071 | entry->edx &= permitted_xss >> 32; |
1072 | |
1073 | for (i = 2; i < 64; ++i) { |
1074 | bool s_state; |
1075 | if (permitted_xcr0 & BIT_ULL(i)) |
1076 | s_state = false; |
1077 | else if (permitted_xss & BIT_ULL(i)) |
1078 | s_state = true; |
1079 | else |
1080 | continue; |
1081 | |
			entry = do_host_cpuid(array, function, i);
1083 | if (!entry) |
1084 | goto out; |
1085 | |
1086 | /* |
1087 | * The supported check above should have filtered out |
1088 | * invalid sub-leafs. Only valid sub-leafs should |
1089 | * reach this point, and they should have a non-zero |
1090 | * save state size. Furthermore, check whether the |
1091 | * processor agrees with permitted_xcr0/permitted_xss |
1092 | * on whether this is an XCR0- or IA32_XSS-managed area. |
1093 | */ |
1094 | if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) { |
1095 | --array->nent; |
1096 | continue; |
1097 | } |
1098 | |
1099 | if (!kvm_cpu_cap_has(X86_FEATURE_XFD)) |
1100 | entry->ecx &= ~BIT_ULL(2); |
1101 | entry->edx = 0; |
1102 | } |
1103 | break; |
1104 | } |
1105 | case 0x12: |
1106 | /* Intel SGX */ |
1107 | if (!kvm_cpu_cap_has(X86_FEATURE_SGX)) { |
1108 | entry->eax = entry->ebx = entry->ecx = entry->edx = 0; |
1109 | break; |
1110 | } |
1111 | |
1112 | /* |
1113 | * Index 0: Sub-features, MISCSELECT (a.k.a extended features) |
1114 | * and max enclave sizes. The SGX sub-features and MISCSELECT |
1115 | * are restricted by kernel and KVM capabilities (like most |
1116 | * feature flags), while enclave size is unrestricted. |
1117 | */ |
		cpuid_entry_override(entry, CPUID_12_EAX);
1119 | entry->ebx &= SGX_MISC_EXINFO; |
1120 | |
		entry = do_host_cpuid(array, function, 1);
1122 | if (!entry) |
1123 | goto out; |
1124 | |
1125 | /* |
1126 | * Index 1: SECS.ATTRIBUTES. ATTRIBUTES are restricted a la |
1127 | * feature flags. Advertise all supported flags, including |
1128 | * privileged attributes that require explicit opt-in from |
1129 | * userspace. ATTRIBUTES.XFRM is not adjusted as userspace is |
1130 | * expected to derive it from supported XCR0. |
1131 | */ |
1132 | entry->eax &= SGX_ATTR_PRIV_MASK | SGX_ATTR_UNPRIV_MASK; |
1133 | entry->ebx &= 0; |
1134 | break; |
1135 | /* Intel PT */ |
1136 | case 0x14: |
1137 | if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) { |
1138 | entry->eax = entry->ebx = entry->ecx = entry->edx = 0; |
1139 | break; |
1140 | } |
1141 | |
1142 | for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) { |
			if (!do_host_cpuid(array, function, i))
1144 | goto out; |
1145 | } |
1146 | break; |
1147 | /* Intel AMX TILE */ |
1148 | case 0x1d: |
1149 | if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) { |
1150 | entry->eax = entry->ebx = entry->ecx = entry->edx = 0; |
1151 | break; |
1152 | } |
1153 | |
1154 | for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) { |
			if (!do_host_cpuid(array, function, i))
1156 | goto out; |
1157 | } |
1158 | break; |
1159 | case 0x1e: /* TMUL information */ |
1160 | if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) { |
1161 | entry->eax = entry->ebx = entry->ecx = entry->edx = 0; |
1162 | break; |
1163 | } |
1164 | break; |
1165 | case KVM_CPUID_SIGNATURE: { |
1166 | const u32 *sigptr = (const u32 *)KVM_SIGNATURE; |
1167 | entry->eax = KVM_CPUID_FEATURES; |
1168 | entry->ebx = sigptr[0]; |
1169 | entry->ecx = sigptr[1]; |
1170 | entry->edx = sigptr[2]; |
1171 | break; |
1172 | } |
1173 | case KVM_CPUID_FEATURES: |
1174 | entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) | |
1175 | (1 << KVM_FEATURE_NOP_IO_DELAY) | |
1176 | (1 << KVM_FEATURE_CLOCKSOURCE2) | |
1177 | (1 << KVM_FEATURE_ASYNC_PF) | |
1178 | (1 << KVM_FEATURE_PV_EOI) | |
1179 | (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) | |
1180 | (1 << KVM_FEATURE_PV_UNHALT) | |
1181 | (1 << KVM_FEATURE_PV_TLB_FLUSH) | |
1182 | (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) | |
1183 | (1 << KVM_FEATURE_PV_SEND_IPI) | |
1184 | (1 << KVM_FEATURE_POLL_CONTROL) | |
1185 | (1 << KVM_FEATURE_PV_SCHED_YIELD) | |
1186 | (1 << KVM_FEATURE_ASYNC_PF_INT); |
1187 | |
1188 | if (sched_info_on()) |
1189 | entry->eax |= (1 << KVM_FEATURE_STEAL_TIME); |
1190 | |
1191 | entry->ebx = 0; |
1192 | entry->ecx = 0; |
1193 | entry->edx = 0; |
1194 | break; |
1195 | case 0x80000000: |
1196 | entry->eax = min(entry->eax, 0x80000022); |
1197 | /* |
1198 | * Serializing LFENCE is reported in a multitude of ways, and |
1199 | * NullSegClearsBase is not reported in CPUID on Zen2; help |
1200 | * userspace by providing the CPUID leaf ourselves. |
1201 | * |
1202 | * However, only do it if the host has CPUID leaf 0x8000001d. |
1203 | * QEMU thinks that it can query the host blindly for that |
1204 | * CPUID leaf if KVM reports that it supports 0x8000001d or |
1205 | * above. The processor merrily returns values from the |
1206 | * highest Intel leaf which QEMU tries to use as the guest's |
1207 | * 0x8000001d. Even worse, this can result in an infinite |
1208 | * loop if said highest leaf has no subleaves indexed by ECX. |
1209 | */ |
1210 | if (entry->eax >= 0x8000001d && |
1211 | (static_cpu_has(X86_FEATURE_LFENCE_RDTSC) |
1212 | || !static_cpu_has_bug(X86_BUG_NULL_SEG))) |
1213 | entry->eax = max(entry->eax, 0x80000021); |
1214 | break; |
1215 | case 0x80000001: |
1216 | entry->ebx &= ~GENMASK(27, 16); |
		cpuid_entry_override(entry, CPUID_8000_0001_EDX);
		cpuid_entry_override(entry, CPUID_8000_0001_ECX);
1219 | break; |
1220 | case 0x80000005: |
1221 | /* Pass host L1 cache and TLB info. */ |
1222 | break; |
1223 | case 0x80000006: |
1224 | /* Drop reserved bits, pass host L2 cache and TLB info. */ |
1225 | entry->edx &= ~GENMASK(17, 16); |
1226 | break; |
1227 | case 0x80000007: /* Advanced power management */ |
		cpuid_entry_override(entry, CPUID_8000_0007_EDX);
1229 | |
1230 | /* mask against host */ |
1231 | entry->edx &= boot_cpu_data.x86_power; |
1232 | entry->eax = entry->ebx = entry->ecx = 0; |
1233 | break; |
1234 | case 0x80000008: { |
1235 | unsigned g_phys_as = (entry->eax >> 16) & 0xff; |
1236 | unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U); |
1237 | unsigned phys_as = entry->eax & 0xff; |
1238 | |
1239 | /* |
1240 | * If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as |
1241 | * the guest operates in the same PA space as the host, i.e. |
1242 | * reductions in MAXPHYADDR for memory encryption affect shadow |
1243 | * paging, too. |
1244 | * |
1245 | * If TDP is enabled but an explicit guest MAXPHYADDR is not |
1246 | * provided, use the raw bare metal MAXPHYADDR as reductions to |
1247 | * the HPAs do not affect GPAs. |
1248 | */ |
1249 | if (!tdp_enabled) |
1250 | g_phys_as = boot_cpu_data.x86_phys_bits; |
1251 | else if (!g_phys_as) |
1252 | g_phys_as = phys_as; |
1253 | |
1254 | entry->eax = g_phys_as | (virt_as << 8); |
1255 | entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8)); |
1256 | entry->edx = 0; |
		cpuid_entry_override(entry, CPUID_8000_0008_EBX);
1258 | break; |
1259 | } |
1260 | case 0x8000000A: |
1261 | if (!kvm_cpu_cap_has(X86_FEATURE_SVM)) { |
1262 | entry->eax = entry->ebx = entry->ecx = entry->edx = 0; |
1263 | break; |
1264 | } |
1265 | entry->eax = 1; /* SVM revision 1 */ |
		entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
				   ASID emulation to nested SVM */
		entry->ecx = 0; /* Reserved */
		cpuid_entry_override(entry, CPUID_8000_000A_EDX);
1270 | break; |
1271 | case 0x80000019: |
1272 | entry->ecx = entry->edx = 0; |
1273 | break; |
1274 | case 0x8000001a: |
1275 | entry->eax &= GENMASK(2, 0); |
1276 | entry->ebx = entry->ecx = entry->edx = 0; |
1277 | break; |
1278 | case 0x8000001e: |
1279 | /* Do not return host topology information. */ |
1280 | entry->eax = entry->ebx = entry->ecx = 0; |
1281 | entry->edx = 0; /* reserved */ |
1282 | break; |
1283 | case 0x8000001F: |
1284 | if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) { |
1285 | entry->eax = entry->ebx = entry->ecx = entry->edx = 0; |
1286 | } else { |
			cpuid_entry_override(entry, CPUID_8000_001F_EAX);
1288 | /* Clear NumVMPL since KVM does not support VMPL. */ |
1289 | entry->ebx &= ~GENMASK(31, 12); |
1290 | /* |
1291 | * Enumerate '0' for "PA bits reduction", the adjusted |
1292 | * MAXPHYADDR is enumerated directly (see 0x80000008). |
1293 | */ |
1294 | entry->ebx &= ~GENMASK(11, 6); |
1295 | } |
1296 | break; |
1297 | case 0x80000020: |
1298 | entry->eax = entry->ebx = entry->ecx = entry->edx = 0; |
1299 | break; |
1300 | case 0x80000021: |
1301 | entry->ebx = entry->ecx = entry->edx = 0; |
		cpuid_entry_override(entry, CPUID_8000_0021_EAX);
1303 | break; |
1304 | /* AMD Extended Performance Monitoring and Debug */ |
1305 | case 0x80000022: { |
1306 | union cpuid_0x80000022_ebx ebx; |
1307 | |
1308 | entry->ecx = entry->edx = 0; |
1309 | if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) { |
			entry->eax = entry->ebx = 0;
1311 | break; |
1312 | } |
1313 | |
		cpuid_entry_override(entry, CPUID_8000_0022_EAX);
1315 | |
1316 | if (kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) |
1317 | ebx.split.num_core_pmc = kvm_pmu_cap.num_counters_gp; |
1318 | else if (kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE)) |
1319 | ebx.split.num_core_pmc = AMD64_NUM_COUNTERS_CORE; |
1320 | else |
1321 | ebx.split.num_core_pmc = AMD64_NUM_COUNTERS; |
1322 | |
1323 | entry->ebx = ebx.full; |
1324 | break; |
1325 | } |
	/* Add support for Centaur's CPUID instruction. */
	case 0xC0000000:
		/* Just support up to 0xC0000004 now. */
		entry->eax = min(entry->eax, 0xC0000004);
1330 | break; |
1331 | case 0xC0000001: |
		cpuid_entry_override(entry, CPUID_C000_0001_EDX);
1333 | break; |
1334 | case 3: /* Processor serial number */ |
1335 | case 5: /* MONITOR/MWAIT */ |
1336 | case 0xC0000002: |
1337 | case 0xC0000003: |
1338 | case 0xC0000004: |
1339 | default: |
1340 | entry->eax = entry->ebx = entry->ecx = entry->edx = 0; |
1341 | break; |
1342 | } |
1343 | |
1344 | r = 0; |
1345 | |
1346 | out: |
1347 | put_cpu(); |
1348 | |
1349 | return r; |
1350 | } |
1351 | |
1352 | static int do_cpuid_func(struct kvm_cpuid_array *array, u32 func, |
1353 | unsigned int type) |
1354 | { |
1355 | if (type == KVM_GET_EMULATED_CPUID) |
1356 | return __do_cpuid_func_emulated(array, func); |
1357 | |
	return __do_cpuid_func(array, func);
1359 | } |
1360 | |
1361 | #define CENTAUR_CPUID_SIGNATURE 0xC0000000 |
1362 | |
1363 | static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func, |
1364 | unsigned int type) |
1365 | { |
1366 | u32 limit; |
1367 | int r; |
1368 | |
1369 | if (func == CENTAUR_CPUID_SIGNATURE && |
1370 | boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR) |
1371 | return 0; |
1372 | |
1373 | r = do_cpuid_func(array, func, type); |
1374 | if (r) |
1375 | return r; |
1376 | |
1377 | limit = array->entries[array->nent - 1].eax; |
1378 | for (func = func + 1; func <= limit; ++func) { |
1379 | r = do_cpuid_func(array, func, type); |
1380 | if (r) |
1381 | break; |
1382 | } |
1383 | |
1384 | return r; |
1385 | } |
1386 | |
1387 | static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries, |
1388 | __u32 num_entries, unsigned int ioctl_type) |
1389 | { |
1390 | int i; |
1391 | __u32 pad[3]; |
1392 | |
1393 | if (ioctl_type != KVM_GET_EMULATED_CPUID) |
1394 | return false; |
1395 | |
1396 | /* |
1397 | * We want to make sure that ->padding is being passed clean from |
1398 | * userspace in case we want to use it for something in the future. |
1399 | * |
1400 | * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we |
	 * have to content ourselves with checking only the emulated side. /me
1402 | * sheds a tear. |
1403 | */ |
1404 | for (i = 0; i < num_entries; i++) { |
		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
1406 | return true; |
1407 | |
1408 | if (pad[0] || pad[1] || pad[2]) |
1409 | return true; |
1410 | } |
1411 | return false; |
1412 | } |
1413 | |
1414 | int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid, |
1415 | struct kvm_cpuid_entry2 __user *entries, |
1416 | unsigned int type) |
1417 | { |
1418 | static const u32 funcs[] = { |
1419 | 0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE, |
1420 | }; |
1421 | |
1422 | struct kvm_cpuid_array array = { |
1423 | .nent = 0, |
1424 | }; |
1425 | int r, i; |
1426 | |
1427 | if (cpuid->nent < 1) |
1428 | return -E2BIG; |
1429 | if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) |
1430 | cpuid->nent = KVM_MAX_CPUID_ENTRIES; |
1431 | |
	if (sanity_check_entries(entries, cpuid->nent, type))
1433 | return -EINVAL; |
1434 | |
	array.entries = kvcalloc(cpuid->nent, sizeof(struct kvm_cpuid_entry2), GFP_KERNEL);
1436 | if (!array.entries) |
1437 | return -ENOMEM; |
1438 | |
1439 | array.maxnent = cpuid->nent; |
1440 | |
1441 | for (i = 0; i < ARRAY_SIZE(funcs); i++) { |
		r = get_cpuid_func(&array, funcs[i], type);
1443 | if (r) |
1444 | goto out_free; |
1445 | } |
1446 | cpuid->nent = array.nent; |
1447 | |
	if (copy_to_user(entries, array.entries,
			 array.nent * sizeof(struct kvm_cpuid_entry2)))
1450 | r = -EFAULT; |
1451 | |
1452 | out_free: |
	kvfree(array.entries);
1454 | return r; |
1455 | } |
1456 | |
1457 | struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu, |
1458 | u32 function, u32 index) |
1459 | { |
	return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
				 function, index);
1462 | } |
1463 | EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry_index); |
1464 | |
1465 | struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, |
1466 | u32 function) |
1467 | { |
	return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
				 function, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
1470 | } |
1471 | EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry); |
1472 | |
1473 | /* |
1474 | * Intel CPUID semantics treats any query for an out-of-range leaf as if the |
1475 | * highest basic leaf (i.e. CPUID.0H:EAX) were requested. AMD CPUID semantics |
1476 | * returns all zeroes for any undefined leaf, whether or not the leaf is in |
1477 | * range. Centaur/VIA follows Intel semantics. |
1478 | * |
1479 | * A leaf is considered out-of-range if its function is higher than the maximum |
1480 | * supported leaf of its associated class or if its associated class does not |
1481 | * exist. |
1482 | * |
 * There are four primary classes to be considered, with their respective
 * ranges described as "<base> - <top>[,<base2> - <top2>]" inclusive.  A primary
1485 | * class exists if a guest CPUID entry for its <base> leaf exists. For a given |
1486 | * class, CPUID.<base>.EAX contains the max supported leaf for the class. |
1487 | * |
1488 | * - Basic: 0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff |
1489 | * - Hypervisor: 0x40000000 - 0x4fffffff |
1490 | * - Extended: 0x80000000 - 0xbfffffff |
1491 | * - Centaur: 0xc0000000 - 0xcfffffff |
1492 | * |
1493 | * The Hypervisor class is further subdivided into sub-classes that each act as |
 * their own independent class associated with a 0x100-leaf range.  E.g. if QEMU
1495 | * is advertising support for both HyperV and KVM, the resulting Hypervisor |
1496 | * CPUID sub-classes are: |
1497 | * |
1498 | * - HyperV: 0x40000000 - 0x400000ff |
1499 | * - KVM: 0x40000100 - 0x400001ff |
1500 | */ |
1501 | static struct kvm_cpuid_entry2 * |
1502 | get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index) |
1503 | { |
1504 | struct kvm_cpuid_entry2 *basic, *class; |
1505 | u32 function = *fn_ptr; |
1506 | |
1507 | basic = kvm_find_cpuid_entry(vcpu, 0); |
1508 | if (!basic) |
1509 | return NULL; |
1510 | |
	if (is_guest_vendor_amd(basic->ebx, basic->ecx, basic->edx) ||
	    is_guest_vendor_hygon(basic->ebx, basic->ecx, basic->edx))
1513 | return NULL; |
1514 | |
1515 | if (function >= 0x40000000 && function <= 0x4fffffff) |
1516 | class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00); |
1517 | else if (function >= 0xc0000000) |
1518 | class = kvm_find_cpuid_entry(vcpu, 0xc0000000); |
1519 | else |
1520 | class = kvm_find_cpuid_entry(vcpu, function & 0x80000000); |
1521 | |
1522 | if (class && function <= class->eax) |
1523 | return NULL; |
1524 | |
1525 | /* |
1526 | * Leaf specific adjustments are also applied when redirecting to the |
1527 | * max basic entry, e.g. if the max basic leaf is 0xb but there is no |
1528 | * entry for CPUID.0xb.index (see below), then the output value for EDX |
1529 | * needs to be pulled from CPUID.0xb.1. |
1530 | */ |
1531 | *fn_ptr = basic->eax; |
1532 | |
1533 | /* |
1534 | * The class does not exist or the requested function is out of range; |
1535 | * the effective CPUID entry is the max basic leaf. Note, the index of |
1536 | * the original requested leaf is observed! |
1537 | */ |
1538 | return kvm_find_cpuid_entry_index(vcpu, basic->eax, index); |
1539 | } |
1540 | |
1541 | bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, |
1542 | u32 *ecx, u32 *edx, bool exact_only) |
1543 | { |
1544 | u32 orig_function = *eax, function = *eax, index = *ecx; |
1545 | struct kvm_cpuid_entry2 *entry; |
1546 | bool exact, used_max_basic = false; |
1547 | |
1548 | entry = kvm_find_cpuid_entry_index(vcpu, function, index); |
1549 | exact = !!entry; |
1550 | |
1551 | if (!entry && !exact_only) { |
		entry = get_out_of_range_cpuid_entry(vcpu, &function, index);
1553 | used_max_basic = !!entry; |
1554 | } |
1555 | |
1556 | if (entry) { |
1557 | *eax = entry->eax; |
1558 | *ebx = entry->ebx; |
1559 | *ecx = entry->ecx; |
1560 | *edx = entry->edx; |
1561 | if (function == 7 && index == 0) { |
1562 | u64 data; |
			if (!__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) &&
1564 | (data & TSX_CTRL_CPUID_CLEAR)) |
1565 | *ebx &= ~(F(RTM) | F(HLE)); |
1566 | } else if (function == 0x80000007) { |
1567 | if (kvm_hv_invtsc_suppressed(vcpu)) |
1568 | *edx &= ~SF(CONSTANT_TSC); |
1569 | } |
1570 | } else { |
1571 | *eax = *ebx = *ecx = *edx = 0; |
1572 | /* |
1573 | * When leaf 0BH or 1FH is defined, CL is pass-through |
1574 | * and EDX is always the x2APIC ID, even for undefined |
1575 | * subleaves. Index 1 will exist iff the leaf is |
1576 | * implemented, so we pass through CL iff leaf 1 |
1577 | * exists. EDX can be copied from any existing index. |
1578 | */ |
1579 | if (function == 0xb || function == 0x1f) { |
1580 | entry = kvm_find_cpuid_entry_index(vcpu, function, 1); |
1581 | if (entry) { |
1582 | *ecx = index & 0xff; |
1583 | *edx = entry->edx; |
1584 | } |
1585 | } |
1586 | } |
	trace_kvm_cpuid(orig_function, index, *eax, *ebx, *ecx, *edx, exact,
			used_max_basic);
1589 | return exact; |
1590 | } |
1591 | EXPORT_SYMBOL_GPL(kvm_cpuid); |
1592 | |
1593 | int kvm_emulate_cpuid(struct kvm_vcpu *vcpu) |
1594 | { |
1595 | u32 eax, ebx, ecx, edx; |
1596 | |
	if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
1598 | return 1; |
1599 | |
1600 | eax = kvm_rax_read(vcpu); |
1601 | ecx = kvm_rcx_read(vcpu); |
1602 | kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false); |
	kvm_rax_write(vcpu, eax);
	kvm_rbx_write(vcpu, ebx);
	kvm_rcx_write(vcpu, ecx);
	kvm_rdx_write(vcpu, edx);
1607 | return kvm_skip_emulated_instruction(vcpu); |
1608 | } |
1609 | EXPORT_SYMBOL_GPL(kvm_emulate_cpuid); |
1610 | |