1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Kernel-based Virtual Machine driver for Linux
4 * cpuid support routines
5 *
6 * derived from arch/x86/kvm/x86.c
7 *
8 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
9 * Copyright IBM Corporation, 2008
10 */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13 #include <linux/kvm_host.h>
14 #include <linux/lockdep.h>
15 #include <linux/export.h>
16 #include <linux/vmalloc.h>
17 #include <linux/uaccess.h>
18 #include <linux/sched/stat.h>
19
20 #include <asm/processor.h>
21 #include <asm/user.h>
22 #include <asm/fpu/xstate.h>
23 #include <asm/sgx.h>
24 #include <asm/cpuid.h>
25 #include "cpuid.h"
26 #include "lapic.h"
27 #include "mmu.h"
28 #include "trace.h"
29 #include "pmu.h"
30 #include "xen.h"
31
32 /*
33 * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be
34 * aligned to sizeof(unsigned long) because it's not accessed via bitops.
35 */
36 u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
37 EXPORT_SYMBOL_GPL(kvm_cpu_caps);
38
39 struct cpuid_xstate_sizes {
40 u32 eax;
41 u32 ebx;
42 u32 ecx;
43 };
44
45 static struct cpuid_xstate_sizes xstate_sizes[XFEATURE_MAX] __ro_after_init;
46
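/*
 * Cache the host's CPUID.0xD sub-leaf output for each extended xfeature so
 * that xstate_required_size() can compute save-state sizes without having to
 * execute CPUID on every call.
 */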
47 void __init kvm_init_xstate_sizes(void)
48 {
49 u32 ign;
50 int i;
51
52 for (i = XFEATURE_YMM; i < ARRAY_SIZE(xstate_sizes); i++) {
53 struct cpuid_xstate_sizes *xs = &xstate_sizes[i];
54
55 cpuid_count(0xD, i, &xs->eax, &xs->ebx, &xs->ecx, &ign);
56 }
57 }
58
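/*
 * Compute the XSAVE buffer size needed for the features in @xstate_bv. In
 * the compacted format, each state area is packed after the previous one
 * (with optional 64-byte alignment per ECX[1]); in the standard format,
 * each area lives at the fixed offset enumerated in EBX.
 */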
59 u32 xstate_required_size(u64 xstate_bv, bool compacted)
60 {
61 int feature_bit = 0;
62 u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
63
64 xstate_bv &= XFEATURE_MASK_EXTEND;
65 while (xstate_bv) {
66 if (xstate_bv & 0x1) {
67 struct cpuid_xstate_sizes *xs = &xstate_sizes[feature_bit];
68 u32 offset;
69
70 /* ECX[1]: 64B alignment in compacted form */
71 if (compacted)
72 offset = (xs->ecx & 0x2) ? ALIGN(ret, 64) : ret;
73 else
74 offset = xs->ebx;
75 ret = max(ret, offset + xs->eax);
76 }
77
78 xstate_bv >>= 1;
79 feature_bit++;
80 }
81
82 return ret;
83 }
84
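/* F() evaluates to the feature's bit within its 32-bit CPUID leaf word. */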
85 #define F feature_bit
86
87 /* Scattered Flag - For features that are scattered by cpufeatures.h. */
88 #define SF(name) \
89 ({ \
90 BUILD_BUG_ON(X86_FEATURE_##name >= MAX_CPU_FEATURES); \
91 (boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0); \
92 })
93
94 /*
95 * Magic value used by KVM when querying userspace-provided CPUID entries;
96 * KVM doesn't care about the CPUID index because the index of the function in
97 * question is not significant. Note, this magic value must have at least one
98 * bit set in bits[63:32] and must be consumed as a u64 by cpuid_entry2_find()
99 * to avoid false positives when processing guest CPUID input.
100 */
101 #define KVM_CPUID_INDEX_NOT_SIGNIFICANT -1ull
102
103 static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
104 struct kvm_cpuid_entry2 *entries, int nent, u32 function, u64 index)
105 {
106 struct kvm_cpuid_entry2 *e;
107 int i;
108
109 /*
110 * KVM has a semi-arbitrary rule that querying the guest's CPUID model
111 * with IRQs disabled is disallowed. The CPUID model can legitimately
112 * have over one hundred entries, i.e. the lookup is slow, and IRQs are
113 * typically disabled in KVM only when KVM is in a performance critical
114 * path, e.g. the core VM-Enter/VM-Exit run loop. Nothing will break
115 * if this rule is violated, this assertion is purely to flag potential
116 * performance issues. If this fires, consider moving the lookup out
117 * of the hotpath, e.g. by caching information during CPUID updates.
118 */
119 lockdep_assert_irqs_enabled();
120
121 for (i = 0; i < nent; i++) {
122 e = &entries[i];
123
124 if (e->function != function)
125 continue;
126
127 /*
128 * If the index isn't significant, use the first entry with a
129 * matching function. It's userspace's responsibility to not
130 * provide "duplicate" entries in all cases.
131 */
132 if (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index)
133 return e;
134
135
136 /*
137 * Similarly, use the first matching entry if KVM is doing a
138 * lookup (as opposed to emulating CPUID) for a function that's
139 * architecturally defined as not having a significant index.
140 */
141 if (index == KVM_CPUID_INDEX_NOT_SIGNIFICANT) {
142 /*
143 * Direct lookups from KVM should not diverge from what
144 * KVM defines internally (the architectural behavior).
145 */
146 WARN_ON_ONCE(cpuid_function_is_indexed(function));
147 return e;
148 }
149 }
150
151 return NULL;
152 }
153
154 static int kvm_check_cpuid(struct kvm_vcpu *vcpu,
155 struct kvm_cpuid_entry2 *entries,
156 int nent)
157 {
158 struct kvm_cpuid_entry2 *best;
159 u64 xfeatures;
160
161 /*
162 * The existing code assumes virtual address is 48-bit or 57-bit in the
163 * canonical address checks; exit if it is ever changed.
164 */
165 best = cpuid_entry2_find(entries, nent, 0x80000008,
166 KVM_CPUID_INDEX_NOT_SIGNIFICANT);
167 if (best) {
168 int vaddr_bits = (best->eax & 0xff00) >> 8;
169
170 if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
171 return -EINVAL;
172 }
173
174 /*
175 * Exposing dynamic xfeatures to the guest requires additional
176 * enabling in the FPU, e.g. to expand the guest XSAVE state size.
177 */
178 best = cpuid_entry2_find(entries, nent, 0xd, 0);
179 if (!best)
180 return 0;
181
182 xfeatures = best->eax | ((u64)best->edx << 32);
183 xfeatures &= XFEATURE_MASK_USER_DYNAMIC;
184 if (!xfeatures)
185 return 0;
186
187 return fpu_enable_guest_xfd_features(&vcpu->arch.guest_fpu, xfeatures);
188 }
189
190 /* Check whether the supplied CPUID data is equal to what is already set for the vCPU. */
191 static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
192 int nent)
193 {
194 struct kvm_cpuid_entry2 *orig;
195 int i;
196
197 if (nent != vcpu->arch.cpuid_nent)
198 return -EINVAL;
199
200 for (i = 0; i < nent; i++) {
201 orig = &vcpu->arch.cpuid_entries[i];
202 if (e2[i].function != orig->function ||
203 e2[i].index != orig->index ||
204 e2[i].flags != orig->flags ||
205 e2[i].eax != orig->eax || e2[i].ebx != orig->ebx ||
206 e2[i].ecx != orig->ecx || e2[i].edx != orig->edx)
207 return -EINVAL;
208 }
209
210 return 0;
211 }
212
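/*
 * Scan the possible hypervisor CPUID base leaves (0x40000000, 0x40000100,
 * etc.) for an entry whose EBX/ECX/EDX signature matches @sig, and return
 * the base and limit of the matching range (zeroed if no match is found).
 */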
213 static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcpu,
214 const char *sig)
215 {
216 struct kvm_hypervisor_cpuid cpuid = {};
217 struct kvm_cpuid_entry2 *entry;
218 u32 base;
219
220 for_each_possible_hypervisor_cpuid_base(base) {
221 entry = kvm_find_cpuid_entry(vcpu, base);
222
223 if (entry) {
224 u32 signature[3];
225
226 signature[0] = entry->ebx;
227 signature[1] = entry->ecx;
228 signature[2] = entry->edx;
229
230 if (!memcmp(signature, sig, sizeof(signature))) {
231 cpuid.base = base;
232 cpuid.limit = entry->eax;
233 break;
234 }
235 }
236 }
237
238 return cpuid;
239 }
240
241 static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu,
242 struct kvm_cpuid_entry2 *entries, int nent)
243 {
244 u32 base = vcpu->arch.kvm_cpuid.base;
245
246 if (!base)
247 return NULL;
248
249 return cpuid_entry2_find(entries, nent, base | KVM_CPUID_FEATURES,
250 KVM_CPUID_INDEX_NOT_SIGNIFICANT);
251 }
252
253 static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
254 {
255 return __kvm_find_kvm_cpuid_features(vcpu, vcpu->arch.cpuid_entries,
256 vcpu->arch.cpuid_nent);
257 }
258
259 void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
260 {
261 struct kvm_cpuid_entry2 *best = kvm_find_kvm_cpuid_features(vcpu);
262
263 /*
264 * Save the feature bitmap to avoid a CPUID lookup on every PV
265 * operation.
266 */
267 if (best)
268 vcpu->arch.pv_cpuid.features = best->eax;
269 }
270
271 /*
272 * Calculate guest's supported XCR0 taking into account guest CPUID data and
273 * KVM's supported XCR0 (comprised of host's XCR0 and KVM_SUPPORTED_XCR0).
274 */
275 static u64 cpuid_get_supported_xcr0(struct kvm_cpuid_entry2 *entries, int nent)
276 {
277 struct kvm_cpuid_entry2 *best;
278
279 best = cpuid_entry2_find(entries, nent, 0xd, 0);
280 if (!best)
281 return 0;
282
283 return (best->eax | ((u64)best->edx << 32)) & kvm_caps.supported_xcr0;
284 }
285
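/*
 * Refresh the CPUID bits that mirror dynamic guest state, e.g. OSXSAVE and
 * OSPKE from CR4, the APIC enable bit from the APIC base MSR, MWAIT from
 * MISC_ENABLE, and the XSAVE size fields from the current XCR0.
 */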
286 static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *entries,
287 int nent)
288 {
289 struct kvm_cpuid_entry2 *best;
290
291 best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
292 if (best) {
293 /* Update OSXSAVE bit */
294 if (boot_cpu_has(X86_FEATURE_XSAVE))
295 cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
296 kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE));
297
298 cpuid_entry_change(best, X86_FEATURE_APIC,
299 vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
300 }
301
302 best = cpuid_entry2_find(entries, nent, 7, 0);
303 if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
304 cpuid_entry_change(best, X86_FEATURE_OSPKE,
305 kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE));
306
307 best = cpuid_entry2_find(entries, nent, 0xD, 0);
308 if (best)
309 best->ebx = xstate_required_size(vcpu->arch.xcr0, false);
310
311 best = cpuid_entry2_find(entries, nent, 0xD, 1);
312 if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
313 cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
314 best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
315
316 best = __kvm_find_kvm_cpuid_features(vcpu, entries, nent);
317 if (kvm_hlt_in_guest(vcpu->kvm) && best &&
318 (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
319 best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
320
321 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
322 best = cpuid_entry2_find(entries, nent, 0x1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
323 if (best)
324 cpuid_entry_change(best, X86_FEATURE_MWAIT,
325 vcpu->arch.ia32_misc_enable_msr &
326 MSR_IA32_MISC_ENABLE_MWAIT);
327 }
328 }
329
330 void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
331 {
332 __kvm_update_cpuid_runtime(vcpu, vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
333 }
334 EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);
335
336 static bool kvm_cpuid_has_hyperv(struct kvm_cpuid_entry2 *entries, int nent)
337 {
338 struct kvm_cpuid_entry2 *entry;
339
340 entry = cpuid_entry2_find(entries, nent, HYPERV_CPUID_INTERFACE,
341 KVM_CPUID_INDEX_NOT_SIGNIFICANT);
342 return entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX;
343 }
344
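/*
 * Recompute all state that is derived from guest CPUID, e.g. governed
 * features, supported XCR0, MAXPHYADDR and the reserved GPA bits, and then
 * notify the vendor module and the MMU so they can do the same.
 */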
345 static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
346 {
347 struct kvm_lapic *apic = vcpu->arch.apic;
348 struct kvm_cpuid_entry2 *best;
349 bool allow_gbpages;
350
351 BUILD_BUG_ON(KVM_NR_GOVERNED_FEATURES > KVM_MAX_NR_GOVERNED_FEATURES);
352 bitmap_zero(vcpu->arch.governed_features.enabled,
353 KVM_MAX_NR_GOVERNED_FEATURES);
354
355 /*
356 * If TDP is enabled, let the guest use GBPAGES if they're supported in
357 * hardware. The hardware page walker doesn't let KVM disable GBPAGES,
358 * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA
359 * walk for performance and complexity reasons. Not to mention KVM
360 * _can't_ solve the problem because GVA->GPA walks aren't visible to
361 * KVM once a TDP translation is installed. Mimic hardware behavior so
362 * that KVM's is at least consistent, i.e. doesn't randomly inject #PF.
363 * If TDP is disabled, honor *only* guest CPUID as KVM has full control
364 * and can install smaller shadow pages if the host lacks 1GiB support.
365 */
366 allow_gbpages = tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
367 guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES);
368 if (allow_gbpages)
369 kvm_governed_feature_set(vcpu, X86_FEATURE_GBPAGES);
370
371 best = kvm_find_cpuid_entry(vcpu, 1);
372 if (best && apic) {
373 if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
374 apic->lapic_timer.timer_mode_mask = 3 << 17;
375 else
376 apic->lapic_timer.timer_mode_mask = 1 << 17;
377
378 kvm_apic_set_version(vcpu);
379 }
380
381 vcpu->arch.guest_supported_xcr0 =
382 cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
383
384 kvm_update_pv_runtime(vcpu);
385
386 vcpu->arch.is_amd_compatible = guest_cpuid_is_amd_or_hygon(vcpu);
387 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
388 vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
389
390 kvm_pmu_refresh(vcpu);
391 vcpu->arch.cr4_guest_rsvd_bits =
392 __cr4_reserved_bits(guest_cpuid_has, vcpu);
393
394 kvm_hv_set_cpuid(vcpu, kvm_cpuid_has_hyperv(vcpu->arch.cpuid_entries,
395 vcpu->arch.cpuid_nent));
396
397 /* Invoke the vendor callback only after the above state is updated. */
398 static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu);
399
400 /*
401 * Except for the MMU, which must run last so that it can consume any
402 * vendor specific adjustments to the reserved GPA bits.
403 */
404 kvm_mmu_after_set_cpuid(vcpu);
405 }
406
407 int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
408 {
409 struct kvm_cpuid_entry2 *best;
410
411 best = kvm_find_cpuid_entry(vcpu, 0x80000000);
412 if (!best || best->eax < 0x80000008)
413 goto not_found;
414 best = kvm_find_cpuid_entry(vcpu, 0x80000008);
415 if (best)
416 return best->eax & 0xff;
417 not_found:
418 return 36;
419 }
420
421 /*
422 * This "raw" version returns the reserved GPA bits without any adjustments for
423 * encryption technologies that usurp bits. The raw mask should be used if and
424 * only if hardware does _not_ strip the usurped bits, e.g. in virtual MTRRs.
425 */
426 u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu)
427 {
428 return rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
429 }
430
431 static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
432 int nent)
433 {
434 int r;
435
436 __kvm_update_cpuid_runtime(vcpu, e2, nent);
437
438 /*
439 * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
440 * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
441 * tracked in kvm_mmu_page_role. As a result, KVM may miss guest page
442 * faults due to reusing SPs/SPTEs. In practice no sane VMM mucks with
443 * the core vCPU model on the fly. It would've been better to forbid any
444 * KVM_SET_CPUID{,2} calls after KVM_RUN altogether but unfortunately
445 * some VMMs (e.g. QEMU) reuse vCPU fds for CPU hotplug/unplug and do
446 * KVM_SET_CPUID{,2} again. To support this legacy behavior, check
447 * whether the supplied CPUID data is equal to what's already set.
448 */
449 if (kvm_vcpu_has_run(vcpu)) {
450 r = kvm_cpuid_check_equal(vcpu, e2, nent);
451 if (r)
452 return r;
453
454 kvfree(e2);
455 return 0;
456 }
457
458 if (kvm_cpuid_has_hyperv(e2, nent)) {
459 r = kvm_hv_vcpu_init(vcpu);
460 if (r)
461 return r;
462 }
463
464 r = kvm_check_cpuid(vcpu, e2, nent);
465 if (r)
466 return r;
467
468 kvfree(vcpu->arch.cpuid_entries);
469 vcpu->arch.cpuid_entries = e2;
470 vcpu->arch.cpuid_nent = nent;
471
472 vcpu->arch.kvm_cpuid = kvm_get_hypervisor_cpuid(vcpu, KVM_SIGNATURE);
473 vcpu->arch.xen.cpuid = kvm_get_hypervisor_cpuid(vcpu, XEN_SIGNATURE);
474 kvm_vcpu_after_set_cpuid(vcpu);
475
476 return 0;
477 }
478
479 /* Legacy path, used when an old userspace passes the original struct kvm_cpuid_entry. */
480 int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
481 struct kvm_cpuid *cpuid,
482 struct kvm_cpuid_entry __user *entries)
483 {
484 int r, i;
485 struct kvm_cpuid_entry *e = NULL;
486 struct kvm_cpuid_entry2 *e2 = NULL;
487
488 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
489 return -E2BIG;
490
491 if (cpuid->nent) {
492 e = vmemdup_user(entries, array_size(sizeof(*e), cpuid->nent));
493 if (IS_ERR(e))
494 return PTR_ERR(e);
495
496 e2 = kvmalloc_array(cpuid->nent, sizeof(*e2), GFP_KERNEL_ACCOUNT);
497 if (!e2) {
498 r = -ENOMEM;
499 goto out_free_cpuid;
500 }
501 }
502 for (i = 0; i < cpuid->nent; i++) {
503 e2[i].function = e[i].function;
504 e2[i].eax = e[i].eax;
505 e2[i].ebx = e[i].ebx;
506 e2[i].ecx = e[i].ecx;
507 e2[i].edx = e[i].edx;
508 e2[i].index = 0;
509 e2[i].flags = 0;
510 e2[i].padding[0] = 0;
511 e2[i].padding[1] = 0;
512 e2[i].padding[2] = 0;
513 }
514
515 r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
516 if (r)
517 kvfree(e2);
518
519 out_free_cpuid:
520 kvfree(e);
521
522 return r;
523 }
524
525 int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
526 struct kvm_cpuid2 *cpuid,
527 struct kvm_cpuid_entry2 __user *entries)
528 {
529 struct kvm_cpuid_entry2 *e2 = NULL;
530 int r;
531
532 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
533 return -E2BIG;
534
535 if (cpuid->nent) {
536 e2 = vmemdup_user(entries, array_size(sizeof(*e2), cpuid->nent));
537 if (IS_ERR(e2))
538 return PTR_ERR(e2);
539 }
540
541 r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
542 if (r)
543 kvfree(e2);
544
545 return r;
546 }
547
548 int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
549 struct kvm_cpuid2 *cpuid,
550 struct kvm_cpuid_entry2 __user *entries)
551 {
552 if (cpuid->nent < vcpu->arch.cpuid_nent)
553 return -E2BIG;
554
555 if (copy_to_user(entries, vcpu->arch.cpuid_entries,
556 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
557 return -EFAULT;
558
559 cpuid->nent = vcpu->arch.cpuid_nent;
560 return 0;
561 }
562
563 /* Mask kvm_cpu_caps for @leaf with the raw CPUID capabilities of this CPU. */
564 static __always_inline void __kvm_cpu_cap_mask(unsigned int leaf)
565 {
566 const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32);
567 struct kvm_cpuid_entry2 entry;
568
569 reverse_cpuid_check(leaf);
570
571 cpuid_count(cpuid.function, cpuid.index,
572 &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);
573
574 kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg);
575 }
576
577 static __always_inline
578 void kvm_cpu_cap_init_kvm_defined(enum kvm_only_cpuid_leafs leaf, u32 mask)
579 {
580 /* Use kvm_cpu_cap_mask for leafs that aren't KVM-only. */
581 BUILD_BUG_ON(leaf < NCAPINTS);
582
583 kvm_cpu_caps[leaf] = mask;
584
585 __kvm_cpu_cap_mask(leaf);
586 }
587
588 static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
589 {
590 /* Use kvm_cpu_cap_init_kvm_defined for KVM-only leafs. */
591 BUILD_BUG_ON(leaf >= NCAPINTS);
592
593 kvm_cpu_caps[leaf] &= mask;
594
595 __kvm_cpu_cap_mask(leaf);
596 }
597
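/*
 * Compute KVM's supported capabilities by seeding kvm_cpu_caps with the
 * host's boot_cpu_data and then masking each leaf down to the features KVM
 * knows how to virtualize, force-setting bits that KVM emulates in software
 * and clearing bits that depend on missing host/KVM support.
 */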
598 void kvm_set_cpu_caps(void)
599 {
600 #ifdef CONFIG_X86_64
601 unsigned int f_gbpages = F(GBPAGES);
602 unsigned int f_lm = F(LM);
603 unsigned int f_xfd = F(XFD);
604 #else
605 unsigned int f_gbpages = 0;
606 unsigned int f_lm = 0;
607 unsigned int f_xfd = 0;
608 #endif
609 memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps));
610
611 BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) >
612 sizeof(boot_cpu_data.x86_capability));
613
614 memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability,
615 sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)));
616
617 kvm_cpu_cap_mask(CPUID_1_ECX,
618 /*
619 * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not*
620 * advertised to guests via CPUID!
621 */
622 F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
623 0 /* DS-CPL, VMX, SMX, EST */ |
624 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
625 F(FMA) | F(CX16) | 0 /* xTPR Update */ | F(PDCM) |
626 F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
627 F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
628 0 /* Reserved */ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
629 F(F16C) | F(RDRAND)
630 );
631 /* KVM emulates x2apic in software irrespective of host support. */
632 kvm_cpu_cap_set(X86_FEATURE_X2APIC);
633
634 kvm_cpu_cap_mask(CPUID_1_EDX,
635 F(FPU) | F(VME) | F(DE) | F(PSE) |
636 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
637 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
638 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
639 F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
640 0 /* Reserved, DS, ACPI */ | F(MMX) |
641 F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
642 0 /* HTT, TM, Reserved, PBE */
643 );
644
645 kvm_cpu_cap_mask(CPUID_7_0_EBX,
646 F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) |
647 F(FDP_EXCPTN_ONLY) | F(SMEP) | F(BMI2) | F(ERMS) | F(INVPCID) |
648 F(RTM) | F(ZERO_FCS_FDS) | 0 /*MPX*/ | F(AVX512F) |
649 F(AVX512DQ) | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) |
650 F(CLFLUSHOPT) | F(CLWB) | 0 /*INTEL_PT*/ | F(AVX512PF) |
651 F(AVX512ER) | F(AVX512CD) | F(SHA_NI) | F(AVX512BW) |
652 F(AVX512VL));
653
654 kvm_cpu_cap_mask(CPUID_7_ECX,
655 F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
656 F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
657 F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
658 F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/ |
659 F(SGX_LC) | F(BUS_LOCK_DETECT)
660 );
661 /* Set LA57 based on hardware capability. */
662 if (cpuid_ecx(7) & F(LA57))
663 kvm_cpu_cap_set(X86_FEATURE_LA57);
664
665 /*
666 * PKU not yet implemented for shadow paging and requires OSPKE
667 * to be set on the host. Clear it if that is not the case
668 */
669 if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
670 kvm_cpu_cap_clear(X86_FEATURE_PKU);
671
672 kvm_cpu_cap_mask(CPUID_7_EDX,
673 F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
674 F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
675 F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
676 F(SERIALIZE) | F(TSXLDTRK) | F(AVX512_FP16) |
677 F(AMX_TILE) | F(AMX_INT8) | F(AMX_BF16) | F(FLUSH_L1D)
678 );
679
680 /* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
681 kvm_cpu_cap_set(X86_FEATURE_TSC_ADJUST);
682 kvm_cpu_cap_set(X86_FEATURE_ARCH_CAPABILITIES);
683
684 if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))
685 kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL);
686 if (boot_cpu_has(X86_FEATURE_STIBP))
687 kvm_cpu_cap_set(X86_FEATURE_INTEL_STIBP);
688 if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
689 kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD);
690
691 kvm_cpu_cap_mask(CPUID_7_1_EAX,
692 F(AVX_VNNI) | F(AVX512_BF16) | F(CMPCCXADD) |
693 F(FZRM) | F(FSRS) | F(FSRC) |
694 F(AMX_FP16) | F(AVX_IFMA)
695 );
696
697 kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX,
698 F(AVX_VNNI_INT8) | F(AVX_NE_CONVERT) | F(PREFETCHITI) |
699 F(AMX_COMPLEX)
700 );
701
702 kvm_cpu_cap_init_kvm_defined(CPUID_7_2_EDX,
703 F(INTEL_PSFD) | F(IPRED_CTRL) | F(RRSBA_CTRL) | F(DDPD_U) |
704 F(BHI_CTRL) | F(MCDT_NO)
705 );
706
707 kvm_cpu_cap_mask(CPUID_D_1_EAX,
708 F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd
709 );
710
711 kvm_cpu_cap_init_kvm_defined(CPUID_12_EAX,
712 SF(SGX1) | SF(SGX2) | SF(SGX_EDECCSSA)
713 );
714
715 kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
716 F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
717 F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
718 F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
719 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
720 F(TOPOEXT) | 0 /* PERFCTR_CORE */
721 );
722
723 kvm_cpu_cap_mask(CPUID_8000_0001_EDX,
724 F(FPU) | F(VME) | F(DE) | F(PSE) |
725 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
726 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
727 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
728 F(PAT) | F(PSE36) | 0 /* Reserved */ |
729 F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
730 F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
731 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
732 );
733
734 if (!tdp_enabled && IS_ENABLED(CONFIG_X86_64))
735 kvm_cpu_cap_set(X86_FEATURE_GBPAGES);
736
737 kvm_cpu_cap_init_kvm_defined(CPUID_8000_0007_EDX,
738 SF(CONSTANT_TSC)
739 );
740
741 kvm_cpu_cap_mask(CPUID_8000_0008_EBX,
742 F(CLZERO) | F(XSAVEERPTR) |
743 F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
744 F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) |
745 F(AMD_PSFD)
746 );
747
748 /*
749 * AMD has separate bits for each SPEC_CTRL bit.
750 * arch/x86/kernel/cpu/bugs.c is kind enough to
751 * record that in cpufeatures so use them.
752 */
753 if (boot_cpu_has(X86_FEATURE_IBPB))
754 kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB);
755 if (boot_cpu_has(X86_FEATURE_IBRS))
756 kvm_cpu_cap_set(X86_FEATURE_AMD_IBRS);
757 if (boot_cpu_has(X86_FEATURE_STIBP))
758 kvm_cpu_cap_set(X86_FEATURE_AMD_STIBP);
759 if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
760 kvm_cpu_cap_set(X86_FEATURE_AMD_SSBD);
761 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
762 kvm_cpu_cap_set(X86_FEATURE_AMD_SSB_NO);
763 /*
764 * The preference is to use SPEC CTRL MSR instead of the
765 * VIRT_SPEC MSR.
766 */
767 if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
768 !boot_cpu_has(X86_FEATURE_AMD_SSBD))
769 kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
770
771 /*
772 * Hide all SVM features by default, SVM will set the cap bits for
773 * features it emulates and/or exposes for L1.
774 */
775 kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);
776
777 kvm_cpu_cap_mask(CPUID_8000_001F_EAX,
778 0 /* SME */ | F(SEV) | 0 /* VM_PAGE_FLUSH */ | F(SEV_ES) |
779 F(SME_COHERENT));
780
781 kvm_cpu_cap_mask(CPUID_8000_0021_EAX,
782 F(NO_NESTED_DATA_BP) | F(LFENCE_RDTSC) | 0 /* SmmPgCfgLock */ |
783 F(VERW_CLEAR) |
784 F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */
785 );
786
787 if (cpu_feature_enabled(X86_FEATURE_SRSO_NO))
788 kvm_cpu_cap_set(X86_FEATURE_SRSO_NO);
789
790 kvm_cpu_cap_init_kvm_defined(CPUID_8000_0022_EAX,
791 F(PERFMON_V2)
792 );
793
794 kvm_cpu_cap_init_kvm_defined(CPUID_8000_0021_ECX,
795 F(TSA_SQ_NO) | F(TSA_L1_NO)
796 );
797
798 /*
799 * Synthesize "LFENCE is serializing" into the AMD-defined entry in
800 * KVM's supported CPUID if the feature is reported as supported by the
801 * kernel. LFENCE_RDTSC was a Linux-defined synthetic feature long
802 * before AMD joined the bandwagon, e.g. LFENCE is serializing on most
803 * CPUs that support SSE2. On CPUs that don't support AMD's leaf,
804 * kvm_cpu_cap_mask() will unfortunately drop the flag due to ANDing
805 * the mask with the raw host CPUID, and reporting support in AMD's
806 * leaf can make it easier for userspace to detect the feature.
807 */
808 if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
809 kvm_cpu_cap_set(X86_FEATURE_LFENCE_RDTSC);
810 if (!static_cpu_has_bug(X86_BUG_NULL_SEG))
811 kvm_cpu_cap_set(X86_FEATURE_NULL_SEL_CLR_BASE);
812 kvm_cpu_cap_set(X86_FEATURE_NO_SMM_CTL_MSR);
813
814 kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
815 F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
816 F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
817 F(PMM) | F(PMM_EN)
818 );
819
820 /*
821 * Hide RDTSCP and RDPID if either feature is reported as supported but
822 * probing MSR_TSC_AUX failed. This is purely a sanity check and
823 * should never happen, but the guest will likely crash if RDTSCP or
824 * RDPID is misreported, and KVM has botched MSR_TSC_AUX emulation in
825 * the past. For example, the sanity check may fire if this instance of
826 * KVM is running as L1 on top of an older, broken KVM.
827 */
828 if (WARN_ON((kvm_cpu_cap_has(X86_FEATURE_RDTSCP) ||
829 kvm_cpu_cap_has(X86_FEATURE_RDPID)) &&
830 !kvm_is_supported_user_return_msr(MSR_TSC_AUX))) {
831 kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
832 kvm_cpu_cap_clear(X86_FEATURE_RDPID);
833 }
834 }
835 EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);
836
837 struct kvm_cpuid_array {
838 struct kvm_cpuid_entry2 *entries;
839 int maxnent;
840 int nent;
841 };
842
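/* Grab the next unused output entry, or NULL if the array is exhausted. */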
843 static struct kvm_cpuid_entry2 *get_next_cpuid(struct kvm_cpuid_array *array)
844 {
845 if (array->nent >= array->maxnent)
846 return NULL;
847
848 return &array->entries[array->nent++];
849 }
850
851 static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
852 u32 function, u32 index)
853 {
854 struct kvm_cpuid_entry2 *entry = get_next_cpuid(array);
855
856 if (!entry)
857 return NULL;
858
859 memset(entry, 0, sizeof(*entry));
860 entry->function = function;
861 entry->index = index;
862 switch (function & 0xC0000000) {
863 case 0x40000000:
864 /* Hypervisor leaves are always synthesized by __do_cpuid_func. */
865 return entry;
866
867 case 0x80000000:
868 /*
869 * 0x80000021 is sometimes synthesized by __do_cpuid_func, which
870 * would result in out-of-bounds calls to do_host_cpuid.
871 */
872 {
873 static int max_cpuid_80000000;
874 if (!READ_ONCE(max_cpuid_80000000))
875 WRITE_ONCE(max_cpuid_80000000, cpuid_eax(0x80000000));
876 if (function > READ_ONCE(max_cpuid_80000000))
877 return entry;
878 }
879 break;
880
881 default:
882 break;
883 }
884
885 cpuid_count(entry->function, entry->index,
886 &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
887
888 if (cpuid_function_is_indexed(function))
889 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
890
891 return entry;
892 }
893
894 static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
895 {
896 struct kvm_cpuid_entry2 *entry;
897
898 if (array->nent >= array->maxnent)
899 return -E2BIG;
900
901 entry = &array->entries[array->nent];
902 entry->function = func;
903 entry->index = 0;
904 entry->flags = 0;
905
906 switch (func) {
907 case 0:
908 entry->eax = 7;
909 ++array->nent;
910 break;
911 case 1:
912 entry->ecx = F(MOVBE);
913 ++array->nent;
914 break;
915 case 7:
916 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
917 entry->eax = 0;
918 if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
919 entry->ecx = F(RDPID);
920 ++array->nent;
921 break;
922 default:
923 break;
924 }
925
926 return 0;
927 }
928
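/*
 * Fill @array with KVM's supported view of @function (and its sub-leaves),
 * starting from the host's raw CPUID output and applying per-leaf overrides
 * and sanitization.
 */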
929 static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
930 {
931 struct kvm_cpuid_entry2 *entry;
932 int r, i, max_idx;
933
934 /* all calls to cpuid_count() should be made on the same cpu */
935 get_cpu();
936
937 r = -E2BIG;
938
939 entry = do_host_cpuid(array, function, 0);
940 if (!entry)
941 goto out;
942
943 switch (function) {
944 case 0:
945 /* Limited to the highest leaf implemented in KVM. */
946 entry->eax = min(entry->eax, 0x1fU);
947 break;
948 case 1:
949 cpuid_entry_override(entry, CPUID_1_EDX);
950 cpuid_entry_override(entry, CPUID_1_ECX);
951 break;
952 case 2:
953 /*
954 * On ancient CPUs, function 2 entries are STATEFUL. That is,
955 * CPUID(function=2, index=0) may return different results each
956 * time, with the least-significant byte in EAX enumerating the
957 * number of times software should do CPUID(2, 0).
958 *
959 * Modern CPUs, i.e. every CPU KVM has *ever* run on are less
960 * idiotic. Intel's SDM states that EAX & 0xff "will always
961 * return 01H. Software should ignore this value and not
962 * interpret it as an informational descriptor", while AMD's
963 * APM states that CPUID(2) is reserved.
964 *
965 * WARN if a frankenstein CPU that supports virtualization and
966 * a stateful CPUID.0x2 is encountered.
967 */
968 WARN_ON_ONCE((entry->eax & 0xff) > 1);
969 break;
970 /* functions 4 and 0x8000001d have an additional index. */
971 case 4:
972 case 0x8000001d:
973 /*
974 * Read entries until the cache type in the previous entry is
975 * zero, i.e. indicates an invalid entry.
976 */
977 for (i = 1; entry->eax & 0x1f; ++i) {
978 entry = do_host_cpuid(array, function, i);
979 if (!entry)
980 goto out;
981 }
982 break;
983 case 6: /* Thermal management */
984 entry->eax = 0x4; /* allow ARAT */
985 entry->ebx = 0;
986 entry->ecx = 0;
987 entry->edx = 0;
988 break;
989 /* function 7 has an additional index. */
990 case 7:
991 max_idx = entry->eax = min(entry->eax, 2u);
992 cpuid_entry_override(entry, CPUID_7_0_EBX);
993 cpuid_entry_override(entry, CPUID_7_ECX);
994 cpuid_entry_override(entry, CPUID_7_EDX);
995
996 /* KVM only supports up to 0x7.2, capped above via min(). */
997 if (max_idx >= 1) {
998 entry = do_host_cpuid(array, function, 1);
999 if (!entry)
1000 goto out;
1001
1002 cpuid_entry_override(entry, CPUID_7_1_EAX);
1003 cpuid_entry_override(entry, CPUID_7_1_EDX);
1004 entry->ebx = 0;
1005 entry->ecx = 0;
1006 }
1007 if (max_idx >= 2) {
1008 entry = do_host_cpuid(array, function, 2);
1009 if (!entry)
1010 goto out;
1011
1012 cpuid_entry_override(entry, CPUID_7_2_EDX);
1013 entry->ecx = 0;
1014 entry->ebx = 0;
1015 entry->eax = 0;
1016 }
1017 break;
1018 case 0xa: { /* Architectural Performance Monitoring */
1019 union cpuid10_eax eax = { };
1020 union cpuid10_edx edx = { };
1021
1022 if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
1023 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1024 break;
1025 }
1026
1027 eax.split.version_id = kvm_pmu_cap.version;
1028 eax.split.num_counters = kvm_pmu_cap.num_counters_gp;
1029 eax.split.bit_width = kvm_pmu_cap.bit_width_gp;
1030 eax.split.mask_length = kvm_pmu_cap.events_mask_len;
1031 edx.split.num_counters_fixed = kvm_pmu_cap.num_counters_fixed;
1032 edx.split.bit_width_fixed = kvm_pmu_cap.bit_width_fixed;
1033
1034 if (kvm_pmu_cap.version)
1035 edx.split.anythread_deprecated = 1;
1036
1037 entry->eax = eax.full;
1038 entry->ebx = kvm_pmu_cap.events_mask;
1039 entry->ecx = 0;
1040 entry->edx = edx.full;
1041 break;
1042 }
1043 case 0x1f:
1044 case 0xb:
1045 /*
1046 * No topology; a valid topology is indicated by the presence
1047 * of subleaf 1.
1048 */
1049 entry->eax = entry->ebx = entry->ecx = 0;
1050 break;
1051 case 0xd: {
1052 u64 permitted_xcr0 = kvm_get_filtered_xcr0();
1053 u64 permitted_xss = kvm_caps.supported_xss;
1054
1055 entry->eax &= permitted_xcr0;
1056 entry->ebx = xstate_required_size(permitted_xcr0, false);
1057 entry->ecx = entry->ebx;
1058 entry->edx &= permitted_xcr0 >> 32;
1059 if (!permitted_xcr0)
1060 break;
1061
1062 entry = do_host_cpuid(array, function, 1);
1063 if (!entry)
1064 goto out;
1065
1066 cpuid_entry_override(entry, CPUID_D_1_EAX);
1067 if (entry->eax & (F(XSAVES)|F(XSAVEC)))
1068 entry->ebx = xstate_required_size(permitted_xcr0 | permitted_xss,
1069 true);
1070 else {
1071 WARN_ON_ONCE(permitted_xss != 0);
1072 entry->ebx = 0;
1073 }
1074 entry->ecx &= permitted_xss;
1075 entry->edx &= permitted_xss >> 32;
1076
1077 for (i = 2; i < 64; ++i) {
1078 bool s_state;
1079 if (permitted_xcr0 & BIT_ULL(i))
1080 s_state = false;
1081 else if (permitted_xss & BIT_ULL(i))
1082 s_state = true;
1083 else
1084 continue;
1085
1086 entry = do_host_cpuid(array, function, i);
1087 if (!entry)
1088 goto out;
1089
1090 /*
1091 * The supported check above should have filtered out
1092 * invalid sub-leafs. Only valid sub-leafs should
1093 * reach this point, and they should have a non-zero
1094 * save state size. Furthermore, check whether the
1095 * processor agrees with permitted_xcr0/permitted_xss
1096 * on whether this is an XCR0- or IA32_XSS-managed area.
1097 */
1098 if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {
1099 --array->nent;
1100 continue;
1101 }
1102
1103 if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
1104 entry->ecx &= ~BIT_ULL(2);
1105 entry->edx = 0;
1106 }
1107 break;
1108 }
1109 case 0x12:
1110 /* Intel SGX */
1111 if (!kvm_cpu_cap_has(X86_FEATURE_SGX)) {
1112 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1113 break;
1114 }
1115
1116 /*
1117 * Index 0: Sub-features, MISCSELECT (a.k.a extended features)
1118 * and max enclave sizes. The SGX sub-features and MISCSELECT
1119 * are restricted by kernel and KVM capabilities (like most
1120 * feature flags), while enclave size is unrestricted.
1121 */
1122 cpuid_entry_override(entry, CPUID_12_EAX);
1123 entry->ebx &= SGX_MISC_EXINFO;
1124
1125 entry = do_host_cpuid(array, function, 1);
1126 if (!entry)
1127 goto out;
1128
1129 /*
1130 * Index 1: SECS.ATTRIBUTES. ATTRIBUTES are restricted a la
1131 * feature flags. Advertise all supported flags, including
1132 * privileged attributes that require explicit opt-in from
1133 * userspace. ATTRIBUTES.XFRM is not adjusted as userspace is
1134 * expected to derive it from supported XCR0.
1135 */
1136 entry->eax &= SGX_ATTR_PRIV_MASK | SGX_ATTR_UNPRIV_MASK;
1137 entry->ebx &= 0;
1138 break;
1139 /* Intel PT */
1140 case 0x14:
1141 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) {
1142 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1143 break;
1144 }
1145
1146 for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
1147 if (!do_host_cpuid(array, function, i))
1148 goto out;
1149 }
1150 break;
1151 /* Intel AMX TILE */
1152 case 0x1d:
1153 if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) {
1154 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1155 break;
1156 }
1157
1158 for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
1159 if (!do_host_cpuid(array, function, i))
1160 goto out;
1161 }
1162 break;
1163 case 0x1e: /* TMUL information */
1164 if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) {
1165 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1166 break;
1167 }
1168 break;
1169 case KVM_CPUID_SIGNATURE: {
1170 const u32 *sigptr = (const u32 *)KVM_SIGNATURE;
1171 entry->eax = KVM_CPUID_FEATURES;
1172 entry->ebx = sigptr[0];
1173 entry->ecx = sigptr[1];
1174 entry->edx = sigptr[2];
1175 break;
1176 }
1177 case KVM_CPUID_FEATURES:
1178 entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
1179 (1 << KVM_FEATURE_NOP_IO_DELAY) |
1180 (1 << KVM_FEATURE_CLOCKSOURCE2) |
1181 (1 << KVM_FEATURE_ASYNC_PF) |
1182 (1 << KVM_FEATURE_PV_EOI) |
1183 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
1184 (1 << KVM_FEATURE_PV_UNHALT) |
1185 (1 << KVM_FEATURE_PV_TLB_FLUSH) |
1186 (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
1187 (1 << KVM_FEATURE_PV_SEND_IPI) |
1188 (1 << KVM_FEATURE_POLL_CONTROL) |
1189 (1 << KVM_FEATURE_PV_SCHED_YIELD) |
1190 (1 << KVM_FEATURE_ASYNC_PF_INT);
1191
1192 if (sched_info_on())
1193 entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
1194
1195 entry->ebx = 0;
1196 entry->ecx = 0;
1197 entry->edx = 0;
1198 break;
1199 case 0x80000000:
1200 entry->eax = min(entry->eax, 0x80000022);
1201 /*
1202 * Serializing LFENCE is reported in a multitude of ways, and
1203 * NullSegClearsBase is not reported in CPUID on Zen2; help
1204 * userspace by providing the CPUID leaf ourselves.
1205 *
1206 * However, only do it if the host has CPUID leaf 0x8000001d.
1207 * QEMU thinks that it can query the host blindly for that
1208 * CPUID leaf if KVM reports that it supports 0x8000001d or
1209 * above. The processor merrily returns values from the
1210 * highest Intel leaf which QEMU tries to use as the guest's
1211 * 0x8000001d. Even worse, this can result in an infinite
1212 * loop if said highest leaf has no subleaves indexed by ECX.
1213 */
1214 if (entry->eax >= 0x8000001d &&
1215 (static_cpu_has(X86_FEATURE_LFENCE_RDTSC)
1216 || !static_cpu_has_bug(X86_BUG_NULL_SEG)))
1217 entry->eax = max(entry->eax, 0x80000021);
1218 break;
1219 case 0x80000001:
1220 entry->ebx &= ~GENMASK(27, 16);
1221 cpuid_entry_override(entry, CPUID_8000_0001_EDX);
1222 cpuid_entry_override(entry, CPUID_8000_0001_ECX);
1223 break;
1224 case 0x80000005:
1225 /* Pass host L1 cache and TLB info. */
1226 break;
1227 case 0x80000006:
1228 /* Drop reserved bits, pass host L2 cache and TLB info. */
1229 entry->edx &= ~GENMASK(17, 16);
1230 break;
1231 case 0x80000007: /* Advanced power management */
1232 cpuid_entry_override(entry, CPUID_8000_0007_EDX);
1233
1234 /* mask against host */
1235 entry->edx &= boot_cpu_data.x86_power;
1236 entry->eax = entry->ebx = entry->ecx = 0;
1237 break;
1238 case 0x80000008: {
1239 unsigned int virt_as = max((entry->eax >> 8) & 0xff, 48U);
1240 unsigned int phys_as;
1241
1242 /*
1243 * If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as
1244 * the guest operates in the same PA space as the host, i.e.
1245 * reductions in MAXPHYADDR for memory encryption affect shadow
1246 * paging, too.
1247 *
1248 * If TDP is enabled, use the raw bare metal MAXPHYADDR as
1249 * reductions to the HPAs do not affect GPAs.
1250 */
1251 if (!tdp_enabled) {
1252 phys_as = boot_cpu_data.x86_phys_bits;
1253 } else {
1254 phys_as = entry->eax & 0xff;
1255 }
1256
1257 entry->eax = phys_as | (virt_as << 8);
1258 entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
1259 entry->edx = 0;
1260 cpuid_entry_override(entry, CPUID_8000_0008_EBX);
1261 break;
1262 }
1263 case 0x8000000A:
1264 if (!kvm_cpu_cap_has(X86_FEATURE_SVM)) {
1265 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1266 break;
1267 }
1268 entry->eax = 1; /* SVM revision 1 */
1269 entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
1270 ASID emulation to nested SVM. */
1271 entry->ecx = 0; /* Reserved */
1272 cpuid_entry_override(entry, CPUID_8000_000A_EDX);
1273 break;
1274 case 0x80000019:
1275 entry->ecx = entry->edx = 0;
1276 break;
1277 case 0x8000001a:
1278 entry->eax &= GENMASK(2, 0);
1279 entry->ebx = entry->ecx = entry->edx = 0;
1280 break;
1281 case 0x8000001e:
1282 /* Do not return host topology information. */
1283 entry->eax = entry->ebx = entry->ecx = 0;
1284 entry->edx = 0; /* reserved */
1285 break;
1286 case 0x8000001F:
1287 if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) {
1288 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1289 } else {
1290 cpuid_entry_override(entry, CPUID_8000_001F_EAX);
1291 /* Clear NumVMPL since KVM does not support VMPL. */
1292 entry->ebx &= ~GENMASK(31, 12);
1293 /*
1294 * Enumerate '0' for "PA bits reduction", the adjusted
1295 * MAXPHYADDR is enumerated directly (see 0x80000008).
1296 */
1297 entry->ebx &= ~GENMASK(11, 6);
1298 }
1299 break;
1300 case 0x80000020:
1301 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1302 break;
1303 case 0x80000021:
1304 entry->ebx = entry->edx = 0;
1305 cpuid_entry_override(entry, CPUID_8000_0021_EAX);
1306 cpuid_entry_override(entry, CPUID_8000_0021_ECX);
1307 break;
1308 /* AMD Extended Performance Monitoring and Debug */
1309 case 0x80000022: {
1310 union cpuid_0x80000022_ebx ebx = { };
1311
1312 entry->ecx = entry->edx = 0;
1313 if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
1314 entry->eax = entry->ebx = 0;
1315 break;
1316 }
1317
1318 cpuid_entry_override(entry, CPUID_8000_0022_EAX);
1319
1320 if (kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2))
1321 ebx.split.num_core_pmc = kvm_pmu_cap.num_counters_gp;
1322 else if (kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE))
1323 ebx.split.num_core_pmc = AMD64_NUM_COUNTERS_CORE;
1324 else
1325 ebx.split.num_core_pmc = AMD64_NUM_COUNTERS;
1326
1327 entry->ebx = ebx.full;
1328 break;
1329 }
1330 /* Add support for Centaur's CPUID instruction. */
1331 case 0xC0000000:
1332 /* Just support up to 0xC0000004 for now. */
1333 entry->eax = min(entry->eax, 0xC0000004);
1334 break;
1335 case 0xC0000001:
1336 cpuid_entry_override(entry, CPUID_C000_0001_EDX);
1337 break;
1338 case 3: /* Processor serial number */
1339 case 5: /* MONITOR/MWAIT */
1340 case 0xC0000002:
1341 case 0xC0000003:
1342 case 0xC0000004:
1343 default:
1344 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1345 break;
1346 }
1347
1348 r = 0;
1349
1350 out:
1351 put_cpu();
1352
1353 return r;
1354 }
1355
1356 static int do_cpuid_func(struct kvm_cpuid_array *array, u32 func,
1357 unsigned int type)
1358 {
1359 if (type == KVM_GET_EMULATED_CPUID)
1360 return __do_cpuid_func_emulated(array, func);
1361
1362 return __do_cpuid_func(array, func);
1363 }
1364
1365 #define CENTAUR_CPUID_SIGNATURE 0xC0000000
1366
1367 static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func,
1368 unsigned int type)
1369 {
1370 u32 limit;
1371 int r;
1372
1373 if (func == CENTAUR_CPUID_SIGNATURE &&
1374 boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR)
1375 return 0;
1376
1377 r = do_cpuid_func(array, func, type);
1378 if (r)
1379 return r;
1380
1381 limit = array->entries[array->nent - 1].eax;
1382 for (func = func + 1; func <= limit; ++func) {
1383 r = do_cpuid_func(array, func, type);
1384 if (r)
1385 break;
1386 }
1387
1388 return r;
1389 }
1390
1391 static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
1392 __u32 num_entries, unsigned int ioctl_type)
1393 {
1394 int i;
1395 __u32 pad[3];
1396
1397 if (ioctl_type != KVM_GET_EMULATED_CPUID)
1398 return false;
1399
1400 /*
1401 * We want to make sure that ->padding is being passed clean from
1402 * userspace in case we want to use it for something in the future.
1403 *
1404 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID, so we have
1405 * to content ourselves with checking only the emulated side. /me
1406 * sheds a tear.
1407 */
1408 for (i = 0; i < num_entries; i++) {
1409 if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
1410 return true;
1411
1412 if (pad[0] || pad[1] || pad[2])
1413 return true;
1414 }
1415 return false;
1416 }
1417
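/*
 * Handler for KVM_GET_SUPPORTED_CPUID and KVM_GET_EMULATED_CPUID. Walks the
 * Basic, Extended, Centaur and KVM signature ranges and copies the resulting
 * entries back to userspace.
 */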
1418 int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
1419 struct kvm_cpuid_entry2 __user *entries,
1420 unsigned int type)
1421 {
1422 static const u32 funcs[] = {
1423 0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE,
1424 };
1425
1426 struct kvm_cpuid_array array = {
1427 .nent = 0,
1428 };
1429 int r, i;
1430
1431 if (cpuid->nent < 1)
1432 return -E2BIG;
1433 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1434 cpuid->nent = KVM_MAX_CPUID_ENTRIES;
1435
1436 if (sanity_check_entries(entries, cpuid->nent, type))
1437 return -EINVAL;
1438
1439 array.entries = kvcalloc(cpuid->nent, sizeof(struct kvm_cpuid_entry2), GFP_KERNEL);
1440 if (!array.entries)
1441 return -ENOMEM;
1442
1443 array.maxnent = cpuid->nent;
1444
1445 for (i = 0; i < ARRAY_SIZE(funcs); i++) {
1446 r = get_cpuid_func(&array, funcs[i], type);
1447 if (r)
1448 goto out_free;
1449 }
1450 cpuid->nent = array.nent;
1451
1452 if (copy_to_user(entries, array.entries,
1453 array.nent * sizeof(struct kvm_cpuid_entry2)))
1454 r = -EFAULT;
1455
1456 out_free:
1457 kvfree(array.entries);
1458 return r;
1459 }
1460
1461 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
1462 u32 function, u32 index)
1463 {
1464 return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
1465 function, index);
1466 }
1467 EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry_index);
1468
1469 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
1470 u32 function)
1471 {
1472 return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
1473 function, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
1474 }
1475 EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
1476
1477 /*
1478 * Intel CPUID semantics treats any query for an out-of-range leaf as if the
1479 * highest basic leaf (i.e. CPUID.0H:EAX) were requested. AMD CPUID semantics
1480 * returns all zeroes for any undefined leaf, whether or not the leaf is in
1481 * range. Centaur/VIA follows Intel semantics.
1482 *
1483 * A leaf is considered out-of-range if its function is higher than the maximum
1484 * supported leaf of its associated class or if its associated class does not
1485 * exist.
1486 *
1487 * There are four primary classes to be considered, with their respective
1488 * ranges described as "<base> - <top>[,<base2> - <top2>]" inclusive. A primary
1489 * class exists if a guest CPUID entry for its <base> leaf exists. For a given
1490 * class, CPUID.<base>.EAX contains the max supported leaf for the class.
1491 *
1492 * - Basic: 0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff
1493 * - Hypervisor: 0x40000000 - 0x4fffffff
1494 * - Extended: 0x80000000 - 0xbfffffff
1495 * - Centaur: 0xc0000000 - 0xcfffffff
1496 *
1497 * The Hypervisor class is further subdivided into sub-classes that each act as
1498 * their own independent class associated with a range of 0x100 leaves. E.g. if QEMU
1499 * is advertising support for both HyperV and KVM, the resulting Hypervisor
1500 * CPUID sub-classes are:
1501 *
1502 * - HyperV: 0x40000000 - 0x400000ff
1503 * - KVM: 0x40000100 - 0x400001ff
1504 */
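/*
 * For example, under the Intel semantics described above, if the guest's max
 * basic leaf (CPUID.0H:EAX) is 0xd, then a query for the undefined leaf 0x16
 * resolves to the guest's entry for leaf 0xd (with the original index).
 */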
1505 static struct kvm_cpuid_entry2 *
1506 get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index)
1507 {
1508 struct kvm_cpuid_entry2 *basic, *class;
1509 u32 function = *fn_ptr;
1510
1511 basic = kvm_find_cpuid_entry(vcpu, 0);
1512 if (!basic)
1513 return NULL;
1514
1515 if (is_guest_vendor_amd(basic->ebx, basic->ecx, basic->edx) ||
1516 is_guest_vendor_hygon(basic->ebx, basic->ecx, basic->edx))
1517 return NULL;
1518
1519 if (function >= 0x40000000 && function <= 0x4fffffff)
1520 class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00);
1521 else if (function >= 0xc0000000)
1522 class = kvm_find_cpuid_entry(vcpu, 0xc0000000);
1523 else
1524 class = kvm_find_cpuid_entry(vcpu, function & 0x80000000);
1525
1526 if (class && function <= class->eax)
1527 return NULL;
1528
1529 /*
1530 * Leaf specific adjustments are also applied when redirecting to the
1531 * max basic entry, e.g. if the max basic leaf is 0xb but there is no
1532 * entry for CPUID.0xb.index (see below), then the output value for EDX
1533 * needs to be pulled from CPUID.0xb.1.
1534 */
1535 *fn_ptr = basic->eax;
1536
1537 /*
1538 * The class does not exist or the requested function is out of range;
1539 * the effective CPUID entry is the max basic leaf. Note, the index of
1540 * the original requested leaf is observed!
1541 */
1542 return kvm_find_cpuid_entry_index(vcpu, basic->eax, index);
1543 }
1544
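/*
 * Emulate CPUID for the guest: look up the entry for EAX/ECX, falling back
 * to the out-of-range rules above unless @exact_only, and apply dynamic
 * fixups (e.g. hide RTM/HLE when TSX is disabled via MSR_IA32_TSX_CTRL).
 */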
1545 bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
1546 u32 *ecx, u32 *edx, bool exact_only)
1547 {
1548 u32 orig_function = *eax, function = *eax, index = *ecx;
1549 struct kvm_cpuid_entry2 *entry;
1550 bool exact, used_max_basic = false;
1551
1552 entry = kvm_find_cpuid_entry_index(vcpu, function, index);
1553 exact = !!entry;
1554
1555 if (!entry && !exact_only) {
1556 entry = get_out_of_range_cpuid_entry(vcpu, &function, index);
1557 used_max_basic = !!entry;
1558 }
1559
1560 if (entry) {
1561 *eax = entry->eax;
1562 *ebx = entry->ebx;
1563 *ecx = entry->ecx;
1564 *edx = entry->edx;
1565 if (function == 7 && index == 0) {
1566 u64 data;
1567 if (!__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) &&
1568 (data & TSX_CTRL_CPUID_CLEAR))
1569 *ebx &= ~(F(RTM) | F(HLE));
1570 } else if (function == 0x80000007) {
1571 if (kvm_hv_invtsc_suppressed(vcpu))
1572 *edx &= ~SF(CONSTANT_TSC);
1573 }
1574 } else {
1575 *eax = *ebx = *ecx = *edx = 0;
1576 /*
1577 * When leaf 0BH or 1FH is defined, CL is pass-through
1578 * and EDX is always the x2APIC ID, even for undefined
1579 * subleaves. Index 1 will exist iff the leaf is
1580 * implemented, so we pass through CL iff leaf 1
1581 * exists. EDX can be copied from any existing index.
1582 */
1583 if (function == 0xb || function == 0x1f) {
1584 entry = kvm_find_cpuid_entry_index(vcpu, function, 1);
1585 if (entry) {
1586 *ecx = index & 0xff;
1587 *edx = entry->edx;
1588 }
1589 }
1590 }
1591 trace_kvm_cpuid(orig_function, index, *eax, *ebx, *ecx, *edx, exact,
1592 used_max_basic);
1593 return exact;
1594 }
1595 EXPORT_SYMBOL_GPL(kvm_cpuid);
1596
1597 int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
1598 {
1599 u32 eax, ebx, ecx, edx;
1600
1601 if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
1602 return 1;
1603
1604 eax = kvm_rax_read(vcpu);
1605 ecx = kvm_rcx_read(vcpu);
1606 kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false);
1607 kvm_rax_write(vcpu, eax);
1608 kvm_rbx_write(vcpu, ebx);
1609 kvm_rcx_write(vcpu, ecx);
1610 kvm_rdx_write(vcpu, edx);
1611 return kvm_skip_emulated_instruction(vcpu);
1612 }
1613 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
1614