// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/sched/stat.h>

#include <asm/processor.h>
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
#include "pmu.h"

static u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
        int feature_bit = 0;
        u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

        xstate_bv &= XFEATURE_MASK_EXTEND;
        while (xstate_bv) {
                if (xstate_bv & 0x1) {
                        u32 eax, ebx, ecx, edx, offset;
                        cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
                        offset = compacted ? ret : ebx;
                        ret = max(ret, offset + eax);
                }

                xstate_bv >>= 1;
                feature_bit++;
        }

        return ret;
}
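
/*
 * Worked example (illustrative; exact numbers depend on the host CPU):
 * for xstate_bv = XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
 * XFEATURE_MASK_EXTEND drops the legacy FP/SSE bits, leaving only YMM
 * (bit 2).  CPUID.(EAX=0xD,ECX=2) typically reports size 256 at offset
 * 576, so the standard-format result is max(576, 576 + 256) = 832 bytes;
 * in the compacted format the offset is the running total instead of EBX.
 */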

bool kvm_mpx_supported(void)
{
        return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
                 && kvm_x86_ops->mpx_supported());
}
EXPORT_SYMBOL_GPL(kvm_mpx_supported);

u64 kvm_supported_xcr0(void)
{
        u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;

        if (!kvm_mpx_supported())
                xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

        return xcr0;
}
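
/*
 * Example (hypothetical host): if host_xcr0 advertises FP, SSE, YMM and
 * the MPX BNDREGS/BNDCSR states, but kvm_x86_ops->mpx_supported() is
 * false, the MPX bits are stripped and guests may only enable
 * XCR0 = FP | SSE | YMM.
 */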

#define F(x) bit(X86_FEATURE_##x)

int kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;
        struct kvm_lapic *apic = vcpu->arch.apic;

        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        if (!best)
                return 0;

        /* Update OSXSAVE bit */
        if (boot_cpu_has(X86_FEATURE_XSAVE) && best->function == 0x1) {
                best->ecx &= ~F(OSXSAVE);
                if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
                        best->ecx |= F(OSXSAVE);
        }

        best->edx &= ~F(APIC);
        if (vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)
                best->edx |= F(APIC);

        if (apic) {
                if (best->ecx & F(TSC_DEADLINE_TIMER))
                        apic->lapic_timer.timer_mode_mask = 3 << 17;
                else
                        apic->lapic_timer.timer_mode_mask = 1 << 17;
        }

        best = kvm_find_cpuid_entry(vcpu, 7, 0);
        if (best) {
                /* Update OSPKE bit */
                if (boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7) {
                        best->ecx &= ~F(OSPKE);
                        if (kvm_read_cr4_bits(vcpu, X86_CR4_PKE))
                                best->ecx |= F(OSPKE);
                }
        }

        best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
        if (!best) {
                vcpu->arch.guest_supported_xcr0 = 0;
                vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
        } else {
                vcpu->arch.guest_supported_xcr0 =
                        (best->eax | ((u64)best->edx << 32)) &
                        kvm_supported_xcr0();
                vcpu->arch.guest_xstate_size = best->ebx =
                        xstate_required_size(vcpu->arch.xcr0, false);
        }

        best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
        if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
                best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

        /*
         * The existing code assumes the virtual address is 48-bit or 57-bit
         * in the canonical address checks; exit if it is ever changed.
         */
        best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
        if (best) {
                int vaddr_bits = (best->eax & 0xff00) >> 8;

                if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
                        return -EINVAL;
        }

        best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
        if (kvm_hlt_in_guest(vcpu->kvm) && best &&
                (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
                best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);

        if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
                best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
                if (best) {
                        if (vcpu->arch.ia32_misc_enable_msr & MSR_IA32_MISC_ENABLE_MWAIT)
                                best->ecx |= F(MWAIT);
                        else
                                best->ecx &= ~F(MWAIT);
                }
        }

        /* Update physical-address width */
        vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
        kvm_mmu_reset_context(vcpu);

        kvm_pmu_refresh(vcpu);
        return 0;
}
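
/*
 * Example: if guest CPUID.80000008H:EAX = 0x00003028, then
 * vaddr_bits = (0x3028 & 0xff00) >> 8 = 0x30 = 48, which passes the
 * canonical-address sanity check above; a hypothetical value of 52
 * would make kvm_update_cpuid() fail with -EINVAL.
 */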

static int is_efer_nx(void)
{
        unsigned long long efer = 0;

        rdmsrl_safe(MSR_EFER, &efer);
        return efer & EFER_NX;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_cpuid_entry2 *e, *entry;

        entry = NULL;
        for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
                e = &vcpu->arch.cpuid_entries[i];
                if (e->function == 0x80000001) {
                        entry = e;
                        break;
                }
        }
        if (entry && (entry->edx & F(NX)) && !is_efer_nx()) {
                entry->edx &= ~F(NX);
                printk(KERN_INFO "kvm: guest NX capability removed\n");
        }
}

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
        if (!best || best->eax < 0x80000008)
                goto not_found;
        best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
        if (best)
                return best->eax & 0xff;
not_found:
        return 36;
}
EXPORT_SYMBOL_GPL(cpuid_query_maxphyaddr);
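
/*
 * Example: with CPUID.80000008H:EAX = 0x00003028 the reported
 * MAXPHYADDR is 0x28 = 40 bits; if the guest's extended leaf range
 * does not reach 0x80000008 at all, the legacy default of 36 bits
 * is assumed.
 */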

/* when an old userspace process fills a new kernel module */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                             struct kvm_cpuid *cpuid,
                             struct kvm_cpuid_entry __user *entries)
{
        int r, i;
        struct kvm_cpuid_entry *cpuid_entries = NULL;

        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -ENOMEM;
        if (cpuid->nent) {
                cpuid_entries =
                        vmalloc(array_size(sizeof(struct kvm_cpuid_entry),
                                           cpuid->nent));
                if (!cpuid_entries)
                        goto out;
                r = -EFAULT;
                if (copy_from_user(cpuid_entries, entries,
                                   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
                        goto out;
        }
        for (i = 0; i < cpuid->nent; i++) {
                vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
                vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
                vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
                vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
                vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
                vcpu->arch.cpuid_entries[i].index = 0;
                vcpu->arch.cpuid_entries[i].flags = 0;
                vcpu->arch.cpuid_entries[i].padding[0] = 0;
                vcpu->arch.cpuid_entries[i].padding[1] = 0;
                vcpu->arch.cpuid_entries[i].padding[2] = 0;
        }
        vcpu->arch.cpuid_nent = cpuid->nent;
        cpuid_fix_nx_cap(vcpu);
        kvm_apic_set_version(vcpu);
        kvm_x86_ops->cpuid_update(vcpu);
        r = kvm_update_cpuid(vcpu);

out:
        vfree(cpuid_entries);
        return r;
}

int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -EFAULT;
        if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        vcpu->arch.cpuid_nent = cpuid->nent;
        kvm_apic_set_version(vcpu);
        kvm_x86_ops->cpuid_update(vcpu);
        r = kvm_update_cpuid(vcpu);
out:
        return r;
}

int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent < vcpu->arch.cpuid_nent)
                goto out;
        r = -EFAULT;
        if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
                         vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        return 0;

out:
        cpuid->nent = vcpu->arch.cpuid_nent;
        return r;
}

static void cpuid_mask(u32 *word, int wordnum)
{
        *word &= boot_cpu_data.x86_capability[wordnum];
}
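
/*
 * Example: if a KVM feature list below requests F(AVX2) but the host's
 * boot_cpu_data.x86_capability[CPUID_7_0_EBX] word lacks the AVX2 bit,
 * cpuid_mask() clears it, so a feature is only advertised when both
 * KVM and the host CPU support it.
 */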

static void do_host_cpuid(struct kvm_cpuid_entry2 *entry, u32 function,
                          u32 index)
{
        entry->function = function;
        entry->index = index;
        entry->flags = 0;

        cpuid_count(entry->function, entry->index,
                    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);

        switch (function) {
        case 2:
                entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
                break;
        case 4:
        case 7:
        case 0xb:
        case 0xd:
        case 0xf:
        case 0x10:
        case 0x12:
        case 0x14:
        case 0x17:
        case 0x18:
        case 0x1f:
        case 0x8000001d:
                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                break;
        }
}

static int __do_cpuid_func_emulated(struct kvm_cpuid_entry2 *entry,
                                    u32 func, int *nent, int maxnent)
{
        entry->function = func;
        entry->index = 0;
        entry->flags = 0;

        switch (func) {
        case 0:
                entry->eax = 7;
                ++*nent;
                break;
        case 1:
                entry->ecx = F(MOVBE);
                ++*nent;
                break;
        case 7:
                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                entry->eax = 0;
                entry->ecx = F(RDPID);
                ++*nent;
        default:
                break;
        }

        return 0;
}

static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index)
{
        unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
        unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
        unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
        unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
        unsigned f_la57;
        unsigned f_pku = kvm_x86_ops->pku_supported() ? F(PKU) : 0;

        /* cpuid 7.0.ebx */
        const u32 kvm_cpuid_7_0_ebx_x86_features =
                F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
                F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
                F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
                F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
                F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | f_intel_pt;

        /* cpuid 7.0.ecx */
        const u32 kvm_cpuid_7_0_ecx_x86_features =
                F(AVX512VBMI) | F(LA57) | 0 /*PKU*/ | 0 /*OSPKE*/ | F(RDPID) |
                F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
                F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
                F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/;

        /* cpuid 7.0.edx */
        const u32 kvm_cpuid_7_0_edx_x86_features =
                F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
                F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
                F(MD_CLEAR);

        /* cpuid 7.1.eax */
        const u32 kvm_cpuid_7_1_eax_x86_features =
                F(AVX512_BF16);

        switch (index) {
        case 0:
                entry->eax = min(entry->eax, 1u);
                entry->ebx &= kvm_cpuid_7_0_ebx_x86_features;
                cpuid_mask(&entry->ebx, CPUID_7_0_EBX);
                /* TSC_ADJUST is emulated */
                entry->ebx |= F(TSC_ADJUST);

                entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
                f_la57 = entry->ecx & F(LA57);
                cpuid_mask(&entry->ecx, CPUID_7_ECX);
                /* Set LA57 based on hardware capability. */
                entry->ecx |= f_la57;
                entry->ecx |= f_umip;
                entry->ecx |= f_pku;
                /* PKU is not yet implemented for shadow paging. */
                if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
                        entry->ecx &= ~F(PKU);

                entry->edx &= kvm_cpuid_7_0_edx_x86_features;
                cpuid_mask(&entry->edx, CPUID_7_EDX);
                if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))
                        entry->edx |= F(SPEC_CTRL);
                if (boot_cpu_has(X86_FEATURE_STIBP))
                        entry->edx |= F(INTEL_STIBP);
                if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
                    boot_cpu_has(X86_FEATURE_AMD_SSBD))
                        entry->edx |= F(SPEC_CTRL_SSBD);
                /*
                 * We emulate ARCH_CAPABILITIES in software even
                 * if the host doesn't support it.
                 */
                entry->edx |= F(ARCH_CAPABILITIES);
                break;
        case 1:
                entry->eax &= kvm_cpuid_7_1_eax_x86_features;
                entry->ebx = 0;
                entry->ecx = 0;
                entry->edx = 0;
                break;
        default:
                WARN_ON_ONCE(1);
                entry->eax = 0;
                entry->ebx = 0;
                entry->ecx = 0;
                entry->edx = 0;
                break;
        }
}

static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
                                  int *nent, int maxnent)
{
        int r;
        unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
        unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
                                ? F(GBPAGES) : 0;
        unsigned f_lm = F(LM);
#else
        unsigned f_gbpages = 0;
        unsigned f_lm = 0;
#endif
        unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
        unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
        unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;

        /* cpuid 1.edx */
        const u32 kvm_cpuid_1_edx_x86_features =
                F(FPU) | F(VME) | F(DE) | F(PSE) |
                F(TSC) | F(MSR) | F(PAE) | F(MCE) |
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
                0 /* Reserved, DS, ACPI */ | F(MMX) |
                F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
                0 /* HTT, TM, Reserved, PBE */;
        /* cpuid 0x80000001.edx */
        const u32 kvm_cpuid_8000_0001_edx_x86_features =
                F(FPU) | F(VME) | F(DE) | F(PSE) |
                F(TSC) | F(MSR) | F(PAE) | F(MCE) |
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* Reserved */ |
                f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
                F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
                0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
        /* cpuid 1.ecx */
        const u32 kvm_cpuid_1_ecx_x86_features =
                /*
                 * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not*
                 * advertised to guests via CPUID!
                 */
                F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
                0 /* DS-CPL, VMX, SMX, EST */ |
                0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
                F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
                F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
                F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
                0 /* Reserved */ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
                F(F16C) | F(RDRAND);
        /* cpuid 0x80000001.ecx */
        const u32 kvm_cpuid_8000_0001_ecx_x86_features =
                F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
                F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
                F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
                0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
                F(TOPOEXT) | F(PERFCTR_CORE);

        /* cpuid 0x80000008.ebx */
        const u32 kvm_cpuid_8000_0008_ebx_x86_features =
                F(CLZERO) | F(XSAVEERPTR) |
                F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
                F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON);

        /* cpuid 0xC0000001.edx */
        const u32 kvm_cpuid_C000_0001_edx_x86_features =
                F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
                F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
                F(PMM) | F(PMM_EN);

        /* cpuid 0xD.1.eax */
        const u32 kvm_cpuid_D_1_eax_x86_features =
                F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;

        /* all calls to cpuid_count() should be made on the same cpu */
        get_cpu();

        r = -E2BIG;

        if (WARN_ON(*nent >= maxnent))
                goto out;

        do_host_cpuid(entry, function, 0);
        ++*nent;

        switch (function) {
        case 0:
                /* Limited to the highest leaf implemented in KVM. */
                entry->eax = min(entry->eax, 0x1fU);
                break;
        case 1:
                entry->edx &= kvm_cpuid_1_edx_x86_features;
                cpuid_mask(&entry->edx, CPUID_1_EDX);
                entry->ecx &= kvm_cpuid_1_ecx_x86_features;
                cpuid_mask(&entry->ecx, CPUID_1_ECX);
                /*
                 * We support x2apic emulation even if the host does not
                 * support it, since we emulate x2apic in software.
                 */
                entry->ecx |= F(X2APIC);
                break;
        /*
         * Function 2 entries are STATEFUL.  That is, repeated cpuid commands
         * may return different values.  This forces us to get_cpu() before
         * issuing the first command, and also to emulate this annoying
         * behavior in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT.
         */
        case 2: {
                int t, times = entry->eax & 0xff;

                entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
                for (t = 1; t < times; ++t) {
                        if (*nent >= maxnent)
                                goto out;

                        do_host_cpuid(&entry[t], function, 0);
                        ++*nent;
                }
                break;
        }
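
        /*
         * Example: on most modern Intel parts CPUID.2H:EAX[7:0] = 1, so no
         * extra iterations run; a hypothetical value of 3 would make KVM
         * capture two additional stateful entries for the guest to cycle
         * through on repeated CPUID(2) executions.
         */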
        /* functions 4 and 0x8000001d have additional index. */
        case 4:
        case 0x8000001d: {
                int i, cache_type;

                /* read more entries until cache_type is zero */
                for (i = 1; ; ++i) {
                        if (*nent >= maxnent)
                                goto out;

                        cache_type = entry[i - 1].eax & 0x1f;
                        if (!cache_type)
                                break;
                        do_host_cpuid(&entry[i], function, i);
                        ++*nent;
                }
                break;
        }
        case 6: /* Thermal management */
                entry->eax = 0x4; /* allow ARAT */
                entry->ebx = 0;
                entry->ecx = 0;
                entry->edx = 0;
                break;
        /* function 7 has additional index. */
        case 7: {
                int i;

                for (i = 0; ; ) {
                        do_cpuid_7_mask(&entry[i], i);
                        if (i == entry->eax)
                                break;
                        if (*nent >= maxnent)
                                goto out;

                        ++i;
                        do_host_cpuid(&entry[i], function, i);
                        ++*nent;
                }
                break;
        }
        case 9:
                break;
        case 0xa: { /* Architectural Performance Monitoring */
                struct x86_pmu_capability cap;
                union cpuid10_eax eax;
                union cpuid10_edx edx;

                if (!static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
                        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                        break;
                }

                perf_get_x86_pmu_capability(&cap);

                /*
                 * Only support guest architectural pmu on a host
                 * with architectural pmu.
                 */
                if (!cap.version)
                        memset(&cap, 0, sizeof(cap));

                eax.split.version_id = min(cap.version, 2);
                eax.split.num_counters = cap.num_counters_gp;
                eax.split.bit_width = cap.bit_width_gp;
                eax.split.mask_length = cap.events_mask_len;

                edx.split.num_counters_fixed = cap.num_counters_fixed;
                edx.split.bit_width_fixed = cap.bit_width_fixed;
                edx.split.reserved = 0;

                entry->eax = eax.full;
                entry->ebx = cap.events_mask;
                entry->ecx = 0;
                entry->edx = edx.full;
                break;
        }
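
        /*
         * Worked example (hypothetical capabilities): version 2, four
         * general-purpose counters, 48-bit counter width and an event
         * mask length of 7 pack into EAX as 0x07300402 (version_id in
         * bits 7:0, num_counters in 15:8, bit_width in 23:16,
         * mask_length in 31:24).
         */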
        /*
         * Per Intel's SDM, leaf 0x1f is a superset of leaf 0xb,
         * thus they can be handled by common code.
         */
        case 0x1f:
        case 0xb: {
                int i;

                /*
                 * We filled in entry[0] for CPUID(EAX=<function>,
                 * ECX=00H) above.  If its level type (ECX[15:8]) is
                 * zero, then the leaf is unimplemented, and we're
                 * done.  Otherwise, continue to populate entries
                 * until the level type (ECX[15:8]) of the previously
                 * added entry is zero.
                 */
                for (i = 1; entry[i - 1].ecx & 0xff00; ++i) {
                        if (*nent >= maxnent)
                                goto out;

                        do_host_cpuid(&entry[i], function, i);
                        ++*nent;
                }
                break;
        }
        case 0xd: {
                int idx, i;
                u64 supported = kvm_supported_xcr0();

                entry->eax &= supported;
                entry->ebx = xstate_required_size(supported, false);
                entry->ecx = entry->ebx;
                entry->edx &= supported >> 32;
                if (!supported)
                        break;

                for (idx = 1, i = 1; idx < 64; ++idx) {
                        u64 mask = ((u64)1 << idx);
                        if (*nent >= maxnent)
                                goto out;

                        do_host_cpuid(&entry[i], function, idx);
                        if (idx == 1) {
                                entry[i].eax &= kvm_cpuid_D_1_eax_x86_features;
                                cpuid_mask(&entry[i].eax, CPUID_D_1_EAX);
                                entry[i].ebx = 0;
                                if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
                                        entry[i].ebx =
                                                xstate_required_size(supported,
                                                                     true);
                        } else {
                                if (entry[i].eax == 0 || !(supported & mask))
                                        continue;
                                if (WARN_ON_ONCE(entry[i].ecx & 1))
                                        continue;
                        }
                        entry[i].ecx = 0;
                        entry[i].edx = 0;
                        ++*nent;
                        ++i;
                }
                break;
        }
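
        /*
         * Example: with supported = FP | SSE | YMM, only sub-leaf 1 (the
         * XSAVE-extensions leaf, always captured) and sub-leaf 2 (the YMM
         * state component) survive the loop above; every other idx is
         * skipped because its bit is clear in the supported mask or the
         * host reports a zero size.
         */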
        /* Intel PT */
        case 0x14: {
                int t, times = entry->eax;

                if (!f_intel_pt)
                        break;

                for (t = 1; t <= times; ++t) {
                        if (*nent >= maxnent)
                                goto out;
                        do_host_cpuid(&entry[t], function, t);
                        ++*nent;
                }
                break;
        }
        case KVM_CPUID_SIGNATURE: {
                static const char signature[12] = "KVMKVMKVM\0\0";
                const u32 *sigptr = (const u32 *)signature;
                entry->eax = KVM_CPUID_FEATURES;
                entry->ebx = sigptr[0];
                entry->ecx = sigptr[1];
                entry->edx = sigptr[2];
                break;
        }
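
        /*
         * Worked example: the 12-byte "KVMKVMKVM\0\0\0" signature is read
         * back as three little-endian words, so a guest probing the
         * hypervisor leaf sees EBX = 0x4b4d564b, ECX = 0x564b4d56 and
         * EDX = 0x0000004d, which reassemble to the KVM signature string.
         */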
        case KVM_CPUID_FEATURES:
                entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
                             (1 << KVM_FEATURE_NOP_IO_DELAY) |
                             (1 << KVM_FEATURE_CLOCKSOURCE2) |
                             (1 << KVM_FEATURE_ASYNC_PF) |
                             (1 << KVM_FEATURE_PV_EOI) |
                             (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
                             (1 << KVM_FEATURE_PV_UNHALT) |
                             (1 << KVM_FEATURE_PV_TLB_FLUSH) |
                             (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
                             (1 << KVM_FEATURE_PV_SEND_IPI) |
                             (1 << KVM_FEATURE_POLL_CONTROL) |
                             (1 << KVM_FEATURE_PV_SCHED_YIELD);

                if (sched_info_on())
                        entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

                entry->ebx = 0;
                entry->ecx = 0;
                entry->edx = 0;
                break;
        case 0x80000000:
                entry->eax = min(entry->eax, 0x8000001f);
                break;
        case 0x80000001:
                entry->edx &= kvm_cpuid_8000_0001_edx_x86_features;
                cpuid_mask(&entry->edx, CPUID_8000_0001_EDX);
                entry->ecx &= kvm_cpuid_8000_0001_ecx_x86_features;
                cpuid_mask(&entry->ecx, CPUID_8000_0001_ECX);
                break;
        case 0x80000007: /* Advanced power management */
                /* invariant TSC is CPUID.80000007H:EDX[8] */
                entry->edx &= (1 << 8);
                /* mask against host */
                entry->edx &= boot_cpu_data.x86_power;
                entry->eax = entry->ebx = entry->ecx = 0;
                break;
        case 0x80000008: {
                unsigned g_phys_as = (entry->eax >> 16) & 0xff;
                unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
                unsigned phys_as = entry->eax & 0xff;

                /*
                 * Use bare metal's MAXPHYADDR if the CPU doesn't report guest
                 * MAXPHYADDR separately, or if TDP (NPT) is disabled, as the
                 * guest version "applies only to guests using nested paging".
                 */
                if (!g_phys_as || !tdp_enabled)
                        g_phys_as = phys_as;

                entry->eax = g_phys_as | (virt_as << 8);
                entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
                entry->edx = 0;
                entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
                cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
                /*
                 * AMD has separate bits for each SPEC_CTRL bit.
                 * arch/x86/kernel/cpu/bugs.c is kind enough to
                 * record that in cpufeatures so use them.
                 */
                if (boot_cpu_has(X86_FEATURE_IBPB))
                        entry->ebx |= F(AMD_IBPB);
                if (boot_cpu_has(X86_FEATURE_IBRS))
                        entry->ebx |= F(AMD_IBRS);
                if (boot_cpu_has(X86_FEATURE_STIBP))
                        entry->ebx |= F(AMD_STIBP);
                if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
                    boot_cpu_has(X86_FEATURE_AMD_SSBD))
                        entry->ebx |= F(AMD_SSBD);
                if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                        entry->ebx |= F(AMD_SSB_NO);
                /*
                 * The preference is to use the SPEC_CTRL MSR instead of
                 * the VIRT_SPEC MSR.
                 */
                if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
                    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
                        entry->ebx |= F(VIRT_SSBD);
                break;
        }
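
        /*
         * Example: for host EAX = 0x00003028 (48-bit virtual, 40-bit
         * physical, no separate guest MAXPHYADDR), g_phys_as reads as
         * zero and falls back to phys_as, so the guest sees
         * EAX = 40 | (48 << 8) = 0x3028.
         */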
        case 0x80000019:
                entry->ecx = entry->edx = 0;
                break;
        case 0x8000001a:
                entry->eax &= GENMASK(2, 0);
                entry->ebx = entry->ecx = entry->edx = 0;
                break;
        case 0x8000001e:
                break;
        /* Add support for Centaur's CPUID instruction. */
        case 0xC0000000:
                /* Just support up to 0xC0000004 now. */
                entry->eax = min(entry->eax, 0xC0000004);
                break;
        case 0xC0000001:
                entry->edx &= kvm_cpuid_C000_0001_edx_x86_features;
                cpuid_mask(&entry->edx, CPUID_C000_0001_EDX);
                break;
        case 3: /* Processor serial number */
        case 5: /* MONITOR/MWAIT */
        case 0xC0000002:
        case 0xC0000003:
        case 0xC0000004:
        default:
                entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                break;
        }

        kvm_x86_ops->set_supported_cpuid(function, entry);

        r = 0;

out:
        put_cpu();

        return r;
}

static int do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 func,
                         int *nent, int maxnent, unsigned int type)
{
        if (*nent >= maxnent)
                return -E2BIG;

        if (type == KVM_GET_EMULATED_CPUID)
                return __do_cpuid_func_emulated(entry, func, nent, maxnent);

        return __do_cpuid_func(entry, func, nent, maxnent);
}

#undef F

struct kvm_cpuid_param {
        u32 func;
        bool (*qualifier)(const struct kvm_cpuid_param *param);
};

static bool is_centaur_cpu(const struct kvm_cpuid_param *param)
{
        return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
}

static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
                                 __u32 num_entries, unsigned int ioctl_type)
{
        int i;
        __u32 pad[3];

        if (ioctl_type != KVM_GET_EMULATED_CPUID)
                return false;

        /*
         * We want to make sure that ->padding is being passed clean from
         * userspace in case we want to use it for something in the future.
         *
         * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID, so we
         * have to content ourselves with checking only the emulated side.
         * /me sheds a tear.
         */
        for (i = 0; i < num_entries; i++) {
                if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
                        return true;

                if (pad[0] || pad[1] || pad[2])
                        return true;
        }
        return false;
}

int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
                            struct kvm_cpuid_entry2 __user *entries,
                            unsigned int type)
{
        struct kvm_cpuid_entry2 *cpuid_entries;
        int limit, nent = 0, r = -E2BIG, i;
        u32 func;
        static const struct kvm_cpuid_param param[] = {
                { .func = 0 },
                { .func = 0x80000000 },
                { .func = 0xC0000000, .qualifier = is_centaur_cpu },
                { .func = KVM_CPUID_SIGNATURE },
        };

        if (cpuid->nent < 1)
                goto out;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                cpuid->nent = KVM_MAX_CPUID_ENTRIES;

        if (sanity_check_entries(entries, cpuid->nent, type))
                return -EINVAL;

        r = -ENOMEM;
        cpuid_entries = vzalloc(array_size(sizeof(struct kvm_cpuid_entry2),
                                           cpuid->nent));
        if (!cpuid_entries)
                goto out;

        r = 0;
        for (i = 0; i < ARRAY_SIZE(param); i++) {
                const struct kvm_cpuid_param *ent = &param[i];

                if (ent->qualifier && !ent->qualifier(ent))
                        continue;

                r = do_cpuid_func(&cpuid_entries[nent], ent->func,
                                  &nent, cpuid->nent, type);

                if (r)
                        goto out_free;

                limit = cpuid_entries[nent - 1].eax;
                for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
                        r = do_cpuid_func(&cpuid_entries[nent], func,
                                          &nent, cpuid->nent, type);

                if (r)
                        goto out_free;
        }

        r = -EFAULT;
        if (copy_to_user(entries, cpuid_entries,
                         nent * sizeof(struct kvm_cpuid_entry2)))
                goto out_free;
        cpuid->nent = nent;
        r = 0;

out_free:
        vfree(cpuid_entries);
out:
        return r;
}
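
/*
 * Typical userspace usage (illustrative sketch, not part of this file;
 * the buffer size and file descriptors are assumptions):
 *
 *	struct kvm_cpuid2 *cpuid;
 *
 *	cpuid = calloc(1, sizeof(*cpuid) +
 *			  64 * sizeof(struct kvm_cpuid_entry2));
 *	cpuid->nent = 64;
 *	if (ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid) == 0)
 *		ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);
 *
 * If nent is too small for the supported leaves, the ioctl fails with
 * E2BIG and userspace retries with a larger buffer.
 */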

static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
        struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
        struct kvm_cpuid_entry2 *ej;
        int j = i;
        int nent = vcpu->arch.cpuid_nent;

        e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
        /* when no next entry is found, the current entry[i] is reselected */
        do {
                j = (j + 1) % nent;
                ej = &vcpu->arch.cpuid_entries[j];
        } while (ej->function != e->function);

        ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;

        return j;
}
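
/*
 * Example: if leaf 2 occupies entries 3, 4 and 5 and entry 4 was just
 * read, the READ_NEXT flag moves from entry 4 to entry 5, then wraps
 * back to entry 3 on the following read, giving the round-robin
 * semantics that real hardware documents for stateful leaves.
 */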

/*
 * Find an entry with matching function, matching index (if needed), and that
 * should be read next (if it's stateful).
 */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
                                   u32 function, u32 index)
{
        if (e->function != function)
                return 0;
        if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
                return 0;
        if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
            !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
                return 0;
        return 1;
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
                                              u32 function, u32 index)
{
        int i;
        struct kvm_cpuid_entry2 *best = NULL;

        for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
                struct kvm_cpuid_entry2 *e;

                e = &vcpu->arch.cpuid_entries[i];
                if (is_matching_cpuid_entry(e, function, index)) {
                        if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
                                move_to_next_stateful_cpuid_entry(vcpu, i);
                        best = e;
                        break;
                }
        }
        return best;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);

/*
 * If the basic or extended CPUID leaf requested is higher than the
 * maximum supported basic or extended leaf, respectively, then it is
 * out of range.
 */
static bool cpuid_function_in_range(struct kvm_vcpu *vcpu, u32 function)
{
        struct kvm_cpuid_entry2 *max;

        max = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
        return max && function <= max->eax;
}

bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
               u32 *ecx, u32 *edx, bool check_limit)
{
        u32 function = *eax, index = *ecx;
        struct kvm_cpuid_entry2 *entry;
        struct kvm_cpuid_entry2 *max;
        bool found;

        entry = kvm_find_cpuid_entry(vcpu, function, index);
        found = entry;
        /*
         * Intel CPUID semantics treats any query for an out-of-range
         * leaf as if the highest basic leaf (i.e. CPUID.0H:EAX) were
         * requested.  AMD CPUID semantics returns all zeroes for any
         * undefined leaf, whether or not the leaf is in range.
         */
        if (!entry && check_limit && !guest_cpuid_is_amd(vcpu) &&
            !cpuid_function_in_range(vcpu, function)) {
                max = kvm_find_cpuid_entry(vcpu, 0, 0);
                if (max) {
                        function = max->eax;
                        entry = kvm_find_cpuid_entry(vcpu, function, index);
                }
        }
        if (entry) {
                *eax = entry->eax;
                *ebx = entry->ebx;
                *ecx = entry->ecx;
                *edx = entry->edx;
        } else {
                *eax = *ebx = *ecx = *edx = 0;
                /*
                 * When leaf 0BH or 1FH is defined, CL is pass-through
                 * and EDX is always the x2APIC ID, even for undefined
                 * subleaves.  Index 1 will exist iff the leaf is
                 * implemented, so we pass through CL iff leaf 1
                 * exists.  EDX can be copied from any existing index.
                 */
                if (function == 0xb || function == 0x1f) {
                        entry = kvm_find_cpuid_entry(vcpu, function, 1);
                        if (entry) {
                                *ecx = index & 0xff;
                                *edx = entry->edx;
                        }
                }
        }
        trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx, found);
        return found;
}
EXPORT_SYMBOL_GPL(kvm_cpuid);
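
/*
 * Example: on an Intel guest whose highest basic leaf is 0xd, a query
 * for the undefined leaf 0x15 with check_limit set is redirected to
 * leaf 0xd (with the original ECX index), matching Intel's documented
 * out-of-range behavior; an AMD guest would simply get all zeroes.
 */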

int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
        u32 eax, ebx, ecx, edx;

        if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
                return 1;

        eax = kvm_rax_read(vcpu);
        ecx = kvm_rcx_read(vcpu);
        kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, true);
        kvm_rax_write(vcpu, eax);
        kvm_rbx_write(vcpu, ebx);
        kvm_rcx_write(vcpu, ecx);
        kvm_rdx_write(vcpu, edx);
        return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);