
Lines Matching full:cpuid

4  * cpuid support routines
21 #include "cpuid.h"
100 * save the feature bitmap to avoid cpuid lookup for every PV in kvm_update_pv_runtime()
227 struct kvm_cpuid *cpuid, in kvm_vcpu_ioctl_set_cpuid() argument
234 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) in kvm_vcpu_ioctl_set_cpuid()
237 if (cpuid->nent) { in kvm_vcpu_ioctl_set_cpuid()
238 e = vmemdup_user(entries, array_size(sizeof(*e), cpuid->nent)); in kvm_vcpu_ioctl_set_cpuid()
242 e2 = kvmalloc_array(cpuid->nent, sizeof(*e2), GFP_KERNEL_ACCOUNT); in kvm_vcpu_ioctl_set_cpuid()
248 for (i = 0; i < cpuid->nent; i++) { in kvm_vcpu_ioctl_set_cpuid()
261 r = kvm_check_cpuid(e2, cpuid->nent); in kvm_vcpu_ioctl_set_cpuid()
269 vcpu->arch.cpuid_nent = cpuid->nent; in kvm_vcpu_ioctl_set_cpuid()
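The kvm_vcpu_ioctl_set_cpuid() fragments above trace the legacy KVM_SET_CPUID path: bound cpuid->nent, copy the user array with vmemdup_user(), widen each legacy struct kvm_cpuid_entry into a struct kvm_cpuid_entry2, then validate the whole set with kvm_check_cpuid() before installing it. Below is a minimal userspace sketch of just the widening step, using the UAPI structs pulled in through <linux/kvm.h>; the 256-entry bound mirrors the kernel-internal KVM_MAX_CPUID_ENTRIES and is an assumption here, not something visible in this file.

    #include <stdlib.h>
    #include <linux/kvm.h>

    /* Assumed to match the kernel-internal KVM_MAX_CPUID_ENTRIES. */
    #define CPUID_ENTRIES_LIMIT 256

    /*
     * Widen legacy entries (no index/flags) into kvm_cpuid_entry2, the
     * same shape kvm_vcpu_ioctl_set_cpuid() builds before validation.
     * Returns a calloc'd array the caller must free, or NULL on error.
     */
    static struct kvm_cpuid_entry2 *
    widen_legacy_entries(const struct kvm_cpuid_entry *e, unsigned int nent)
    {
        struct kvm_cpuid_entry2 *e2;
        unsigned int i;

        if (nent > CPUID_ENTRIES_LIMIT)
            return NULL;

        e2 = calloc(nent, sizeof(*e2));
        if (!e2)
            return NULL;

        for (i = 0; i < nent; i++) {
            e2[i].function = e[i].function;
            e2[i].eax = e[i].eax;
            e2[i].ebx = e[i].ebx;
            e2[i].ecx = e[i].ecx;
            e2[i].edx = e[i].edx;
            /* Legacy entries carry no index or flags; leave them zero. */
        }
        return e2;
    }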
282 struct kvm_cpuid2 *cpuid, in kvm_vcpu_ioctl_set_cpuid2() argument
288 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) in kvm_vcpu_ioctl_set_cpuid2()
291 if (cpuid->nent) { in kvm_vcpu_ioctl_set_cpuid2()
292 e2 = vmemdup_user(entries, array_size(sizeof(*e2), cpuid->nent)); in kvm_vcpu_ioctl_set_cpuid2()
297 r = kvm_check_cpuid(e2, cpuid->nent); in kvm_vcpu_ioctl_set_cpuid2()
305 vcpu->arch.cpuid_nent = cpuid->nent; in kvm_vcpu_ioctl_set_cpuid2()
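kvm_vcpu_ioctl_set_cpuid2() expects entries already in kvm_cpuid_entry2 form, so from userspace the whole exchange is a single KVM_SET_CPUID2 ioctl on the vcpu fd; if kvm_check_cpuid() rejects the set (or nent exceeds the limit), the ioctl fails and nothing from the rejected set is applied. A hedged sketch of that call, assuming the caller already holds an open vcpu fd and a prepared entry array:

    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Copy nent prepared entries into a kvm_cpuid2 and hand them to KVM. */
    static int set_vcpu_cpuid(int vcpu_fd,
                              const struct kvm_cpuid_entry2 *entries,
                              unsigned int nent)
    {
        struct kvm_cpuid2 *cpuid;
        unsigned int i;
        int ret;

        cpuid = calloc(1, sizeof(*cpuid) + nent * sizeof(cpuid->entries[0]));
        if (!cpuid)
            return -1;

        cpuid->nent = nent;
        for (i = 0; i < nent; i++)
            cpuid->entries[i] = entries[i];

        ret = ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);
        free(cpuid);
        return ret;
    }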
314 struct kvm_cpuid2 *cpuid, in kvm_vcpu_ioctl_get_cpuid2() argument
320 if (cpuid->nent < vcpu->arch.cpuid_nent) in kvm_vcpu_ioctl_get_cpuid2()
329 cpuid->nent = vcpu->arch.cpuid_nent; in kvm_vcpu_ioctl_get_cpuid2()
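The read-back side, kvm_vcpu_ioctl_get_cpuid2(), refuses to truncate: a buffer whose nent is smaller than the installed set fails with E2BIG, and on success nent is rewritten to the actual entry count. A small sketch that dumps a vcpu's current CPUID, sized to the same assumed 256-entry maximum as above:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    #define CPUID_ENTRIES_LIMIT 256   /* assumed kernel maximum, as above */

    /* Print function/index and the four output registers per entry. */
    static void dump_vcpu_cpuid(int vcpu_fd)
    {
        struct kvm_cpuid2 *cpuid;
        unsigned int i;

        cpuid = calloc(1, sizeof(*cpuid) +
                          CPUID_ENTRIES_LIMIT * sizeof(cpuid->entries[0]));
        if (!cpuid)
            return;

        cpuid->nent = CPUID_ENTRIES_LIMIT;
        if (ioctl(vcpu_fd, KVM_GET_CPUID2, cpuid) == 0) {
            for (i = 0; i < cpuid->nent; i++) {
                const struct kvm_cpuid_entry2 *e = &cpuid->entries[i];

                printf("0x%08x.%u: %08x %08x %08x %08x\n",
                       e->function, e->index,
                       e->eax, e->ebx, e->ecx, e->edx);
            }
        }
        free(cpuid);
    }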
335 const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32); in kvm_cpu_cap_mask() local
341 cpuid_count(cpuid.function, cpuid.index, in kvm_cpu_cap_mask()
344 kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg); in kvm_cpu_cap_mask()
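kvm_cpu_cap_mask() resolves which (function, index, register) backs a given kvm_cpu_caps word via x86_feature_cpuid(), reads that leaf on the host with cpuid_count(), and ANDs the host value into the capability word, so KVM never advertises a feature the host CPU lacks. A simplified userspace analogue of that masking, with CPUID.(EAX=7,ECX=0):EBX hard-coded purely for illustration (the leaf choice is an assumption, not taken from this file):

    #include <stdint.h>
    #include <cpuid.h>   /* GCC/Clang __cpuid_count() */

    /*
     * Same shape as kvm_cpu_cap_mask(): start from a whitelist of features
     * we are willing to expose, then clear anything the host CPU does not
     * actually report in the corresponding CPUID register.
     */
    static uint32_t cap_mask_7_0_ebx(uint32_t supported_mask)
    {
        uint32_t eax, ebx, ecx, edx;

        __cpuid_count(7, 0, eax, ebx, ecx, edx);
        return supported_mask & ebx;
    }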
367 * advertised to guests via CPUID! in kvm_set_cpu_caps()
611 * CPUID(function=2, index=0) may return different results each in __do_cpuid_func()
613 * number of times software should do CPUID(2, 0). in __do_cpuid_func()
619 * APM states that CPUID(2) is reserved. in __do_cpuid_func()
622 * a stateful CPUID.0x2 is encountered. in __do_cpuid_func()
706 * previous entry is zero. Note, CPUID EAX.{0x1f,0xb}.0 is in __do_cpuid_func()
820 /* invariant TSC is CPUID.80000007H:EDX[8] */ in __do_cpuid_func()
861 /* Support memory encryption cpuid if host supports it */ in __do_cpuid_func()
866 /* Add support for Centaur's CPUID instruction */ in __do_cpuid_func()
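Several __do_cpuid_func() fragments above key off single host CPUID bits; line 820, for instance, notes that invariant TSC lives in CPUID.80000007H:EDX[8]. As a sketch of how that same bit can be probed from userspace with the compiler's <cpuid.h> helpers (not something this file does itself):

    #include <stdbool.h>
    #include <cpuid.h>   /* GCC/Clang __get_cpuid() */

    /* Invariant TSC is advertised in CPUID.80000007H:EDX bit 8. */
    static bool host_has_invariant_tsc(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx))
            return false;   /* extended leaf not implemented */

        return edx & (1u << 8);
    }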
954 int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid, in kvm_dev_ioctl_get_cpuid() argument
967 if (cpuid->nent < 1) in kvm_dev_ioctl_get_cpuid()
969 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) in kvm_dev_ioctl_get_cpuid()
970 cpuid->nent = KVM_MAX_CPUID_ENTRIES; in kvm_dev_ioctl_get_cpuid()
972 if (sanity_check_entries(entries, cpuid->nent, type)) in kvm_dev_ioctl_get_cpuid()
976 cpuid->nent)); in kvm_dev_ioctl_get_cpuid()
980 array.maxnent = cpuid->nent; in kvm_dev_ioctl_get_cpuid()
987 cpuid->nent = array.nent; in kvm_dev_ioctl_get_cpuid()
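kvm_dev_ioctl_get_cpuid() clamps an over-large request down to KVM_MAX_CPUID_ENTRIES but rejects nent < 1, and the fill path answers E2BIG whenever the array runs out of room, so the usual userspace pattern for KVM_GET_SUPPORTED_CPUID is to grow the buffer and retry until the kernel stops complaining. A sketch of that loop, assuming an open /dev/kvm fd:

    #include <errno.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /*
     * Query the CPUID that KVM is willing to expose.  On success the
     * returned kvm_cpuid2 (caller frees it) has nent set to the real
     * entry count.
     */
    static struct kvm_cpuid2 *get_supported_cpuid(int kvm_fd)
    {
        unsigned int nent = 64;
        struct kvm_cpuid2 *cpuid;

        for (;;) {
            cpuid = calloc(1, sizeof(*cpuid) +
                              nent * sizeof(cpuid->entries[0]));
            if (!cpuid)
                return NULL;

            cpuid->nent = nent;
            if (ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid) == 0)
                return cpuid;

            free(cpuid);
            if (errno != E2BIG)
                return NULL;
            nent *= 2;   /* buffer too small; retry with more room */
        }
    }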
1007 * Intel CPUID semantics treats any query for an out-of-range leaf as if the
1008 * highest basic leaf (i.e. CPUID.0H:EAX) were requested. AMD CPUID semantics
1018 * class exists if a guest CPUID entry for its <base> leaf exists. For a given
1019 * class, CPUID.<base>.EAX contains the max supported leaf for the class.
1029 * CPUID sub-classes are:
1061 * entry for CPUID.0xb.index (see below), then the output value for EDX in get_out_of_range_cpuid_entry()
1062 * needs to be pulled from CPUID.0xb.1. in get_out_of_range_cpuid_entry()
1068 * the effective CPUID entry is the max basic leaf. Note, the index of in get_out_of_range_cpuid_entry()
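The block comment excerpted above (file lines 1007-1068) lays out the out-of-range rules: under Intel semantics a query past a class's limit behaves as if the highest basic leaf (CPUID.0H:EAX) had been requested, a class only exists if the guest has an entry for its <base> leaf, and CPUID.<base>.EAX holds that class's maximum leaf. A deliberately simplified analogue of get_out_of_range_cpuid_entry() over a plain entry array; the class selector here is cruder than KVM's, the AMD-semantics branch is omitted, and the requested index is ignored:

    #include <stddef.h>
    #include <linux/kvm.h>

    /* Find the index-0 entry for a function, or NULL if it is absent. */
    static const struct kvm_cpuid_entry2 *
    find_entry(const struct kvm_cpuid_entry2 *e, unsigned int nent,
               __u32 function)
    {
        unsigned int i;

        for (i = 0; i < nent; i++)
            if (e[i].function == function && e[i].index == 0)
                return &e[i];
        return NULL;
    }

    /*
     * Classes: basic 0x0, hypervisor 0x40000000, extended 0x80000000,
     * Centaur 0xc0000000.  A class exists only if its <base> leaf has an
     * entry, and that entry's EAX is the class's maximum leaf.  Anything
     * outside its class falls back, Intel-style, to the leaf named by
     * the max basic leaf (CPUID.0H:EAX).
     */
    static const struct kvm_cpuid_entry2 *
    resolve_leaf(const struct kvm_cpuid_entry2 *e, unsigned int nent,
                 __u32 function)
    {
        __u32 base = function & 0xc0000000;   /* crude class selector */
        const struct kvm_cpuid_entry2 *class = find_entry(e, nent, base);
        const struct kvm_cpuid_entry2 *basic = find_entry(e, nent, 0);

        if (class && function <= class->eax)
            return find_entry(e, nent, function);   /* in range */

        /* Class missing or function out of range: use max basic leaf. */
        return basic ? find_entry(e, nent, basic->eax) : NULL;
    }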