/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include <asm/cpu.h>
#include <asm/processor.h>

int kvm_update_cpuid(struct kvm_vcpu *vcpu);
bool kvm_mpx_supported(void);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool check_limit);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);

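/*
 * Return the guest's physical address width as cached in
 * vcpu->arch.maxphyaddr; cpuid_query_maxphyaddr() performs the
 * uncached lookup against the guest's CPUID entries.
 */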
static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

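/*
 * Reverse CPUID map: for each hardware-defined feature word (the
 * CPUID_* indexes used by the X86_FEATURE_* definitions), record the
 * CPUID function, index and output register that the word is read
 * from, so a feature bit can be located in the guest's CPUID entries.
 */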
struct cpuid_reg {
	u32 function;
	u32 index;
	int reg;
};

static const struct cpuid_reg reverse_cpuid[] = {
	[CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
	[CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
	[CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
	[CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
	[CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
	[CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
	[CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
	[CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
	[CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
	[CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
	[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
	[CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
	[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
	[CPUID_7_EDX]         = {         7, 0, CPUID_EDX},
	[CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
};

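/*
 * Translate an X86_FEATURE_* bit number into the CPUID leaf and
 * register that hold it.  The BUILD_BUG_ON()s reject feature words
 * that reverse_cpuid does not cover.
 */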
static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
{
	unsigned x86_leaf = x86_feature / 32;

	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);

	return reverse_cpuid[x86_leaf];
}

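/*
 * Return a pointer to the guest CPUID register (eax/ebx/ecx/edx) that
 * contains the given feature bit, or NULL if the guest does not have
 * the corresponding CPUID entry.
 */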
static __always_inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsigned x86_feature)
{
	struct kvm_cpuid_entry2 *entry;
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);

	entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
	if (!entry)
		return NULL;

	switch (cpuid.reg) {
	case CPUID_EAX:
		return &entry->eax;
	case CPUID_EBX:
		return &entry->ebx;
	case CPUID_ECX:
		return &entry->ecx;
	case CPUID_EDX:
		return &entry->edx;
	default:
		BUILD_BUG();
		return NULL;
	}
}

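/*
 * Check whether the guest's CPUID advertises an X86_FEATURE_* bit.
 * XSAVE is special-cased: it is never reported as present if the host
 * itself lacks XSAVE support.
 */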
static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_feature)
{
	int *reg;

	if (x86_feature == X86_FEATURE_XSAVE &&
	    !static_cpu_has(X86_FEATURE_XSAVE))
		return false;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (!reg)
		return false;

	return *reg & bit(x86_feature);
}

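/* Clear a feature bit in the guest's CPUID entry, if the entry exists. */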
static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu, unsigned x86_feature)
{
	int *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (reg)
		*reg &= ~bit(x86_feature);
}

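/*
 * Detect an AMD guest from the EBX portion of the vendor string
 * ("Auth" of "AuthenticAMD") in CPUID leaf 0.
 */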
static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
}

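/*
 * Decode the guest's family/model/stepping from CPUID.1.EAX; each
 * helper returns -1 if the guest has no leaf 0x1 entry.
 */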
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}

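/*
 * The guest can access MSR_IA32_SPEC_CTRL if its CPUID exposes any of
 * the bits that imply the MSR (Intel SPEC_CTRL or the AMD
 * STIBP/IBRS/SSBD bits).
 */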
static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
}

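/*
 * The guest can access MSR_IA32_PRED_CMD (IBPB) if either SPEC_CTRL
 * or AMD_IBPB is exposed in its CPUID.
 */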
static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB));
}

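/* True if the vCPU's MSR_PLATFORM_INFO value advertises CPUID faulting. */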
static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}

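/*
 * True if the guest has enabled CPUID faulting in
 * MSR_MISC_FEATURES_ENABLES, i.e. CPUID executed at CPL > 0 should #GP.
 */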
static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_misc_features_enables &
		MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

#endif