#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"

int kvm_update_cpuid(struct kvm_vcpu *vcpu);
bool kvm_mpx_supported(void);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
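
/*
 * The guest_cpuid_has_*() helpers below all follow the same pattern:
 * look up the relevant CPUID leaf/index with kvm_find_cpuid_entry()
 * and test one feature bit in the cached entry.  The entries reflect
 * what userspace configured via KVM_SET_CPUID{,2}, not the host's own
 * CPUID; where KVM also needs host support (e.g. XSAVE), the helper
 * additionally checks the host with static_cpu_has().
 */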

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);

static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}
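
/*
 * Sketch of a typical caller (hypothetical, not part of this header):
 * a guest physical address is malformed if it sets bits at or above
 * the guest's MAXPHYADDR, e.g.
 *
 *	if (gpa >> cpuid_maxphyaddr(vcpu))
 *		... inject an error into the guest ...
 */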

static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	if (!static_cpu_has(X86_FEATURE_XSAVE))
		return false;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}

static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->edx & bit(X86_FEATURE_MTRR));
}

static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
}

static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMEP));
}

static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMAP));
}

static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
}
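
/*
 * Long mode is enumerated in extended leaf 0x80000001 EDX; this is
 * consulted wherever KVM must know whether the guest may run in
 * 64-bit (long) mode.
 */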
static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_LM));
}

static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->ecx & bit(X86_FEATURE_OSVW));
}

static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_PCID));
}

static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_X2APIC));
}
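
/*
 * Leaf 0 EBX holds the first four bytes of the vendor string, so
 * comparing EBX alone ("Auth" for "AuthenticAMD") is sufficient here.
 */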
static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
}

static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_GBPAGES));
}

static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_RTM));
}

static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_PCOMMIT));
}

static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_MPX));
}

static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_RDTSCP));
}
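
/*
 * Speculation-control features are enumerated differently per vendor:
 * AMD uses leaf 0x80000008 EBX (AMD_IBPB/AMD_IBRS/AMD_SSBD), Intel
 * uses leaf 7 EDX (SPEC_CTRL/SPEC_CTRL_SSBD).  The two helpers below
 * therefore accept either enumeration.
 */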
static inline bool guest_cpuid_has_ibpb(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best && (best->ebx & bit(X86_FEATURE_AMD_IBPB)))
		return true;
	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->edx & bit(X86_FEATURE_SPEC_CTRL));
}

static inline bool guest_cpuid_has_spec_ctrl(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best && (best->ebx & (bit(X86_FEATURE_AMD_IBRS) | bit(X86_FEATURE_AMD_SSBD))))
		return true;
	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->edx & (bit(X86_FEATURE_SPEC_CTRL) | bit(X86_FEATURE_SPEC_CTRL_SSBD)));
}

static inline bool guest_cpuid_has_arch_capabilities(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->edx & bit(X86_FEATURE_ARCH_CAPABILITIES));
}
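
/*
 * VIRT_SSBD means the guest can control Speculative Store Bypass
 * Disable through the virtualized MSR_AMD64_VIRT_SPEC_CTRL interface.
 */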
static inline bool guest_cpuid_has_virt_ssbd(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	return best && (best->ebx & bit(X86_FEATURE_VIRT_SSBD));
}

/*
 * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
 */
#define BIT_NRIPS	3

static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0);

	/*
	 * NRIPS is a scattered cpuid feature, so we can't use
	 * X86_FEATURE_NRIPS here (X86_FEATURE_NRIPS would be bit
	 * position 8, not 3).
	 */
	return best && (best->edx & bit(BIT_NRIPS));
}
#undef BIT_NRIPS

#endif