/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include "reverse_cpuid.h"
#include <asm/cpu.h>
#include <asm/processor.h>
#include <uapi/asm/kvm_para.h>

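/*
 * kvm_cpu_caps holds the CPUID feature words KVM supports exposing to
 * guests, one u32 per reverse-CPUID leaf.  kvm_set_cpu_caps() populates it
 * from host capabilities, and the kvm_cpu_cap_*() helpers below adjust and
 * query individual feature bits.
 */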
extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
void kvm_set_cpu_caps(void);

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu);

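/*
 * Return the guest's cached physical address width (MAXPHYADDR), computed
 * from the guest's CPUID entries when userspace sets them.
 */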
static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

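/*
 * A GPA is "legal" if it sets none of the guest's reserved GPA bits, i.e.
 * none of the address bits at or above the guest's MAXPHYADDR.  The helpers
 * below build on that definition for the inverse, alignment and full-page
 * checks.
 */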
static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return !(gpa & vcpu->arch.reserved_gpa_bits);
}

static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return !kvm_vcpu_is_legal_gpa(vcpu, gpa);
}

static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu,
						 gpa_t gpa, gpa_t alignment)
{
	return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa);
}

static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE);
}

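/*
 * Overwrite the register that holds feature word @leaf in @entry with KVM's
 * supported capabilities for that word.  @leaf must be a compile-time
 * constant; BUILD_BUG_ON() rejects out-of-range words at build time.
 */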
static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
						 unsigned int leaf)
{
	u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);

	BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
	*reg = kvm_cpu_caps[leaf];
}

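/*
 * Map @x86_feature to its CPUID leaf via the reverse-CPUID table and return
 * a pointer to the relevant register in the guest's CPUID entries, or NULL
 * if the guest's CPUID doesn't contain the leaf.
 */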
static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
	struct kvm_cpuid_entry2 *entry;

	entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
	if (!entry)
		return NULL;

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

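/*
 * Check whether the guest's CPUID, as set by userspace, advertises
 * @x86_feature; a missing leaf reads as "not supported".  Typical usage
 * (illustrative):
 *
 *	if (guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
 *		... handle XSAVE-dependent behavior ...
 *
 * guest_cpuid_clear() strips a feature bit from the guest's CPUID in place.
 */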
static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
					    unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (!reg)
		return false;

	return *reg & __feature_bit(x86_feature);
}

static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
					      unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (reg)
		*reg &= ~__feature_bit(x86_feature);
}

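/*
 * Identify the guest's CPU vendor from the EBX/ECX/EDX signature string in
 * CPUID leaf 0, e.g. "AuthenticAMD", "HygonGenuine" or "GenuineIntel".
 */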
static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best &&
	       (is_guest_vendor_amd(best->ebx, best->ecx, best->edx) ||
		is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
}

static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
}

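/*
 * Decode the guest's family, model and stepping from the version signature
 * in CPUID leaf 0x1 EAX.  Each helper returns -1 if the leaf is absent.
 */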
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}

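/*
 * MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD are accessible to the guest if
 * any one of the speculation-control features below is enumerated, as both
 * the Intel and AMD feature bits imply the respective MSR exists.
 */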
static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
}

static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB));
}

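/*
 * CPUID faulting: supports_cpuid_fault() reports whether the (userspace
 * controlled) platform info MSR advertises the capability, and
 * cpuid_fault_enabled() reports whether the guest has actually turned it
 * on via MSR_MISC_FEATURES_ENABLES.
 */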
static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}

static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_misc_features_enables &
		  MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

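/*
 * Accessors for kvm_cpu_caps.  These are __always_inline so that
 * reverse_cpuid_check() and __feature_bit() operate on a compile-time
 * constant feature and the bit manipulation reduces to a constant mask.
 */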
static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
}

static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
}

static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
}

static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
{
	return !!kvm_cpu_cap_get(x86_feature);
}

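/*
 * Advertise @x86_feature to guests if and only if the boot CPU actually
 * has it, e.g. (illustrative call site):
 *
 *	kvm_cpu_cap_check_and_set(X86_FEATURE_XSAVES);
 */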
static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
{
	if (boot_cpu_has(x86_feature))
		kvm_cpu_cap_set(x86_feature);
}

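/*
 * Check a KVM paravirt feature (KVM_FEATURE_*).  Unless userspace has opted
 * in to enforcement (KVM_CAP_ENFORCE_PV_FEATURE_CPUID), all PV features are
 * treated as available regardless of the guest's CPUID.
 */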
static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
					 unsigned int kvm_feature)
{
	if (!vcpu->arch.pv_cpuid.enforce)
		return true;

	return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
}

#endif