/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_REVERSE_CPUID_H
#define ARCH_X86_KVM_REVERSE_CPUID_H

#include <uapi/asm/kvm.h>
#include <asm/cpufeature.h>
#include <asm/cpufeatures.h>

/*
 * Hardware-defined CPUID leafs that are scattered in the kernel, but need to
 * be directly used by KVM.  Note, these word values conflict with the kernel's
 * "bug" caps, but KVM doesn't use those.
 */
enum kvm_only_cpuid_leafs {
	CPUID_12_EAX	 = NCAPINTS,
	NR_KVM_CPU_CAPS,

	NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
};
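
/*
 * Note: because CPUID_12_EAX is defined as NCAPINTS, the KVM-only words are
 * appended immediately after the kernel's cpufeatures words, and NKVMCAPINTS
 * is simply the count of those extra words.
 */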

#define KVM_X86_FEATURE(w, f)		((w)*32 + (f))

/* Intel-defined SGX sub-features, CPUID level 0x12 (EAX). */
#define KVM_X86_FEATURE_SGX1		KVM_X86_FEATURE(CPUID_12_EAX, 0)
#define KVM_X86_FEATURE_SGX2		KVM_X86_FEATURE(CPUID_12_EAX, 1)
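
/*
 * For example, KVM_X86_FEATURE_SGX1 expands to CPUID_12_EAX * 32 + 0, i.e.
 * word CPUID_12_EAX, bit 0, mirroring how the kernel's X86_FEATURE_* values
 * encode a word index and a bit number.
 */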

struct cpuid_reg {
	u32 function;
	u32 index;
	int reg;
};

static const struct cpuid_reg reverse_cpuid[] = {
	[CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
	[CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
	[CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
	[CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
	[CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
	[CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
	[CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
	[CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
	[CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
	[CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
	[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
	[CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
	[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
	[CPUID_7_EDX]         = {         7, 0, CPUID_EDX},
	[CPUID_7_1_EAX]       = {         7, 1, CPUID_EAX},
	[CPUID_12_EAX]        = {0x00000012, 0, CPUID_EAX},
	[CPUID_8000_001F_EAX] = {0x8000001f, 0, CPUID_EAX},
	[CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
};

/*
 * Reverse CPUID and its derivatives can only be used for hardware-defined
 * feature words, i.e. words whose bits directly correspond to a CPUID leaf.
 * Retrieving a feature bit or masking guest CPUID from a Linux-defined word
 * is nonsensical as the bit number/mask is an arbitrary software-defined value
 * and can't be used by KVM to query/control guest capabilities.  And obviously
 * the leaf being queried must have an entry in the lookup table.
 */
static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
{
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
}

/*
 * Translate feature bits that are scattered in the kernel's cpufeatures word
 * into KVM feature words that align with hardware's definitions.
 */
static __always_inline u32 __feature_translate(int x86_feature)
{
	if (x86_feature == X86_FEATURE_SGX1)
		return KVM_X86_FEATURE_SGX1;
	else if (x86_feature == X86_FEATURE_SGX2)
		return KVM_X86_FEATURE_SGX2;

	return x86_feature;
}

static __always_inline u32 __feature_leaf(int x86_feature)
{
	return __feature_translate(x86_feature) / 32;
}
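
/*
 * E.g. __feature_translate(X86_FEATURE_SGX1) returns KVM_X86_FEATURE_SGX1,
 * so __feature_leaf(X86_FEATURE_SGX1) yields CPUID_12_EAX rather than the
 * scattered Linux-defined word that X86_FEATURE_SGX1 lives in.
 */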

/*
 * Retrieve the bit mask from an X86_FEATURE_* definition.  Features contain
 * the hardware defined bit number (stored in bits 4:0) and a software defined
 * "word" (stored in bits 31:5).  The word is used to index into arrays of
 * bit masks that hold the per-cpu feature capabilities, e.g. this_cpu_has().
 */
static __always_inline u32 __feature_bit(int x86_feature)
{
	x86_feature = __feature_translate(x86_feature);

	reverse_cpuid_check(x86_feature / 32);
	return 1 << (x86_feature & 31);
}

#define feature_bit(name)  __feature_bit(X86_FEATURE_##name)
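
/*
 * For example, feature_bit(SGX1) becomes __feature_bit(X86_FEATURE_SGX1),
 * which is translated to KVM_X86_FEATURE_SGX1 (word CPUID_12_EAX, bit 0)
 * and therefore evaluates to 1 << 0.
 */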

static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	return reverse_cpuid[x86_leaf];
}
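
/*
 * E.g. x86_feature_cpuid(X86_FEATURE_SGX1) resolves to the CPUID_12_EAX
 * entry above, i.e. { .function = 0x12, .index = 0, .reg = CPUID_EAX }.
 */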

static __always_inline u32 *__cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
						  u32 reg)
{
	switch (reg) {
	case CPUID_EAX:
		return &entry->eax;
	case CPUID_EBX:
		return &entry->ebx;
	case CPUID_ECX:
		return &entry->ecx;
	case CPUID_EDX:
		return &entry->edx;
	default:
		BUILD_BUG();
		return NULL;
	}
}

static __always_inline u32 *cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
						unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

static __always_inline u32 cpuid_entry_get(struct kvm_cpuid_entry2 *entry,
					   unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	return *reg & __feature_bit(x86_feature);
}

static __always_inline bool cpuid_entry_has(struct kvm_cpuid_entry2 *entry,
					    unsigned int x86_feature)
{
	return cpuid_entry_get(entry, x86_feature);
}

static __always_inline void cpuid_entry_clear(struct kvm_cpuid_entry2 *entry,
					      unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	*reg &= ~__feature_bit(x86_feature);
}

static __always_inline void cpuid_entry_set(struct kvm_cpuid_entry2 *entry,
					    unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	*reg |= __feature_bit(x86_feature);
}

static __always_inline void cpuid_entry_change(struct kvm_cpuid_entry2 *entry,
					       unsigned int x86_feature,
					       bool set)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	/*
	 * Open coded instead of using cpuid_entry_{clear,set}() to coerce the
	 * compiler into using CMOV instead of Jcc when possible.
	 */
	if (set)
		*reg |= __feature_bit(x86_feature);
	else
		*reg &= ~__feature_bit(x86_feature);
}
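
/*
 * Illustrative usage (with a hypothetical "sgx2_supported" flag): given the
 * leaf 0x12 entry, cpuid_entry_change(entry, X86_FEATURE_SGX2, sgx2_supported)
 * sets or clears bit 1 of entry->eax, since X86_FEATURE_SGX2 translates to
 * KVM_X86_FEATURE_SGX2 (word CPUID_12_EAX, bit 1).
 */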

#endif /* ARCH_X86_KVM_REVERSE_CPUID_H */