/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_EVMCS_H
#define __KVM_X86_VMX_EVMCS_H

#include <linux/jump_label.h>

#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/vmx.h>

#include "capabilities.h"
#include "vmcs.h"
#include "vmcs12.h"

struct vmcs_config;

DECLARE_STATIC_KEY_FALSE(enable_evmcs);

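/*
 * current_evmcs reinterprets the per-cpu current_vmcs pointer as a Hyper-V
 * enlightened VMCS.  It is only meaningful when KVM itself runs on Hyper-V
 * with the enable_evmcs static key enabled and a VMCS is loaded.
 */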
#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))

#define KVM_EVMCS_VERSION 1

/*
 * Enlightened VMCSv1 doesn't support these:
 *
 *	POSTED_INTR_NV                  = 0x00000002,
 *	GUEST_INTR_STATUS               = 0x00000810,
 *	APIC_ACCESS_ADDR                = 0x00002014,
 *	POSTED_INTR_DESC_ADDR           = 0x00002016,
 *	EOI_EXIT_BITMAP0                = 0x0000201c,
 *	EOI_EXIT_BITMAP1                = 0x0000201e,
 *	EOI_EXIT_BITMAP2                = 0x00002020,
 *	EOI_EXIT_BITMAP3                = 0x00002022,
 *	GUEST_PML_INDEX                 = 0x00000812,
 *	PML_ADDRESS                     = 0x0000200e,
 *	VM_FUNCTION_CONTROL             = 0x00002018,
 *	EPTP_LIST_ADDRESS               = 0x00002024,
 *	VMREAD_BITMAP                   = 0x00002026,
 *	VMWRITE_BITMAP                  = 0x00002028,
 *
 *	TSC_MULTIPLIER                  = 0x00002032,
 *	PLE_GAP                         = 0x00004020,
 *	PLE_WINDOW                      = 0x00004022,
 *	VMX_PREEMPTION_TIMER_VALUE      = 0x0000482E,
 *	GUEST_IA32_PERF_GLOBAL_CTRL     = 0x00002808,
 *	HOST_IA32_PERF_GLOBAL_CTRL      = 0x00002c04,
 *
 * Currently unsupported in KVM:
 *	GUEST_IA32_RTIT_CTL             = 0x00002814,
 */
#define EVMCS1_UNSUPPORTED_PINCTRL (PIN_BASED_POSTED_INTR | \
				    PIN_BASED_VMX_PREEMPTION_TIMER)
#define EVMCS1_UNSUPPORTED_2NDEXEC					\
	(SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |				\
	 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |			\
	 SECONDARY_EXEC_APIC_REGISTER_VIRT |				\
	 SECONDARY_EXEC_ENABLE_PML |					\
	 SECONDARY_EXEC_ENABLE_VMFUNC |					\
	 SECONDARY_EXEC_SHADOW_VMCS |					\
	 SECONDARY_EXEC_TSC_SCALING |					\
	 SECONDARY_EXEC_PAUSE_LOOP_EXITING)
#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL					\
	(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |				\
	 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
#define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
#define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING)
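
/*
 * A sketch of how these masks are typically consumed (the real code lives in
 * evmcs.c; see nested_evmcs_filter_control_msr() and
 * nested_evmcs_check_controls() declared at the bottom of this header): when
 * eVMCS is enabled for L1, the unsupported controls are masked out of the
 * allowed-1 (upper 32 bit) half of the corresponding VMX capability MSR,
 * e.g. roughly, with ctl_high holding the upper half of *pdata:
 *
 *	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
 *		ctl_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
 *		break;
 *
 * A vmcs12 that still sets any of the unsupported controls fails
 * nested_evmcs_check_controls().
 */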

#if IS_ENABLED(CONFIG_HYPERV)

struct evmcs_field {
	u16 offset;
	u16 clean_field;
};

extern const struct evmcs_field vmcs_field_to_evmcs_1[];
extern const unsigned int nr_evmcs_1_fields;

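/*
 * The lookup below follows the same scheme as vmcs_field_to_offset() in
 * vmcs12.h: rotating the 16-bit VMCS field encoding left by 6 moves the
 * width/type bits into the low bits, yielding a compact index into
 * vmcs_field_to_evmcs_1[].  Each entry gives the field's byte offset within
 * struct hv_enlightened_vmcs and the "clean field" bit that writers must
 * clear in hv_clean_fields.
 */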
static __always_inline int get_evmcs_offset(unsigned long field,
					    u16 *clean_field)
{
	unsigned int index = ROL16(field, 6);
	const struct evmcs_field *evmcs_field;

	if (unlikely(index >= nr_evmcs_1_fields)) {
		WARN_ONCE(1, "KVM: accessing unsupported EVMCS field %lx\n",
			  field);
		return -ENOENT;
	}

	evmcs_field = &vmcs_field_to_evmcs_1[index];

	if (clean_field)
		*clean_field = evmcs_field->clean_field;

	return evmcs_field->offset;
}
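
/*
 * Illustrative usage (a sketch of the callers, not part of this header): the
 * generic VMCS accessors in vmx fall back to the eVMCS helpers below when KVM
 * itself runs on an enlightened VMCS, e.g. roughly:
 *
 *	if (static_branch_unlikely(&enable_evmcs))
 *		return evmcs_write64(field, value);
 *	__vmcs_writel(field, value);
 *
 * Writes clear the relevant hv_clean_fields bit so Hyper-V reloads that field
 * group on the next VM-entry; accesses to fields unknown to eVMCSv1 trigger a
 * WARN_ONCE, and reads of such fields return 0.
 */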

static inline void evmcs_write64(unsigned long field, u64 value)
{
	u16 clean_field;
	int offset = get_evmcs_offset(field, &clean_field);

	if (offset < 0)
		return;

	*(u64 *)((char *)current_evmcs + offset) = value;

	current_evmcs->hv_clean_fields &= ~clean_field;
}

static inline void evmcs_write32(unsigned long field, u32 value)
{
	u16 clean_field;
	int offset = get_evmcs_offset(field, &clean_field);

	if (offset < 0)
		return;

	*(u32 *)((char *)current_evmcs + offset) = value;
	current_evmcs->hv_clean_fields &= ~clean_field;
}

static inline void evmcs_write16(unsigned long field, u16 value)
{
	u16 clean_field;
	int offset = get_evmcs_offset(field, &clean_field);

	if (offset < 0)
		return;

	*(u16 *)((char *)current_evmcs + offset) = value;
	current_evmcs->hv_clean_fields &= ~clean_field;
}

static inline u64 evmcs_read64(unsigned long field)
{
	int offset = get_evmcs_offset(field, NULL);

	if (offset < 0)
		return 0;

	return *(u64 *)((char *)current_evmcs + offset);
}

static inline u32 evmcs_read32(unsigned long field)
{
	int offset = get_evmcs_offset(field, NULL);

	if (offset < 0)
		return 0;

	return *(u32 *)((char *)current_evmcs + offset);
}

static inline u16 evmcs_read16(unsigned long field)
{
	int offset = get_evmcs_offset(field, NULL);

	if (offset < 0)
		return 0;

	return *(u16 *)((char *)current_evmcs + offset);
}

/*
 * "Loading" an enlightened VMCS does not use VMPTRLD: its physical address is
 * written into the current CPU's Hyper-V VP assist page and enlighten_vmentry
 * is set, so the hypervisor picks it up on the next VM-entry.
 */
static inline void evmcs_load(u64 phys_addr)
{
	struct hv_vp_assist_page *vp_ap =
		hv_get_vp_assist_page(smp_processor_id());

	if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall)
		vp_ap->nested_control.features.directhypercall = 1;
	vp_ap->current_nested_vmcs = phys_addr;
	vp_ap->enlighten_vmentry = 1;
}

__init void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
#else /* !IS_ENABLED(CONFIG_HYPERV) */
static inline void evmcs_write64(unsigned long field, u64 value) {}
static inline void evmcs_write32(unsigned long field, u32 value) {}
static inline void evmcs_write16(unsigned long field, u16 value) {}
static inline u64 evmcs_read64(unsigned long field) { return 0; }
static inline u32 evmcs_read32(unsigned long field) { return 0; }
static inline u16 evmcs_read16(unsigned long field) { return 0; }
static inline void evmcs_load(u64 phys_addr) {}
#endif /* IS_ENABLED(CONFIG_HYPERV) */

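/*
 * Sentinel values for the nested eVMCS pointer (hv_evmcs_vmptr):
 * EVMPTR_INVALID means no enlightened VMCS is in use, EVMPTR_MAP_PENDING
 * means the guest physical address is known but the eVMCS has not been
 * mapped yet (e.g. right after restoring nested state).  Anything else is
 * treated as a valid eVMCS GPA.
 */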
#define EVMPTR_INVALID (-1ULL)
#define EVMPTR_MAP_PENDING (-2ULL)

static inline bool evmptr_is_valid(u64 evmptr)
{
	return evmptr != EVMPTR_INVALID && evmptr != EVMPTR_MAP_PENDING;
}

enum nested_evmptrld_status {
	EVMPTRLD_DISABLED,
	EVMPTRLD_SUCCEEDED,
	EVMPTRLD_VMFAIL,
	EVMPTRLD_ERROR,
};

bool nested_enlightened_vmentry(struct kvm_vcpu *vcpu, u64 *evmcs_gpa);
uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu);
int nested_enable_evmcs(struct kvm_vcpu *vcpu,
			uint16_t *vmcs_version);
void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata);
int nested_evmcs_check_controls(struct vmcs12 *vmcs12);

#endif /* __KVM_X86_VMX_EVMCS_H */