/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * tools/testing/selftests/kvm/include/x86_64/vmx.h
 *
 * Copyright (C) 2018, Google LLC.
 */

#ifndef SELFTEST_KVM_VMX_H
#define SELFTEST_KVM_VMX_H

#include <stdint.h>
#include "processor.h"

#define CPUID_VMX_BIT 5

#define CPUID_VMX (1 << 5)

/*
 * Definitions of Primary Processor-Based VM-Execution Controls.
 */
#define CPU_BASED_INTR_WINDOW_EXITING 0x00000004
#define CPU_BASED_USE_TSC_OFFSETTING 0x00000008
#define CPU_BASED_HLT_EXITING 0x00000080
#define CPU_BASED_INVLPG_EXITING 0x00000200
#define CPU_BASED_MWAIT_EXITING 0x00000400
#define CPU_BASED_RDPMC_EXITING 0x00000800
#define CPU_BASED_RDTSC_EXITING 0x00001000
#define CPU_BASED_CR3_LOAD_EXITING 0x00008000
#define CPU_BASED_CR3_STORE_EXITING 0x00010000
#define CPU_BASED_CR8_LOAD_EXITING 0x00080000
#define CPU_BASED_CR8_STORE_EXITING 0x00100000
#define CPU_BASED_TPR_SHADOW 0x00200000
#define CPU_BASED_NMI_WINDOW_EXITING 0x00400000
#define CPU_BASED_MOV_DR_EXITING 0x00800000
#define CPU_BASED_UNCOND_IO_EXITING 0x01000000
#define CPU_BASED_USE_IO_BITMAPS 0x02000000
#define CPU_BASED_MONITOR_TRAP 0x08000000
#define CPU_BASED_USE_MSR_BITMAPS 0x10000000
#define CPU_BASED_MONITOR_EXITING 0x20000000
#define CPU_BASED_PAUSE_EXITING 0x40000000
#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000

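/*
 * The *_ALWAYSON_WITHOUT_TRUE_MSR values below are the control bits that
 * default to 1 when the "true" capability MSRs (IA32_VMX_TRUE_*_CTLS) are
 * not consulted, i.e. the minimum that must be set in the corresponding
 * VM-execution, VM-exit and VM-entry control fields.
 */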
#define CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x0401e172

/*
 * Definitions of Secondary Processor-Based VM-Execution Controls.
 */
#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
#define SECONDARY_EXEC_ENABLE_EPT 0x00000002
#define SECONDARY_EXEC_DESC 0x00000004
#define SECONDARY_EXEC_RDTSCP 0x00000008
#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010
#define SECONDARY_EXEC_ENABLE_VPID 0x00000020
#define SECONDARY_EXEC_WBINVD_EXITING 0x00000040
#define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080
#define SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100
#define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200
#define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400
#define SECONDARY_EXEC_RDRAND_EXITING 0x00000800
#define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000
#define SECONDARY_EXEC_ENABLE_VMFUNC 0x00002000
#define SECONDARY_EXEC_SHADOW_VMCS 0x00004000
#define SECONDARY_EXEC_RDSEED_EXITING 0x00010000
#define SECONDARY_EXEC_ENABLE_PML 0x00020000
#define SECONDARY_EPT_VE 0x00040000
#define SECONDARY_ENABLE_XSAV_RESTORE 0x00100000
#define SECONDARY_EXEC_TSC_SCALING 0x02000000

#define PIN_BASED_EXT_INTR_MASK 0x00000001
#define PIN_BASED_NMI_EXITING 0x00000008
#define PIN_BASED_VIRTUAL_NMIS 0x00000020
#define PIN_BASED_VMX_PREEMPTION_TIMER 0x00000040
#define PIN_BASED_POSTED_INTR 0x00000080

#define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x00000016

#define VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004
#define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200
#define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000
#define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000
#define VM_EXIT_SAVE_IA32_PAT 0x00040000
#define VM_EXIT_LOAD_IA32_PAT 0x00080000
#define VM_EXIT_SAVE_IA32_EFER 0x00100000
#define VM_EXIT_LOAD_IA32_EFER 0x00200000
#define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000

#define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR 0x00036dff

#define VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004
#define VM_ENTRY_IA32E_MODE 0x00000200
#define VM_ENTRY_SMM 0x00000400
#define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800
#define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000
#define VM_ENTRY_LOAD_IA32_PAT 0x00004000
#define VM_ENTRY_LOAD_IA32_EFER 0x00008000

#define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR 0x000011ff

#define VMX_MISC_PREEMPTION_TIMER_RATE_MASK 0x0000001f
#define VMX_MISC_SAVE_EFER_LMA 0x00000020

#define EXIT_REASON_FAILED_VMENTRY 0x80000000
#define EXIT_REASON_EXCEPTION_NMI 0
#define EXIT_REASON_EXTERNAL_INTERRUPT 1
#define EXIT_REASON_TRIPLE_FAULT 2
#define EXIT_REASON_INTERRUPT_WINDOW 7
#define EXIT_REASON_NMI_WINDOW 8
#define EXIT_REASON_TASK_SWITCH 9
#define EXIT_REASON_CPUID 10
#define EXIT_REASON_HLT 12
#define EXIT_REASON_INVD 13
#define EXIT_REASON_INVLPG 14
#define EXIT_REASON_RDPMC 15
#define EXIT_REASON_RDTSC 16
#define EXIT_REASON_VMCALL 18
#define EXIT_REASON_VMCLEAR 19
#define EXIT_REASON_VMLAUNCH 20
#define EXIT_REASON_VMPTRLD 21
#define EXIT_REASON_VMPTRST 22
#define EXIT_REASON_VMREAD 23
#define EXIT_REASON_VMRESUME 24
#define EXIT_REASON_VMWRITE 25
#define EXIT_REASON_VMOFF 26
#define EXIT_REASON_VMON 27
#define EXIT_REASON_CR_ACCESS 28
#define EXIT_REASON_DR_ACCESS 29
#define EXIT_REASON_IO_INSTRUCTION 30
#define EXIT_REASON_MSR_READ 31
#define EXIT_REASON_MSR_WRITE 32
#define EXIT_REASON_INVALID_STATE 33
#define EXIT_REASON_MWAIT_INSTRUCTION 36
#define EXIT_REASON_MONITOR_INSTRUCTION 39
#define EXIT_REASON_PAUSE_INSTRUCTION 40
#define EXIT_REASON_MCE_DURING_VMENTRY 41
#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
#define EXIT_REASON_APIC_ACCESS 44
#define EXIT_REASON_EOI_INDUCED 45
#define EXIT_REASON_EPT_VIOLATION 48
#define EXIT_REASON_EPT_MISCONFIG 49
#define EXIT_REASON_INVEPT 50
#define EXIT_REASON_RDTSCP 51
#define EXIT_REASON_PREEMPTION_TIMER 52
#define EXIT_REASON_INVVPID 53
#define EXIT_REASON_WBINVD 54
#define EXIT_REASON_XSETBV 55
#define EXIT_REASON_APIC_WRITE 56
#define EXIT_REASON_INVPCID 58
#define EXIT_REASON_PML_FULL 62
#define EXIT_REASON_XSAVES 63
#define EXIT_REASON_XRSTORS 64
#define LAST_EXIT_REASON 64

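/*
 * VMCS field encodings follow the SDM layout: bit 0 selects the high 32 bits
 * of a 64-bit field (the *_HIGH values below), bits 14:13 give the field
 * width and bits 11:10 the field type (control, read-only data, guest state
 * or host state).
 */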
enum vmcs_field {
        VIRTUAL_PROCESSOR_ID = 0x00000000,
        POSTED_INTR_NV = 0x00000002,
        GUEST_ES_SELECTOR = 0x00000800,
        GUEST_CS_SELECTOR = 0x00000802,
        GUEST_SS_SELECTOR = 0x00000804,
        GUEST_DS_SELECTOR = 0x00000806,
        GUEST_FS_SELECTOR = 0x00000808,
        GUEST_GS_SELECTOR = 0x0000080a,
        GUEST_LDTR_SELECTOR = 0x0000080c,
        GUEST_TR_SELECTOR = 0x0000080e,
        GUEST_INTR_STATUS = 0x00000810,
        GUEST_PML_INDEX = 0x00000812,
        HOST_ES_SELECTOR = 0x00000c00,
        HOST_CS_SELECTOR = 0x00000c02,
        HOST_SS_SELECTOR = 0x00000c04,
        HOST_DS_SELECTOR = 0x00000c06,
        HOST_FS_SELECTOR = 0x00000c08,
        HOST_GS_SELECTOR = 0x00000c0a,
        HOST_TR_SELECTOR = 0x00000c0c,
        IO_BITMAP_A = 0x00002000,
        IO_BITMAP_A_HIGH = 0x00002001,
        IO_BITMAP_B = 0x00002002,
        IO_BITMAP_B_HIGH = 0x00002003,
        MSR_BITMAP = 0x00002004,
        MSR_BITMAP_HIGH = 0x00002005,
        VM_EXIT_MSR_STORE_ADDR = 0x00002006,
        VM_EXIT_MSR_STORE_ADDR_HIGH = 0x00002007,
        VM_EXIT_MSR_LOAD_ADDR = 0x00002008,
        VM_EXIT_MSR_LOAD_ADDR_HIGH = 0x00002009,
        VM_ENTRY_MSR_LOAD_ADDR = 0x0000200a,
        VM_ENTRY_MSR_LOAD_ADDR_HIGH = 0x0000200b,
        PML_ADDRESS = 0x0000200e,
        PML_ADDRESS_HIGH = 0x0000200f,
        TSC_OFFSET = 0x00002010,
        TSC_OFFSET_HIGH = 0x00002011,
        VIRTUAL_APIC_PAGE_ADDR = 0x00002012,
        VIRTUAL_APIC_PAGE_ADDR_HIGH = 0x00002013,
        APIC_ACCESS_ADDR = 0x00002014,
        APIC_ACCESS_ADDR_HIGH = 0x00002015,
        POSTED_INTR_DESC_ADDR = 0x00002016,
        POSTED_INTR_DESC_ADDR_HIGH = 0x00002017,
        EPT_POINTER = 0x0000201a,
        EPT_POINTER_HIGH = 0x0000201b,
        EOI_EXIT_BITMAP0 = 0x0000201c,
        EOI_EXIT_BITMAP0_HIGH = 0x0000201d,
        EOI_EXIT_BITMAP1 = 0x0000201e,
        EOI_EXIT_BITMAP1_HIGH = 0x0000201f,
        EOI_EXIT_BITMAP2 = 0x00002020,
        EOI_EXIT_BITMAP2_HIGH = 0x00002021,
        EOI_EXIT_BITMAP3 = 0x00002022,
        EOI_EXIT_BITMAP3_HIGH = 0x00002023,
        VMREAD_BITMAP = 0x00002026,
        VMREAD_BITMAP_HIGH = 0x00002027,
        VMWRITE_BITMAP = 0x00002028,
        VMWRITE_BITMAP_HIGH = 0x00002029,
        XSS_EXIT_BITMAP = 0x0000202C,
        XSS_EXIT_BITMAP_HIGH = 0x0000202D,
        TSC_MULTIPLIER = 0x00002032,
        TSC_MULTIPLIER_HIGH = 0x00002033,
        GUEST_PHYSICAL_ADDRESS = 0x00002400,
        GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401,
        VMCS_LINK_POINTER = 0x00002800,
        VMCS_LINK_POINTER_HIGH = 0x00002801,
        GUEST_IA32_DEBUGCTL = 0x00002802,
        GUEST_IA32_DEBUGCTL_HIGH = 0x00002803,
        GUEST_IA32_PAT = 0x00002804,
        GUEST_IA32_PAT_HIGH = 0x00002805,
        GUEST_IA32_EFER = 0x00002806,
        GUEST_IA32_EFER_HIGH = 0x00002807,
        GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808,
        GUEST_IA32_PERF_GLOBAL_CTRL_HIGH = 0x00002809,
        GUEST_PDPTR0 = 0x0000280a,
        GUEST_PDPTR0_HIGH = 0x0000280b,
        GUEST_PDPTR1 = 0x0000280c,
        GUEST_PDPTR1_HIGH = 0x0000280d,
        GUEST_PDPTR2 = 0x0000280e,
        GUEST_PDPTR2_HIGH = 0x0000280f,
        GUEST_PDPTR3 = 0x00002810,
        GUEST_PDPTR3_HIGH = 0x00002811,
        GUEST_BNDCFGS = 0x00002812,
        GUEST_BNDCFGS_HIGH = 0x00002813,
        HOST_IA32_PAT = 0x00002c00,
        HOST_IA32_PAT_HIGH = 0x00002c01,
        HOST_IA32_EFER = 0x00002c02,
        HOST_IA32_EFER_HIGH = 0x00002c03,
        HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04,
        HOST_IA32_PERF_GLOBAL_CTRL_HIGH = 0x00002c05,
        PIN_BASED_VM_EXEC_CONTROL = 0x00004000,
        CPU_BASED_VM_EXEC_CONTROL = 0x00004002,
        EXCEPTION_BITMAP = 0x00004004,
        PAGE_FAULT_ERROR_CODE_MASK = 0x00004006,
        PAGE_FAULT_ERROR_CODE_MATCH = 0x00004008,
        CR3_TARGET_COUNT = 0x0000400a,
        VM_EXIT_CONTROLS = 0x0000400c,
        VM_EXIT_MSR_STORE_COUNT = 0x0000400e,
        VM_EXIT_MSR_LOAD_COUNT = 0x00004010,
        VM_ENTRY_CONTROLS = 0x00004012,
        VM_ENTRY_MSR_LOAD_COUNT = 0x00004014,
        VM_ENTRY_INTR_INFO_FIELD = 0x00004016,
        VM_ENTRY_EXCEPTION_ERROR_CODE = 0x00004018,
        VM_ENTRY_INSTRUCTION_LEN = 0x0000401a,
        TPR_THRESHOLD = 0x0000401c,
        SECONDARY_VM_EXEC_CONTROL = 0x0000401e,
        PLE_GAP = 0x00004020,
        PLE_WINDOW = 0x00004022,
        VM_INSTRUCTION_ERROR = 0x00004400,
        VM_EXIT_REASON = 0x00004402,
        VM_EXIT_INTR_INFO = 0x00004404,
        VM_EXIT_INTR_ERROR_CODE = 0x00004406,
        IDT_VECTORING_INFO_FIELD = 0x00004408,
        IDT_VECTORING_ERROR_CODE = 0x0000440a,
        VM_EXIT_INSTRUCTION_LEN = 0x0000440c,
        VMX_INSTRUCTION_INFO = 0x0000440e,
        GUEST_ES_LIMIT = 0x00004800,
        GUEST_CS_LIMIT = 0x00004802,
        GUEST_SS_LIMIT = 0x00004804,
        GUEST_DS_LIMIT = 0x00004806,
        GUEST_FS_LIMIT = 0x00004808,
        GUEST_GS_LIMIT = 0x0000480a,
        GUEST_LDTR_LIMIT = 0x0000480c,
        GUEST_TR_LIMIT = 0x0000480e,
        GUEST_GDTR_LIMIT = 0x00004810,
        GUEST_IDTR_LIMIT = 0x00004812,
        GUEST_ES_AR_BYTES = 0x00004814,
        GUEST_CS_AR_BYTES = 0x00004816,
        GUEST_SS_AR_BYTES = 0x00004818,
        GUEST_DS_AR_BYTES = 0x0000481a,
        GUEST_FS_AR_BYTES = 0x0000481c,
        GUEST_GS_AR_BYTES = 0x0000481e,
        GUEST_LDTR_AR_BYTES = 0x00004820,
        GUEST_TR_AR_BYTES = 0x00004822,
        GUEST_INTERRUPTIBILITY_INFO = 0x00004824,
        GUEST_ACTIVITY_STATE = 0X00004826,
        GUEST_SYSENTER_CS = 0x0000482A,
        VMX_PREEMPTION_TIMER_VALUE = 0x0000482E,
        HOST_IA32_SYSENTER_CS = 0x00004c00,
        CR0_GUEST_HOST_MASK = 0x00006000,
        CR4_GUEST_HOST_MASK = 0x00006002,
        CR0_READ_SHADOW = 0x00006004,
        CR4_READ_SHADOW = 0x00006006,
        CR3_TARGET_VALUE0 = 0x00006008,
        CR3_TARGET_VALUE1 = 0x0000600a,
        CR3_TARGET_VALUE2 = 0x0000600c,
        CR3_TARGET_VALUE3 = 0x0000600e,
        EXIT_QUALIFICATION = 0x00006400,
        GUEST_LINEAR_ADDRESS = 0x0000640a,
        GUEST_CR0 = 0x00006800,
        GUEST_CR3 = 0x00006802,
        GUEST_CR4 = 0x00006804,
        GUEST_ES_BASE = 0x00006806,
        GUEST_CS_BASE = 0x00006808,
        GUEST_SS_BASE = 0x0000680a,
        GUEST_DS_BASE = 0x0000680c,
        GUEST_FS_BASE = 0x0000680e,
        GUEST_GS_BASE = 0x00006810,
        GUEST_LDTR_BASE = 0x00006812,
        GUEST_TR_BASE = 0x00006814,
        GUEST_GDTR_BASE = 0x00006816,
        GUEST_IDTR_BASE = 0x00006818,
        GUEST_DR7 = 0x0000681a,
        GUEST_RSP = 0x0000681c,
        GUEST_RIP = 0x0000681e,
        GUEST_RFLAGS = 0x00006820,
        GUEST_PENDING_DBG_EXCEPTIONS = 0x00006822,
        GUEST_SYSENTER_ESP = 0x00006824,
        GUEST_SYSENTER_EIP = 0x00006826,
        HOST_CR0 = 0x00006c00,
        HOST_CR3 = 0x00006c02,
        HOST_CR4 = 0x00006c04,
        HOST_FS_BASE = 0x00006c06,
        HOST_GS_BASE = 0x00006c08,
        HOST_TR_BASE = 0x00006c0a,
        HOST_GDTR_BASE = 0x00006c0c,
        HOST_IDTR_BASE = 0x00006c0e,
        HOST_IA32_SYSENTER_ESP = 0x00006c10,
        HOST_IA32_SYSENTER_EIP = 0x00006c12,
        HOST_RSP = 0x00006c14,
        HOST_RIP = 0x00006c16,
};

struct vmx_msr_entry {
        uint32_t index;
        uint32_t reserved;
        uint64_t value;
} __attribute__ ((aligned(16)));

#include "evmcs.h"

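/*
 * enable_evmcs and the evmcs_*() helpers come from evmcs.h; when a test runs
 * with Hyper-V enlightened VMCS support, most of the wrappers below operate
 * on the enlightened VMCS instead of executing the raw VMX instruction.
 *
 * The raw VMX instructions report failure through RFLAGS: CF=1 for
 * VMfailInvalid, ZF=1 for VMfailValid.  "setna" (set if CF or ZF is set)
 * therefore makes these wrappers return non-zero on any failure.
 */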
static inline int vmxon(uint64_t phys)
{
        uint8_t ret;

        __asm__ __volatile__ ("vmxon %[pa]; setna %[ret]"
                : [ret]"=rm"(ret)
                : [pa]"m"(phys)
                : "cc", "memory");

        return ret;
}

static inline void vmxoff(void)
{
        __asm__ __volatile__("vmxoff");
}

static inline int vmclear(uint64_t vmcs_pa)
{
        uint8_t ret;

        __asm__ __volatile__ ("vmclear %[pa]; setna %[ret]"
                : [ret]"=rm"(ret)
                : [pa]"m"(vmcs_pa)
                : "cc", "memory");

        return ret;
}

static inline int vmptrld(uint64_t vmcs_pa)
{
        uint8_t ret;

        if (enable_evmcs)
                return -1;

        __asm__ __volatile__ ("vmptrld %[pa]; setna %[ret]"
                : [ret]"=rm"(ret)
                : [pa]"m"(vmcs_pa)
                : "cc", "memory");

        return ret;
}

static inline int vmptrst(uint64_t *value)
{
        uint64_t tmp;
        uint8_t ret;

        if (enable_evmcs)
                return evmcs_vmptrst(value);

        __asm__ __volatile__("vmptrst %[value]; setna %[ret]"
                : [value]"=m"(tmp), [ret]"=rm"(ret)
                : : "cc", "memory");

        *value = tmp;
        return ret;
}

/*
 * A wrapper around vmptrst that ignores errors and returns zero if the
 * vmptrst instruction fails.
 */
static inline uint64_t vmptrstz(void)
{
        uint64_t value = 0;
        vmptrst(&value);
        return value;
}

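/*
 * vmlaunch()/vmresume() below stash a few GPRs on the stack (the L2 run
 * clobbers host register state), push a zero return-value slot and point
 * HOST_RSP/HOST_RIP at that stack/label 1 so that a VM exit resumes right
 * after the instruction.  On VM-entry failure the CPU falls through to the
 * "incq", turning the pushed 0 into a non-zero return value; a successful
 * entry followed by a VM exit returns 0.
 */
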
/*
 * No guest state (e.g. GPRs) is established by this vmlaunch.
 */
static inline int vmlaunch(void)
{
        int ret;

        if (enable_evmcs)
                return evmcs_vmlaunch();

        __asm__ __volatile__("push %%rbp;"
                             "push %%rcx;"
                             "push %%rdx;"
                             "push %%rsi;"
                             "push %%rdi;"
                             "push $0;"
                             "vmwrite %%rsp, %[host_rsp];"
                             "lea 1f(%%rip), %%rax;"
                             "vmwrite %%rax, %[host_rip];"
                             "vmlaunch;"
                             "incq (%%rsp);"
                             "1: pop %%rax;"
                             "pop %%rdi;"
                             "pop %%rsi;"
                             "pop %%rdx;"
                             "pop %%rcx;"
                             "pop %%rbp;"
                             : [ret]"=&a"(ret)
                             : [host_rsp]"r"((uint64_t)HOST_RSP),
                               [host_rip]"r"((uint64_t)HOST_RIP)
                             : "memory", "cc", "rbx", "r8", "r9", "r10",
                               "r11", "r12", "r13", "r14", "r15");
        return ret;
}

/*
 * No guest state (e.g. GPRs) is established by this vmresume.
 */
static inline int vmresume(void)
{
        int ret;

        if (enable_evmcs)
                return evmcs_vmresume();

        __asm__ __volatile__("push %%rbp;"
                             "push %%rcx;"
                             "push %%rdx;"
                             "push %%rsi;"
                             "push %%rdi;"
                             "push $0;"
                             "vmwrite %%rsp, %[host_rsp];"
                             "lea 1f(%%rip), %%rax;"
                             "vmwrite %%rax, %[host_rip];"
                             "vmresume;"
                             "incq (%%rsp);"
                             "1: pop %%rax;"
                             "pop %%rdi;"
                             "pop %%rsi;"
                             "pop %%rdx;"
                             "pop %%rcx;"
                             "pop %%rbp;"
                             : [ret]"=&a"(ret)
                             : [host_rsp]"r"((uint64_t)HOST_RSP),
                               [host_rip]"r"((uint64_t)HOST_RIP)
                             : "memory", "cc", "rbx", "r8", "r9", "r10",
                               "r11", "r12", "r13", "r14", "r15");
        return ret;
}

static inline void vmcall(void)
{
        /* Currently, L1 destroys our GPRs during vmexits. */
        __asm__ __volatile__("push %%rbp; vmcall; pop %%rbp" : : :
                             "rax", "rbx", "rcx", "rdx",
                             "rsi", "rdi", "r8", "r9", "r10", "r11", "r12",
                             "r13", "r14", "r15");
}

static inline int vmread(uint64_t encoding, uint64_t *value)
{
        uint64_t tmp;
        uint8_t ret;

        if (enable_evmcs)
                return evmcs_vmread(encoding, value);

        __asm__ __volatile__("vmread %[encoding], %[value]; setna %[ret]"
                : [value]"=rm"(tmp), [ret]"=rm"(ret)
                : [encoding]"r"(encoding)
                : "cc", "memory");

        *value = tmp;
        return ret;
}

/*
 * A wrapper around vmread that ignores errors and returns zero if the
 * vmread instruction fails.
 */
static inline uint64_t vmreadz(uint64_t encoding)
{
        uint64_t value = 0;
        vmread(encoding, &value);
        return value;
}

static inline int vmwrite(uint64_t encoding, uint64_t value)
{
        uint8_t ret;

        if (enable_evmcs)
                return evmcs_vmwrite(encoding, value);

        __asm__ __volatile__ ("vmwrite %[value], %[encoding]; setna %[ret]"
                : [ret]"=rm"(ret)
                : [value]"rm"(value), [encoding]"r"(encoding)
                : "cc", "memory");

        return ret;
}

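/*
 * The VMCS revision identifier lives in bits 30:0 of IA32_VMX_BASIC; bit 31
 * is always zero, so truncating the MSR value to 32 bits is safe.
 */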
static inline uint32_t vmcs_revision(void)
{
        return rdmsr(MSR_IA32_VMX_BASIC);
}

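/*
 * Each region below is tracked three ways: a host virtual address (*_hva),
 * a guest physical address (*_gpa) and, in the bare pointer, the guest
 * virtual address of the page as allocated by vcpu_alloc_vmx().
 */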
struct vmx_pages {
        void *vmxon_hva;
        uint64_t vmxon_gpa;
        void *vmxon;

        void *vmcs_hva;
        uint64_t vmcs_gpa;
        void *vmcs;

        void *msr_hva;
        uint64_t msr_gpa;
        void *msr;

        void *shadow_vmcs_hva;
        uint64_t shadow_vmcs_gpa;
        void *shadow_vmcs;

        void *vmread_hva;
        uint64_t vmread_gpa;
        void *vmread;

        void *vmwrite_hva;
        uint64_t vmwrite_gpa;
        void *vmwrite;

        void *vp_assist_hva;
        uint64_t vp_assist_gpa;
        void *vp_assist;

        void *enlightened_vmcs_hva;
        uint64_t enlightened_vmcs_gpa;
        void *enlightened_vmcs;

        void *eptp_hva;
        uint64_t eptp_gpa;
        void *eptp;
};

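/*
 * Typical guest-side (L1) usage of the helpers below, sketched from how
 * existing selftests drive them:
 *
 *	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
 *	GUEST_ASSERT(load_vmcs(vmx_pages));
 *	prepare_vmcs(vmx_pages, l2_guest_code, &l2_guest_stack[STACK_SIZE]);
 *	GUEST_ASSERT(!vmlaunch());
 *	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
 *
 * l2_guest_code, l2_guest_stack and STACK_SIZE are placeholders supplied by
 * the individual test; vcpu_alloc_vmx() is called host-side beforehand and
 * the resulting guest address passed to the guest code.
 */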
struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
bool prepare_for_vmx_operation(struct vmx_pages *vmx);
void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
bool load_vmcs(struct vmx_pages *vmx);

void nested_vmx_check_supported(void);

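/*
 * The nested_*() helpers below build EPT mappings for L2: nested_paddr is
 * the L2 guest-physical address being mapped and paddr the L1 guest-physical
 * address backing it.
 */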
void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                   uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot);
void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                uint64_t nested_paddr, uint64_t paddr, uint64_t size,
                uint32_t eptp_memslot);
void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
                        uint32_t memslot, uint32_t eptp_memslot);
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
                  uint32_t eptp_memslot);

#endif /* SELFTEST_KVM_VMX_H */