• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * gtests/tests/vmx_tsc_adjust_test.c
3  *
4  * Copyright (C) 2018, Google LLC.
5  *
6  * This work is licensed under the terms of the GNU GPL, version 2.
7  *
8  *
9  * IA32_TSC_ADJUST test
10  *
11  * According to the SDM, "if an execution of WRMSR to the
12  * IA32_TIME_STAMP_COUNTER MSR adds (or subtracts) value X from the TSC,
13  * the logical processor also adds (or subtracts) value X from the
14  * IA32_TSC_ADJUST MSR.
15  *
16  * Note that when L1 doesn't intercept writes to IA32_TSC, a
17  * WRMSR(IA32_TSC) from L2 sets L1's TSC value, not L2's perceived TSC
18  * value.
19  *
20  * This test verifies that this unusual case is handled correctly.
21  */
22 
#include "test_util.h"
#include "kvm_util.h"
#include "x86.h"
#include "vmx.h"

#include <inttypes.h>
#include <string.h>
#include <sys/ioctl.h>

#include "../kselftest.h"
32 
33 #ifndef MSR_IA32_TSC_ADJUST
34 #define MSR_IA32_TSC_ADJUST 0x3b
35 #endif
36 
37 #define PAGE_SIZE	4096
38 #define VCPU_ID		5
39 
40 #define TSC_ADJUST_VALUE (1ll << 32)
41 #define TSC_OFFSET_VALUE -(1ll << 48)
42 
/*
 * Guest-to-host I/O port numbers.
 *
 * NOTE(review): the dispatch loop in main() matches on the harness's
 * GUEST_PORT_* values, not these constants — they appear unused by the
 * code visible here; presumably kept for reference or older harnesses.
 */
enum {
	PORT_ABORT = 0x1000,
	PORT_REPORT,
	PORT_DONE,
};
48 
/*
 * Indices of the per-VCPU VMX support pages; NUM_VMX_PAGES is the
 * total count (it must remain the last enumerator).
 */
enum {
	VMXON_PAGE = 0,
	VMCS_PAGE,
	MSR_BITMAP_PAGE,

	NUM_VMX_PAGES,
};
56 
/*
 * A kvm_msrs header followed immediately by exactly one MSR entry.
 * Packed so the entry abuts the header with no padding, matching the
 * variable-length layout KVM_SET_MSRS/KVM_GET_MSRS expect.
 */
struct kvm_single_msr {
	struct kvm_msrs header;
	struct kvm_msr_entry entry;
} __attribute__((packed));
61 
62 /* The virtual machine object. */
63 static struct kvm_vm *vm;
64 
/*
 * Guest-side helper: read IA32_TSC_ADJUST, report the value to the
 * host via GUEST_SYNC, then assert it does not exceed @max.
 */
static void check_ia32_tsc_adjust(int64_t max)
{
	int64_t cur = rdmsr(MSR_IA32_TSC_ADJUST);

	GUEST_SYNC(cur);
	GUEST_ASSERT(cur <= max);
}
73 
l2_guest_code(void)74 static void l2_guest_code(void)
75 {
76 	uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;
77 
78 	wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
79 	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);
80 
81 	/* Exit to L1 */
82 	__asm__ __volatile__("vmcall");
83 }
84 
/*
 * L1 guest body: exercise IA32_TSC_ADJUST tracking across a direct
 * WRMSR(IA32_TSC), a failed VMENTRY, and a WRMSR performed by L2.
 */
static void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint32_t control;
	uintptr_t save_cr3;

	/*
	 * Per the SDM, moving the TSC down by TSC_ADJUST_VALUE via WRMSR
	 * must move IA32_TSC_ADJUST down by the same amount.
	 */
	GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE);
	wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE);
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);

	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));

	/* Prepare the VMCS for L2 execution. */
	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
	/* (CPU_BASED_USE_TSC_OFFSETING spelling matches the kernel header.) */
	control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETING;
	vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
	vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);

	/*
	 * Jump into L2.  First, test failure to load guest CR3: an entry
	 * that fails with EXIT_REASON_INVALID_STATE must leave
	 * IA32_TSC_ADJUST untouched.
	 */
	save_cr3 = vmreadz(GUEST_CR3);
	vmwrite(GUEST_CR3, -1ull);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) ==
		     (EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE));
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
	vmwrite(GUEST_CR3, save_cr3);

	/* Now enter L2 for real; it exits back to us with VMCALL. */
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	/* L2's non-intercepted WRMSR moved our adjust down once more. */
	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

	GUEST_DONE();
}
122 
report(int64_t val)123 void report(int64_t val)
124 {
125 	printf("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n",
126 	       val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE);
127 }
128 
main(int argc,char * argv[])129 int main(int argc, char *argv[])
130 {
131 	struct vmx_pages *vmx_pages;
132 	vm_vaddr_t vmx_pages_gva;
133 	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
134 
135 	if (!(entry->ecx & CPUID_VMX)) {
136 		fprintf(stderr, "nested VMX not enabled, skipping test\n");
137 		exit(KSFT_SKIP);
138 	}
139 
140 	vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
141 	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
142 
143 	/* Allocate VMX pages and shared descriptors (vmx_pages). */
144 	vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
145 	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
146 
147 	for (;;) {
148 		volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
149 		struct guest_args args;
150 
151 		vcpu_run(vm, VCPU_ID);
152 		guest_args_read(vm, VCPU_ID, &args);
153 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
154 			    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
155 			    run->exit_reason,
156 			    exit_reason_str(run->exit_reason));
157 
158 		switch (args.port) {
159 		case GUEST_PORT_ABORT:
160 			TEST_ASSERT(false, "%s", (const char *) args.arg0);
161 			/* NOT REACHED */
162 		case GUEST_PORT_SYNC:
163 			report(args.arg1);
164 			break;
165 		case GUEST_PORT_DONE:
166 			goto done;
167 		default:
168 			TEST_ASSERT(false, "Unknown port 0x%x.", args.port);
169 		}
170 	}
171 
172 	kvm_vm_free(vm);
173 done:
174 	return 0;
175 }
176