// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021, Red Hat, Inc.
 *
 * Tests for Hyper-V clocksources
 */
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "hyperv.h"

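/*
 * Hyper-V reference TSC page, as used by this test: tsc_sequence is
 * nonzero only while the page contents are valid, and the reference
 * time is derived from tsc_scale/tsc_offset (see get_tscpage_ts()).
 */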
struct ms_hyperv_tsc_page {
	volatile u32 tsc_sequence;
	u32 reserved1;
	volatile u64 tsc_scale;
	volatile s64 tsc_offset;
} __packed;

/* Simplified mul_u64_u64_shr() */
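/*
 * Returns the high 64 bits of the 128-bit product a * b, i.e.
 * (a * b) >> 64.  The a_lo * b_lo partial product and the carries out
 * of the low 64 bits are dropped, so the result can be short by a few
 * counts; that is well within this test's tolerances.
 */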
static inline u64 mul_u64_u64_shr64(u64 a, u64 b)
{
	union {
		u64 ll;
		struct {
			u32 low, high;
		} l;
	} rm, rn, rh, a0, b0;
	u64 c;

	a0.ll = a;
	b0.ll = b;

	rm.ll = (u64)a0.l.low * b0.l.high;
	rn.ll = (u64)a0.l.high * b0.l.low;
	rh.ll = (u64)a0.l.high * b0.l.high;

	rh.l.low = c = rm.l.high + rn.l.high + rh.l.low;
	rh.l.high = (c >> 32) + rh.l.high;

	return rh.ll;
}

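/* Busy-wait through ~10^8 NOPs so a measurable amount of time elapses. */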
static inline void nop_loop(void)
{
	int i;

	for (i = 0; i < 100000000; i++)
		asm volatile("nop");
}

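/*
 * Cross-check elapsed time per HV_X64_MSR_TIME_REF_COUNT (100ns units)
 * against elapsed guest TSC cycles converted with
 * HV_X64_MSR_TSC_FREQUENCY; the two clocks must agree to within 1%.
 */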
static inline void check_tsc_msr_rdtsc(void)
{
	u64 tsc_freq, r1, r2, t1, t2;
	s64 delta_ns;

	tsc_freq = rdmsr(HV_X64_MSR_TSC_FREQUENCY);
	GUEST_ASSERT(tsc_freq > 0);

	/* For increased accuracy, take mean rdtsc() before and after rdmsr() */
	r1 = rdtsc();
	t1 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
	r1 = (r1 + rdtsc()) / 2;
	nop_loop();
	r2 = rdtsc();
	t2 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
	r2 = (r2 + rdtsc()) / 2;

	GUEST_ASSERT(r2 > r1 && t2 > t1);

	/* HV_X64_MSR_TIME_REF_COUNT is in 100ns */
	delta_ns = ((t2 - t1) * 100) - ((r2 - r1) * 1000000000 / tsc_freq);
	if (delta_ns < 0)
		delta_ns = -delta_ns;

	/* 1% tolerance */
	GUEST_ASSERT(delta_ns * 100 < (t2 - t1) * 100);
}

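/* Reference time from the TSC page: ((tsc * scale) >> 64) + offset, in 100ns units. */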
static inline u64 get_tscpage_ts(struct ms_hyperv_tsc_page *tsc_page)
{
	return mul_u64_u64_shr64(rdtsc(), tsc_page->tsc_scale) + tsc_page->tsc_offset;
}

static inline void check_tsc_msr_tsc_page(struct ms_hyperv_tsc_page *tsc_page)
{
	u64 r1, r2, t1, t2;

	/* Compare TSC page clocksource with HV_X64_MSR_TIME_REF_COUNT */
	t1 = get_tscpage_ts(tsc_page);
	r1 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);

	/* 10 ms tolerance */
	GUEST_ASSERT(r1 >= t1 && r1 - t1 < 100000);
	nop_loop();

	t2 = get_tscpage_ts(tsc_page);
	r2 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
	GUEST_ASSERT(r2 >= t2 && r2 - t2 < 100000);
}

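/*
 * Guest side of the test.  Each GUEST_SYNC(n) hands control back to the
 * host at stage n; the host resets kvmclock with KVM_SET_CLOCK at stages
 * 7, 8 and 10 (see main() below), so the guest can observe when the TSC
 * page is and is not updated.
 */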
static void guest_main(struct ms_hyperv_tsc_page *tsc_page, vm_paddr_t tsc_page_gpa)
{
	u64 tsc_scale, tsc_offset;

	/* Set Guest OS id to enable Hyper-V emulation */
	GUEST_SYNC(1);
	wrmsr(HV_X64_MSR_GUEST_OS_ID, (u64)0x8100 << 48);
	GUEST_SYNC(2);

	check_tsc_msr_rdtsc();

	GUEST_SYNC(3);

	/* Set up the TSC page in disabled state, check that it's clean */
	wrmsr(HV_X64_MSR_REFERENCE_TSC, tsc_page_gpa);
	GUEST_ASSERT(tsc_page->tsc_sequence == 0);
	GUEST_ASSERT(tsc_page->tsc_scale == 0);
	GUEST_ASSERT(tsc_page->tsc_offset == 0);

	GUEST_SYNC(4);

	/* Set up the TSC page in enabled state */
	wrmsr(HV_X64_MSR_REFERENCE_TSC, tsc_page_gpa | 0x1);
	GUEST_ASSERT(tsc_page->tsc_sequence != 0);

	GUEST_SYNC(5);

	check_tsc_msr_tsc_page(tsc_page);

	GUEST_SYNC(6);

	tsc_offset = tsc_page->tsc_offset;
	/* Call KVM_SET_CLOCK from userspace, check that TSC page was updated */

	GUEST_SYNC(7);
	/* Sanity check TSC page timestamp, it should be close to 0 */
	GUEST_ASSERT(get_tscpage_ts(tsc_page) < 100000);

	GUEST_ASSERT(tsc_page->tsc_offset != tsc_offset);

	nop_loop();

	/*
	 * Enable re-enlightenment and check that the TSC page stays constant
	 * across KVM_SET_CLOCK.
	 */
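	/*
	 * Per the TLFS layout of HV_X64_MSR_REENLIGHTENMENT_CONTROL,
	 * bits 7:0 carry the notification vector (0xff here) and bit 16
	 * is the enable bit.
	 */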
	wrmsr(HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0x1 << 16 | 0xff);
	wrmsr(HV_X64_MSR_TSC_EMULATION_CONTROL, 0x1);
	tsc_offset = tsc_page->tsc_offset;
	tsc_scale = tsc_page->tsc_scale;
	GUEST_SYNC(8);
	GUEST_ASSERT(tsc_page->tsc_offset == tsc_offset);
	GUEST_ASSERT(tsc_page->tsc_scale == tsc_scale);

	GUEST_SYNC(9);

	check_tsc_msr_tsc_page(tsc_page);

	/*
	 * Disable re-enlightenment and TSC page, check that KVM doesn't update
	 * it anymore.
	 */
	wrmsr(HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0);
	wrmsr(HV_X64_MSR_TSC_EMULATION_CONTROL, 0);
	wrmsr(HV_X64_MSR_REFERENCE_TSC, 0);
	memset(tsc_page, 0, sizeof(*tsc_page));

	GUEST_SYNC(10);
	GUEST_ASSERT(tsc_page->tsc_sequence == 0);
	GUEST_ASSERT(tsc_page->tsc_offset == 0);
	GUEST_ASSERT(tsc_page->tsc_scale == 0);

	GUEST_DONE();
}

#define VCPU_ID 0

static void host_check_tsc_msr_rdtsc(struct kvm_vm *vm)
{
	u64 tsc_freq, r1, r2, t1, t2;
	s64 delta_ns;

	tsc_freq = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TSC_FREQUENCY);
	TEST_ASSERT(tsc_freq > 0, "TSC frequency must be nonzero");

	/* For increased accuracy, take mean rdtsc() before and after ioctl */
	r1 = rdtsc();
	t1 = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TIME_REF_COUNT);
	r1 = (r1 + rdtsc()) / 2;
	nop_loop();
	r2 = rdtsc();
	t2 = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TIME_REF_COUNT);
	r2 = (r2 + rdtsc()) / 2;

	TEST_ASSERT(t2 > t1, "Time reference MSR is not monotonic (%ld <= %ld)", t1, t2);

	/* HV_X64_MSR_TIME_REF_COUNT is in 100ns */
	delta_ns = ((t2 - t1) * 100) - ((r2 - r1) * 1000000000 / tsc_freq);
	if (delta_ns < 0)
		delta_ns = -delta_ns;

	/* 1% tolerance */
	TEST_ASSERT(delta_ns * 100 < (t2 - t1) * 100,
		    "Elapsed time does not match (MSR=%ld, TSC=%ld)",
		    (t2 - t1) * 100, (r2 - r1) * 1000000000 / tsc_freq);
}

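/*
 * Host side: create the VM, hand the guest a zeroed page to use as the
 * TSC page, then drive the stage machine, resetting kvmclock at the
 * stages where the guest expects it.
 */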
int main(void)
{
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct ucall uc;
	vm_vaddr_t tsc_page_gva;
	int stage;

	vm = vm_create_default(VCPU_ID, 0, guest_main);
	run = vcpu_state(vm, VCPU_ID);

	vcpu_set_hv_cpuid(vm, VCPU_ID);

	tsc_page_gva = vm_vaddr_alloc_page(vm);
	memset(addr_gva2hva(vm, tsc_page_gva), 0x0, getpagesize());
	TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0,
		"TSC page has to be page aligned");
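	/*
	 * Pass both addresses of the page: the GVA for direct guest access
	 * and the GPA to be programmed into HV_X64_MSR_REFERENCE_TSC.
	 */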
	vcpu_args_set(vm, VCPU_ID, 2, tsc_page_gva, addr_gva2gpa(vm, tsc_page_gva));

	host_check_tsc_msr_rdtsc(vm);

	for (stage = 1;; stage++) {
		_vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Stage %d: unexpected exit reason: %u (%s)",
			    stage, run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
				  __FILE__, uc.args[1]);
			/* NOT REACHED */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			/* Keep in sync with guest_main() */
			TEST_ASSERT(stage == 11, "Testing ended prematurely, stage %d",
				    stage);
			goto out;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage,
			    "Stage %d: Unexpected register values vmexit, got %lx",
			    stage, (ulong)uc.args[1]);

		/* Reset kvmclock to trigger a TSC page update */
		if (stage == 7 || stage == 8 || stage == 10) {
			struct kvm_clock_data clock = {0};

			vm_ioctl(vm, KVM_SET_CLOCK, &clock);
		}
	}

out:
	kvm_vm_free(vm);
}