1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2021, Red Hat, Inc.
4 *
5 * Tests for Hyper-V features enablement
6 */
7 #include <asm/kvm_para.h>
8 #include <linux/kvm_para.h>
9 #include <stdint.h>
10
11 #include "test_util.h"
12 #include "kvm_util.h"
13 #include "processor.h"
14 #include "hyperv.h"
15
16 #define LINUX_OS_ID ((u64)0x8100 << 48)
17
/*
 * Issue a Hyper-V hypercall from the guest using the TLFS register-based
 * calling convention: RCX = control word, RDX = input (GPA, or parameter 0
 * for fast calls), R8 = output (GPA, or parameter 1), status returned in RAX.
 *
 * *hv_status is seeded with -EFAULT (via the "a" input constraint), so it
 * only contains a real HV_STATUS_* value if the VMCALL actually executed.
 * Returns the exception vector raised by VMCALL, or 0 if none.
 */
static inline uint8_t hypercall(u64 control, vm_vaddr_t input_address,
				vm_vaddr_t output_address, uint64_t *hv_status)
{
	uint8_t vector;

	/* Note both the hypercall and the "asm safe" clobber r9-r11. */
	asm volatile("mov %[output_address], %%r8\n\t"
		     KVM_ASM_SAFE("vmcall")
		     : "=a" (*hv_status),
		       "+c" (control), "+d" (input_address),
		       KVM_ASM_SAFE_OUTPUTS(vector)
		     : [output_address] "r"(output_address),
		       "a" (-EFAULT)
		     : "cc", "memory", "r8", KVM_ASM_SAFE_CLOBBERS);
	return vector;
}
34
/* One MSR access test case, written by the host and read by the guest. */
struct msr_data {
	uint32_t idx;		/* MSR index to access */
	bool available;		/* expected result: true = success, false = #GP */
	bool write;		/* true = wrmsr(write_val), false = rdmsr */
	u64 write_val;		/* value to write when 'write' is set */
};
41
/* One hypercall test case, written by the host and read by the guest. */
struct hcall_data {
	uint64_t control;	/* hypercall control word (code + flags) */
	uint64_t expect;	/* expected HV_STATUS_* result */
	bool ud_expected;	/* true if the VMCALL should raise #UD instead */
};
47
guest_msr(struct msr_data * msr)48 static void guest_msr(struct msr_data *msr)
49 {
50 uint64_t ignored;
51 uint8_t vector;
52
53 GUEST_ASSERT(msr->idx);
54
55 if (!msr->write)
56 vector = rdmsr_safe(msr->idx, &ignored);
57 else
58 vector = wrmsr_safe(msr->idx, msr->write_val);
59
60 if (msr->available)
61 GUEST_ASSERT_2(!vector, msr->idx, vector);
62 else
63 GUEST_ASSERT_2(vector == GP_VECTOR, msr->idx, vector);
64 GUEST_DONE();
65 }
66
/*
 * Guest entry point for the hypercall tests: enable the hypercall page,
 * issue the hypercall described by @hcall and assert the result (status
 * code, or #UD when ud_expected) matches expectations.
 */
static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
{
	u64 status, in, out;
	uint8_t vec;

	GUEST_ASSERT(hcall->control);

	/* GUEST_OS_ID must be set before the hypercall page is enabled. */
	wrmsr(HV_X64_MSR_GUEST_OS_ID, LINUX_OS_ID);
	wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);

	if (hcall->control & HV_HYPERCALL_FAST_BIT) {
		/* Fast hypercalls pass parameters in registers, not memory. */
		in = 0;
		out = 0;
	} else {
		in = pgs_gpa;
		out = pgs_gpa + 4096;
	}

	vec = hypercall(hcall->control, in, out, &status);
	if (!hcall->ud_expected) {
		GUEST_ASSERT_2(!vec, hcall->control, vec);
		GUEST_ASSERT_2(status == hcall->expect, hcall->expect, status);
	} else {
		GUEST_ASSERT_2(vec == UD_VECTOR, hcall->control, vec);
	}

	GUEST_DONE();
}
94
/*
 * Reset the vCPU to a known Hyper-V CPUID baseline: all supported features
 * enabled, except the three leaves whose bits the test stages flip one by
 * one (features, enlightenments, syndbg capabilities), which are cleared.
 */
static void vcpu_reset_hv_cpuid(struct kvm_vcpu *vcpu)
{
	/*
	 * Enable all supported Hyper-V features, then clear the leafs holding
	 * the features that will be tested one by one.
	 */
	vcpu_set_hv_cpuid(vcpu);

	vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
	vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO);
	vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
}
107
/*
 * Host-side driver for the MSR tests.  Each loop iteration is one "stage":
 * a fresh VM is created, one MSR test case is written into guest memory,
 * the relevant Hyper-V CPUID feature bit may be set, and the guest
 * (guest_msr) reports the outcome via ucall.  KVM_CAP_HYPERV_ENFORCE_CPUID
 * makes KVM enforce the guest-visible CPUID, so an MSR is expected to #GP
 * until its feature bit is set.  CPUID changes accumulate across stages
 * via prev_cpuid.
 */
static void guest_test_msrs_access(void)
{
	struct kvm_cpuid2 *prev_cpuid = NULL;
	struct kvm_cpuid_entry2 *feat, *dbg;
	struct kvm_vcpu *vcpu;
	struct kvm_run *run;
	struct kvm_vm *vm;
	struct ucall uc;
	int stage = 0;
	vm_vaddr_t msr_gva;
	struct msr_data *msr;

	while (true) {
		vm = vm_create_with_one_vcpu(&vcpu, guest_msr);

		/* Page shared with the guest holding the test parameters. */
		msr_gva = vm_vaddr_alloc_page(vm);
		memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
		msr = addr_gva2hva(vm, msr_gva);

		vcpu_args_set(vcpu, 1, msr_gva);
		vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);

		/*
		 * First stage starts from the cleared baseline; later stages
		 * restore the CPUID accumulated by all previous stages.
		 */
		if (!prev_cpuid) {
			vcpu_reset_hv_cpuid(vcpu);

			prev_cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent);
		} else {
			vcpu_init_cpuid(vcpu, prev_cpuid);
		}

		feat = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
		dbg = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);

		vm_init_descriptor_tables(vm);
		vcpu_init_descriptor_tables(vcpu);

		run = vcpu->run;

		/* TODO: Make this entire test easier to maintain. */
		if (stage >= 21)
			vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_SYNIC2, 0);

		switch (stage) {
		case 0:
			/*
			 * Only available when Hyper-V identification is set
			 */
			msr->idx = HV_X64_MSR_GUEST_OS_ID;
			msr->write = 0;
			msr->available = 0;
			break;
		case 1:
			msr->idx = HV_X64_MSR_HYPERCALL;
			msr->write = 0;
			msr->available = 0;
			break;
		case 2:
			feat->eax |= HV_MSR_HYPERCALL_AVAILABLE;
			/*
			 * HV_X64_MSR_GUEST_OS_ID has to be written first to make
			 * HV_X64_MSR_HYPERCALL available.
			 */
			msr->idx = HV_X64_MSR_GUEST_OS_ID;
			msr->write = 1;
			msr->write_val = LINUX_OS_ID;
			msr->available = 1;
			break;
		case 3:
			msr->idx = HV_X64_MSR_GUEST_OS_ID;
			msr->write = 0;
			msr->available = 1;
			break;
		case 4:
			msr->idx = HV_X64_MSR_HYPERCALL;
			msr->write = 0;
			msr->available = 1;
			break;

		/* VP runtime: #GP until HV_MSR_VP_RUNTIME_AVAILABLE. */
		case 5:
			msr->idx = HV_X64_MSR_VP_RUNTIME;
			msr->write = 0;
			msr->available = 0;
			break;
		case 6:
			feat->eax |= HV_MSR_VP_RUNTIME_AVAILABLE;
			msr->idx = HV_X64_MSR_VP_RUNTIME;
			msr->write = 0;
			msr->available = 1;
			break;
		case 7:
			/* Read only */
			msr->idx = HV_X64_MSR_VP_RUNTIME;
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 0;
			break;

		/* Reference TSC counter: gated by HV_MSR_TIME_REF_COUNT_AVAILABLE. */
		case 8:
			msr->idx = HV_X64_MSR_TIME_REF_COUNT;
			msr->write = 0;
			msr->available = 0;
			break;
		case 9:
			feat->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
			msr->idx = HV_X64_MSR_TIME_REF_COUNT;
			msr->write = 0;
			msr->available = 1;
			break;
		case 10:
			/* Read only */
			msr->idx = HV_X64_MSR_TIME_REF_COUNT;
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 0;
			break;

		/* VP index: gated by HV_MSR_VP_INDEX_AVAILABLE. */
		case 11:
			msr->idx = HV_X64_MSR_VP_INDEX;
			msr->write = 0;
			msr->available = 0;
			break;
		case 12:
			feat->eax |= HV_MSR_VP_INDEX_AVAILABLE;
			msr->idx = HV_X64_MSR_VP_INDEX;
			msr->write = 0;
			msr->available = 1;
			break;
		case 13:
			/* Read only */
			msr->idx = HV_X64_MSR_VP_INDEX;
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 0;
			break;

		/* Reset MSR: gated by HV_MSR_RESET_AVAILABLE; write 0 is a no-op. */
		case 14:
			msr->idx = HV_X64_MSR_RESET;
			msr->write = 0;
			msr->available = 0;
			break;
		case 15:
			feat->eax |= HV_MSR_RESET_AVAILABLE;
			msr->idx = HV_X64_MSR_RESET;
			msr->write = 0;
			msr->available = 1;
			break;
		case 16:
			msr->idx = HV_X64_MSR_RESET;
			msr->write = 1;
			msr->write_val = 0;
			msr->available = 1;
			break;

		/* Reference TSC page: gated by HV_MSR_REFERENCE_TSC_AVAILABLE. */
		case 17:
			msr->idx = HV_X64_MSR_REFERENCE_TSC;
			msr->write = 0;
			msr->available = 0;
			break;
		case 18:
			feat->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
			msr->idx = HV_X64_MSR_REFERENCE_TSC;
			msr->write = 0;
			msr->available = 1;
			break;
		case 19:
			msr->idx = HV_X64_MSR_REFERENCE_TSC;
			msr->write = 1;
			msr->write_val = 0;
			msr->available = 1;
			break;

		/* SynIC EOM: gated by HV_MSR_SYNIC_AVAILABLE, not by SYNIC2 cap. */
		case 20:
			msr->idx = HV_X64_MSR_EOM;
			msr->write = 0;
			msr->available = 0;
			break;
		case 21:
			/*
			 * Remains unavailable even with KVM_CAP_HYPERV_SYNIC2
			 * capability enabled and guest visible CPUID bit unset.
			 */
			msr->idx = HV_X64_MSR_EOM;
			msr->write = 0;
			msr->available = 0;
			break;
		case 22:
			feat->eax |= HV_MSR_SYNIC_AVAILABLE;
			msr->idx = HV_X64_MSR_EOM;
			msr->write = 0;
			msr->available = 1;
			break;
		case 23:
			msr->idx = HV_X64_MSR_EOM;
			msr->write = 1;
			msr->write_val = 0;
			msr->available = 1;
			break;

		/* Synthetic timers: gated by HV_MSR_SYNTIMER_AVAILABLE. */
		case 24:
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = 0;
			msr->available = 0;
			break;
		case 25:
			feat->eax |= HV_MSR_SYNTIMER_AVAILABLE;
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = 0;
			msr->available = 1;
			break;
		case 26:
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = 1;
			msr->write_val = 0;
			msr->available = 1;
			break;
		case 27:
			/* Direct mode test */
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = 1;
			msr->write_val = 1 << 12;
			msr->available = 0;
			break;
		case 28:
			feat->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = 1;
			msr->write_val = 1 << 12;
			msr->available = 1;
			break;

		/* APIC access MSRs: gated by HV_MSR_APIC_ACCESS_AVAILABLE. */
		case 29:
			msr->idx = HV_X64_MSR_EOI;
			msr->write = 0;
			msr->available = 0;
			break;
		case 30:
			feat->eax |= HV_MSR_APIC_ACCESS_AVAILABLE;
			msr->idx = HV_X64_MSR_EOI;
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 1;
			break;

		/* Frequency MSRs: gated by HV_ACCESS_FREQUENCY_MSRS. */
		case 31:
			msr->idx = HV_X64_MSR_TSC_FREQUENCY;
			msr->write = 0;
			msr->available = 0;
			break;
		case 32:
			feat->eax |= HV_ACCESS_FREQUENCY_MSRS;
			msr->idx = HV_X64_MSR_TSC_FREQUENCY;
			msr->write = 0;
			msr->available = 1;
			break;
		case 33:
			/* Read only */
			msr->idx = HV_X64_MSR_TSC_FREQUENCY;
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 0;
			break;

		/* Reenlightenment MSRs: gated by HV_ACCESS_REENLIGHTENMENT. */
		case 34:
			msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
			msr->write = 0;
			msr->available = 0;
			break;
		case 35:
			feat->eax |= HV_ACCESS_REENLIGHTENMENT;
			msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
			msr->write = 0;
			msr->available = 1;
			break;
		case 36:
			msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 1;
			break;
		case 37:
			/* Can only write '0' */
			msr->idx = HV_X64_MSR_TSC_EMULATION_STATUS;
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 0;
			break;

		/* Crash MSRs: gated by HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE. */
		case 38:
			msr->idx = HV_X64_MSR_CRASH_P0;
			msr->write = 0;
			msr->available = 0;
			break;
		case 39:
			feat->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
			msr->idx = HV_X64_MSR_CRASH_P0;
			msr->write = 0;
			msr->available = 1;
			break;
		case 40:
			msr->idx = HV_X64_MSR_CRASH_P0;
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 1;
			break;

		/* SynDbg MSRs: need both the feature bit and the syndbg cap bit. */
		case 41:
			msr->idx = HV_X64_MSR_SYNDBG_STATUS;
			msr->write = 0;
			msr->available = 0;
			break;
		case 42:
			feat->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
			dbg->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
			msr->idx = HV_X64_MSR_SYNDBG_STATUS;
			msr->write = 0;
			msr->available = 1;
			break;
		case 43:
			msr->idx = HV_X64_MSR_SYNDBG_STATUS;
			msr->write = 1;
			msr->write_val = 0;
			msr->available = 1;
			break;

		case 44:
			/* All stages done, clean up and exit. */
			kvm_vm_free(vm);
			return;
		}

		vcpu_set_cpuid(vcpu);

		/* Remember this stage's CPUID for the next iteration. */
		memcpy(prev_cpuid, vcpu->cpuid, kvm_cpuid2_size(vcpu->cpuid->nent));

		pr_debug("Stage %d: testing msr: 0x%x for %s\n", stage,
			 msr->idx, msr->write ? "write" : "read");

		vcpu_run(vcpu);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "unexpected exit reason: %u (%s)",
			    run->exit_reason, exit_reason_str(run->exit_reason));

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT_2(uc, "MSR = %lx, vector = %lx");
			return;
		case UCALL_DONE:
			break;
		default:
			TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
			return;
		}

		stage++;
		kvm_vm_free(vm);
	}
}
464
/*
 * Host-side driver for the hypercall tests.  Same stage-per-VM structure as
 * guest_test_msrs_access(): each stage programs one hypercall test case,
 * optionally sets the CPUID bit gating it, and expects the guest
 * (guest_hcall) to observe the given HV_STATUS_* (or #UD for fast
 * hypercalls without XMM input support).  CPUID changes accumulate across
 * stages via prev_cpuid.
 */
static void guest_test_hcalls_access(void)
{
	struct kvm_cpuid_entry2 *feat, *recomm, *dbg;
	struct kvm_cpuid2 *prev_cpuid = NULL;
	struct kvm_vcpu *vcpu;
	struct kvm_run *run;
	struct kvm_vm *vm;
	struct ucall uc;
	int stage = 0;
	vm_vaddr_t hcall_page, hcall_params;
	struct hcall_data *hcall;

	while (true) {
		vm = vm_create_with_one_vcpu(&vcpu, guest_hcall);

		vm_init_descriptor_tables(vm);
		vcpu_init_descriptor_tables(vcpu);

		/* Hypercall input/output */
		hcall_page = vm_vaddr_alloc_pages(vm, 2);
		memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());

		/* Page shared with the guest holding the test parameters. */
		hcall_params = vm_vaddr_alloc_page(vm);
		memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
		hcall = addr_gva2hva(vm, hcall_params);

		vcpu_args_set(vcpu, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
		vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);

		/*
		 * First stage starts from the cleared baseline; later stages
		 * restore the CPUID accumulated by all previous stages.
		 */
		if (!prev_cpuid) {
			vcpu_reset_hv_cpuid(vcpu);

			prev_cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent);
		} else {
			vcpu_init_cpuid(vcpu, prev_cpuid);
		}

		feat = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
		recomm = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO);
		dbg = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);

		run = vcpu->run;

		switch (stage) {
		case 0:
			/* Bogus hypercall code with hypercalls enabled. */
			feat->eax |= HV_MSR_HYPERCALL_AVAILABLE;
			hcall->control = 0xbeef;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;

		/* Post-message: denied until HV_POST_MESSAGES is set. */
		case 1:
			hcall->control = HVCALL_POST_MESSAGE;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 2:
			feat->ebx |= HV_POST_MESSAGES;
			hcall->control = HVCALL_POST_MESSAGE;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;

		/* Signal-event: denied until HV_SIGNAL_EVENTS is set. */
		case 3:
			hcall->control = HVCALL_SIGNAL_EVENT;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 4:
			feat->ebx |= HV_SIGNAL_EVENTS;
			hcall->control = HVCALL_SIGNAL_EVENT;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;

		/* SynDbg: needs both the syndbg cap bit and HV_DEBUGGING. */
		case 5:
			hcall->control = HVCALL_RESET_DEBUG_SESSION;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;
		case 6:
			dbg->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
			hcall->control = HVCALL_RESET_DEBUG_SESSION;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 7:
			feat->ebx |= HV_DEBUGGING;
			hcall->control = HVCALL_RESET_DEBUG_SESSION;
			hcall->expect = HV_STATUS_OPERATION_DENIED;
			break;

		/* TLB flush: gated by the remote-TLB-flush recommendation bit. */
		case 8:
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 9:
			recomm->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
			hcall->expect = HV_STATUS_SUCCESS;
			break;
		case 10:
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 11:
			recomm->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
			hcall->expect = HV_STATUS_SUCCESS;
			break;

		/* IPI hypercalls: gated by the cluster-IPI recommendation bit. */
		case 12:
			hcall->control = HVCALL_SEND_IPI;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 13:
			recomm->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
			hcall->control = HVCALL_SEND_IPI;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		case 14:
			/* Nothing in 'sparse banks' -> success */
			hcall->control = HVCALL_SEND_IPI_EX;
			hcall->expect = HV_STATUS_SUCCESS;
			break;

		/* Long-spin-wait notification: gated by recomm->ebx != 0. */
		case 15:
			hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 16:
			recomm->ebx = 0xfff;
			hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
			hcall->expect = HV_STATUS_SUCCESS;
			break;
		case 17:
			/* XMM fast hypercall */
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
			hcall->ud_expected = true;
			break;
		case 18:
			feat->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
			hcall->ud_expected = false;
			hcall->expect = HV_STATUS_SUCCESS;
			break;
		case 19:
			/* All stages done, clean up and exit. */
			kvm_vm_free(vm);
			return;
		}

		vcpu_set_cpuid(vcpu);

		/* Remember this stage's CPUID for the next iteration. */
		memcpy(prev_cpuid, vcpu->cpuid, kvm_cpuid2_size(vcpu->cpuid->nent));

		pr_debug("Stage %d: testing hcall: 0x%lx\n", stage, hcall->control);

		vcpu_run(vcpu);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "unexpected exit reason: %u (%s)",
			    run->exit_reason, exit_reason_str(run->exit_reason));

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT_2(uc, "arg1 = %lx, arg2 = %lx");
			return;
		case UCALL_DONE:
			break;
		default:
			TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
			return;
		}

		stage++;
		kvm_vm_free(vm);
	}
}
635
/* Run the MSR-access pass first, then the hypercall pass. */
int main(void)
{
	pr_info("Testing access to Hyper-V specific MSRs\n");
	guest_test_msrs_access();

	pr_info("Testing access to Hyper-V hypercalls\n");
	guest_test_hcalls_access();

	return 0;
}
644