// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021, Red Hat Inc.
 *
 * Generic tests for KVM CPUID set/get ioctls
 */
#include <asm/kvm_para.h>
#include <linux/kvm_para.h>
#include <stdint.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

#define VCPU_ID 0

/* CPUIDs known to differ */
struct {
	u32 function;
	u32 index;
} mangled_cpuids[] = {
	/*
	 * These entries depend on the vCPU's XCR0 register and IA32_XSS MSR,
	 * which are not controlled for by this test.
	 */
	{.function = 0xd, .index = 0},
	{.function = 0xd, .index = 1},
};

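/*
 * Runs in the guest: execute CPUID for every function/index pair the host
 * passed in and assert that the outputs match what KVM reported.
 */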
static void test_guest_cpuids(struct kvm_cpuid2 *guest_cpuid)
{
	int i;
	u32 eax, ebx, ecx, edx;

	for (i = 0; i < guest_cpuid->nent; i++) {
		eax = guest_cpuid->entries[i].function;
		ecx = guest_cpuid->entries[i].index;

		cpuid(&eax, &ebx, &ecx, &edx);

		GUEST_ASSERT(eax == guest_cpuid->entries[i].eax &&
			     ebx == guest_cpuid->entries[i].ebx &&
			     ecx == guest_cpuid->entries[i].ecx &&
			     edx == guest_cpuid->entries[i].edx);
	}
}

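/*
 * Runs in the guest: query the hypervisor CPUID leaf (0x40000000) directly
 * and assert that EAX reports 0x40000001, the maximum hypervisor-range leaf
 * the guest expects KVM to advertise.
 */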
static void test_cpuid_40000000(struct kvm_cpuid2 *guest_cpuid)
{
	u32 eax = 0x40000000, ebx, ecx = 0, edx;

	cpuid(&eax, &ebx, &ecx, &edx);

	GUEST_ASSERT(eax == 0x40000001);
}

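/*
 * Guest entry point: sync with the host between stages and validate the
 * CPUID data that was copied into guest memory.
 */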
static void guest_main(struct kvm_cpuid2 *guest_cpuid)
{
	GUEST_SYNC(1);

	test_guest_cpuids(guest_cpuid);

	GUEST_SYNC(2);

	test_cpuid_40000000(guest_cpuid);

	GUEST_DONE();
}

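/*
 * Return true if the entry is one of the leaves listed in mangled_cpuids,
 * i.e. a leaf whose values are expected to differ between the two arrays.
 */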
static bool is_cpuid_mangled(struct kvm_cpuid_entry2 *entrie)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mangled_cpuids); i++) {
		if (mangled_cpuids[i].function == entrie->function &&
		    mangled_cpuids[i].index == entrie->index)
			return true;
	}

	return false;
}

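/*
 * Look up @entrie in @cpuid and assert that the register values match,
 * skipping leaves known to be mangled; fail the test if the leaf is
 * missing from @cpuid altogether.
 */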
static void check_cpuid(struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 *entrie)
{
	int i;

	for (i = 0; i < cpuid->nent; i++) {
		if (cpuid->entries[i].function == entrie->function &&
		    cpuid->entries[i].index == entrie->index) {
			if (is_cpuid_mangled(entrie))
				return;

			TEST_ASSERT(cpuid->entries[i].eax == entrie->eax &&
				    cpuid->entries[i].ebx == entrie->ebx &&
				    cpuid->entries[i].ecx == entrie->ecx &&
				    cpuid->entries[i].edx == entrie->edx,
				    "CPUID 0x%x.%x differ: 0x%x:0x%x:0x%x:0x%x vs 0x%x:0x%x:0x%x:0x%x",
				    entrie->function, entrie->index,
				    cpuid->entries[i].eax, cpuid->entries[i].ebx,
				    cpuid->entries[i].ecx, cpuid->entries[i].edx,
				    entrie->eax, entrie->ebx, entrie->ecx, entrie->edx);
			return;
		}
	}

	TEST_ASSERT(false, "CPUID 0x%x.%x not found", entrie->function, entrie->index);
}

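/* Verify the two CPUID arrays contain the same entries, checking both directions. */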
static void compare_cpuids(struct kvm_cpuid2 *cpuid1, struct kvm_cpuid2 *cpuid2)
{
	int i;

	for (i = 0; i < cpuid1->nent; i++)
		check_cpuid(cpuid2, &cpuid1->entries[i]);

	for (i = 0; i < cpuid2->nent; i++)
		check_cpuid(cpuid1, &cpuid2->entries[i]);
}

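/*
 * Run the vCPU until its next ucall: expect a sync at the given stage or a
 * "done" ucall, propagate guest assertion failures, and flag any other exit
 * as unexpected.
 */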
static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid, int stage)
{
	struct ucall uc;

	_vcpu_run(vm, vcpuid);

	switch (get_ucall(vm, vcpuid, &uc)) {
	case UCALL_SYNC:
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage + 1,
			    "Stage %d: Unexpected register values vmexit, got %lx",
			    stage + 1, (ulong)uc.args[1]);
		return;
	case UCALL_DONE:
		return;
	case UCALL_ABORT:
		TEST_ASSERT(false, "%s at %s:%ld\n\tvalues: %#lx, %#lx", (const char *)uc.args[0],
			    __FILE__, uc.args[1], uc.args[2], uc.args[3]);
	default:
		TEST_ASSERT(false, "Unexpected exit: %s",
			    exit_reason_str(vcpu_state(vm, vcpuid)->exit_reason));
	}
}

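/*
 * Copy @cpuid into guest-accessible memory so the guest can check it against
 * the CPUID instruction; the guest virtual address is returned via @p_gva.
 */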
struct kvm_cpuid2 *vcpu_alloc_cpuid(struct kvm_vm *vm, vm_vaddr_t *p_gva, struct kvm_cpuid2 *cpuid)
{
	int size = sizeof(*cpuid) + cpuid->nent * sizeof(cpuid->entries[0]);
	vm_vaddr_t gva = vm_vaddr_alloc(vm, size, KVM_UTIL_MIN_VADDR);
	struct kvm_cpuid2 *guest_cpuids = addr_gva2hva(vm, gva);

	memcpy(guest_cpuids, cpuid, size);

	*p_gva = gva;
	return guest_cpuids;
}

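/*
 * Host side: compare KVM's supported CPUID with the vCPU's CPUID, then hand
 * the vCPU's CPUID to the guest and let it verify the same values via the
 * CPUID instruction.
 */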
int main(void)
{
	struct kvm_cpuid2 *supp_cpuid, *cpuid2;
	vm_vaddr_t cpuid_gva;
	struct kvm_vm *vm;
	int stage;

	vm = vm_create_default(VCPU_ID, 0, guest_main);

	supp_cpuid = kvm_get_supported_cpuid();
	cpuid2 = vcpu_get_cpuid(vm, VCPU_ID);

	compare_cpuids(supp_cpuid, cpuid2);

	vcpu_alloc_cpuid(vm, &cpuid_gva, cpuid2);

	vcpu_args_set(vm, VCPU_ID, 1, cpuid_gva);

	for (stage = 0; stage < 3; stage++)
		run_vcpu(vm, VCPU_ID, stage);

	kvm_vm_free(vm);
}