// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020, Google LLC.
 *
 * Tests for KVM paravirtual feature disablement
 */
#include <asm/kvm_para.h>
#include <linux/kvm_para.h>
#include <stdint.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

extern unsigned char rdmsr_start;
extern unsigned char rdmsr_end;

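/*
 * The rdmsr_start/rdmsr_end labels bracket the faulting instruction so the
 * guest #GP handler can verify where the fault hit and skip past it.
 */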
static u64 do_rdmsr(u32 idx)
{
	u32 lo, hi;

	/* RDMSR loads the MSR indexed by ECX into EDX:EAX. */
	asm volatile("rdmsr_start: rdmsr;"
		     "rdmsr_end:"
		     : "=a"(lo), "=d"(hi)
		     : "c"(idx));

	return (((u64) hi) << 32) | lo;
}

extern unsigned char wrmsr_start;
extern unsigned char wrmsr_end;

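/* As above: the labels let the #GP handler identify and skip the WRMSR. */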
static void do_wrmsr(u32 idx, u64 val)
{
	u32 lo, hi;

	lo = val;
	hi = val >> 32;

	asm volatile("wrmsr_start: wrmsr;"
		     "wrmsr_end:"
		     : : "a"(lo), "c"(idx), "d"(hi));
}

/* Number of #GPs the guest has taken while probing disabled PV MSRs. */
static int nr_gp;

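/*
 * Every RDMSR/WRMSR of a disabled PV MSR is expected to #GP. Verify that the
 * fault hit one of the two bracketed instructions, count it, and move RIP
 * past the instruction so the guest can continue.
 */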
static void guest_gp_handler(struct ex_regs *regs)
{
	unsigned char *rip = (unsigned char *)regs->rip;
	bool r, w;

	r = rip == &rdmsr_start;
	w = rip == &wrmsr_start;
	GUEST_ASSERT(r || w);

	nr_gp++;

	if (r)
		regs->rip = (uint64_t)&rdmsr_end;
	else
		regs->rip = (uint64_t)&wrmsr_end;
}

struct msr_data {
	uint32_t idx;
	const char *name;
};

#define TEST_MSR(msr) { .idx = msr, .name = #msr }
#define UCALL_PR_MSR 0xdeadbeef
#define PR_MSR(msr) ucall(UCALL_PR_MSR, 1, msr)

/*
 * KVM paravirtual MSRs to test. Expect a #GP if any of these MSRs are read or
 * written, as the KVM_CPUID_FEATURES leaf is cleared.
 */
static struct msr_data msrs_to_test[] = {
	TEST_MSR(MSR_KVM_SYSTEM_TIME),
	TEST_MSR(MSR_KVM_SYSTEM_TIME_NEW),
	TEST_MSR(MSR_KVM_WALL_CLOCK),
	TEST_MSR(MSR_KVM_WALL_CLOCK_NEW),
	TEST_MSR(MSR_KVM_ASYNC_PF_EN),
	TEST_MSR(MSR_KVM_STEAL_TIME),
	TEST_MSR(MSR_KVM_PV_EOI_EN),
	TEST_MSR(MSR_KVM_POLL_CONTROL),
	TEST_MSR(MSR_KVM_ASYNC_PF_INT),
	TEST_MSR(MSR_KVM_ASYNC_PF_ACK),
};

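/*
 * Both the read and the write of a disabled PV MSR should take exactly one
 * #GP; nr_gp is reset between the two accesses.
 */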
static void test_msr(struct msr_data *msr)
{
	PR_MSR(msr);
	do_rdmsr(msr->idx);
	GUEST_ASSERT(READ_ONCE(nr_gp) == 1);

	nr_gp = 0;
	do_wrmsr(msr->idx, 0);
	GUEST_ASSERT(READ_ONCE(nr_gp) == 1);
	nr_gp = 0;
}

struct hcall_data {
	uint64_t nr;
	const char *name;
};

#define TEST_HCALL(hc) { .nr = hc, .name = #hc }
#define UCALL_PR_HCALL 0xdeadc0de
#define PR_HCALL(hc) ucall(UCALL_PR_HCALL, 1, hc)

/*
 * KVM hypercalls to test. Expect -KVM_ENOSYS when called, as the corresponding
 * features have been cleared in KVM_CPUID_FEATURES.
 */
static struct hcall_data hcalls_to_test[] = {
	TEST_HCALL(KVM_HC_KICK_CPU),
	TEST_HCALL(KVM_HC_SEND_IPI),
	TEST_HCALL(KVM_HC_SCHED_YIELD),
};

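/*
 * Unlike a disabled MSR, a disabled hypercall doesn't fault; it fails with
 * -KVM_ENOSYS.
 */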
static void test_hcall(struct hcall_data *hc)
{
	uint64_t r;

	PR_HCALL(hc);
	r = kvm_hypercall(hc->nr, 0, 0, 0, 0);
	GUEST_ASSERT(r == -KVM_ENOSYS);
}

static void guest_main(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(msrs_to_test); i++) {
		test_msr(&msrs_to_test[i]);
	}

	for (i = 0; i < ARRAY_SIZE(hcalls_to_test); i++) {
		test_hcall(&hcalls_to_test[i]);
	}

	GUEST_DONE();
}

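/*
 * Overwrite the KVM_CPUID_FEATURES leaf with an all-zero entry so that every
 * PV feature reads as unsupported.
 */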
static void clear_kvm_cpuid_features(struct kvm_cpuid2 *cpuid)
{
	struct kvm_cpuid_entry2 ent = {0};

	ent.function = KVM_CPUID_FEATURES;
	TEST_ASSERT(set_cpuid(cpuid, &ent),
		    "failed to clear KVM_CPUID_FEATURES leaf");
}

static void pr_msr(struct ucall *uc)
{
	struct msr_data *msr = (struct msr_data *)uc->args[0];

	pr_info("testing msr: %s (%#x)\n", msr->name, msr->idx);
}

static void pr_hcall(struct ucall *uc)
{
	struct hcall_data *hc = (struct hcall_data *)uc->args[0];

	pr_info("testing hcall: %s (%lu)\n", hc->name, hc->nr);
}

static void handle_abort(struct ucall *uc)
{
	TEST_FAIL("%s at %s:%ld", (const char *)uc->args[0],
		  __FILE__, uc->args[1]);
}

#define VCPU_ID 0

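/*
 * Run the vCPU until the guest signals UCALL_DONE, printing a line for each
 * MSR/hypercall under test and failing the test on a guest-side abort.
 */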
static void enter_guest(struct kvm_vm *vm)
{
	struct kvm_run *run;
	struct ucall uc;
	int r;

	run = vcpu_state(vm, VCPU_ID);

	while (true) {
		r = _vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(!r, "vcpu_run failed: %d\n", r);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "unexpected exit reason: %u (%s)",
			    run->exit_reason, exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_PR_MSR:
			pr_msr(&uc);
			break;
		case UCALL_PR_HCALL:
			pr_hcall(&uc);
			break;
		case UCALL_ABORT:
			handle_abort(&uc);
			return;
		case UCALL_DONE:
			return;
		}
	}
}

int main(void)
{
	struct kvm_enable_cap cap = {0};
	struct kvm_cpuid2 *best;
	struct kvm_vm *vm;

	if (!kvm_check_cap(KVM_CAP_ENFORCE_PV_FEATURE_CPUID)) {
		pr_info("will skip kvm paravirt restriction tests.\n");
		return 0;
	}

	vm = vm_create_default(VCPU_ID, 0, guest_main);

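	/* Have KVM reject PV MSR accesses and hypercalls whose feature bits are clear. */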
	cap.cap = KVM_CAP_ENFORCE_PV_FEATURE_CPUID;
	cap.args[0] = 1;
	vcpu_enable_cap(vm, VCPU_ID, &cap);

	best = kvm_get_supported_cpuid();
	clear_kvm_cpuid_features(best);
	vcpu_set_cpuid(vm, VCPU_ID, best);

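	/* Install the guest's #GP handler via the selftest IDT support. */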
	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vm, VCPU_ID);
	vm_handle_exception(vm, GP_VECTOR, guest_gp_handler);

	enter_guest(vm);
	kvm_vm_free(vm);
}