// SPDX-License-Identifier: GPL-2.0-only
/*
 * tests for KVM_CAP_X86_USER_SPACE_MSR and KVM_X86_SET_MSR_FILTER
 *
 * Copyright (C) 2020, Amazon Inc.
 *
 * This is a functional test to verify that we can deflect MSR events
 * into user space.
 */
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"
#include "processor.h"

#define VCPU_ID 5

static u32 msr_reads, msr_writes;

static u8 bitmap_00000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_00000000_write[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_40000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_c0000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_c0000000_read[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_deadbeef[1] = { 0x1 };

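/*
 * Clear the bit for @msr in a filter bitmap. A set bit lets the access
 * through to the in-kernel emulation; a cleared bit deflects it to user
 * space. The mask assumes the low bits of the MSR number give its offset
 * within the filtered range, which holds for every MSR this test denies.
 */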
static void deny_msr(uint8_t *bitmap, u32 msr)
{
	u32 idx = msr & (KVM_MSR_FILTER_MAX_BITMAP_SIZE - 1);

	bitmap[idx / 8] &= ~(1 << (idx % 8));
}

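/*
 * Start from all-ones bitmaps (everything allowed), then punch holes for
 * the accesses that should exit to user space: writes to
 * MSR_IA32_POWER_CTL, and reads of MSR_SYSCALL_MASK and MSR_GS_BASE.
 */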
static void prepare_bitmaps(void)
{
	memset(bitmap_00000000, 0xff, sizeof(bitmap_00000000));
	memset(bitmap_00000000_write, 0xff, sizeof(bitmap_00000000_write));
	memset(bitmap_40000000, 0xff, sizeof(bitmap_40000000));
	memset(bitmap_c0000000, 0xff, sizeof(bitmap_c0000000));
	memset(bitmap_c0000000_read, 0xff, sizeof(bitmap_c0000000_read));

	deny_msr(bitmap_00000000_write, MSR_IA32_POWER_CTL);
	deny_msr(bitmap_c0000000_read, MSR_SYSCALL_MASK);
	deny_msr(bitmap_c0000000_read, MSR_GS_BASE);
}

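/*
 * With KVM_MSR_FILTER_DEFAULT_DENY, any MSR not covered by a range below
 * exits to user space with KVM_MSR_EXIT_REASON_FILTER, as do MSRs whose
 * bitmap bit is cleared. The single-bit range at 0xdeadbeef sets its one
 * bit so that accesses pass the filter and instead exit with
 * KVM_MSR_EXIT_REASON_UNKNOWN, since no such MSR exists in the kernel.
 */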
struct kvm_msr_filter filter = {
	.flags = KVM_MSR_FILTER_DEFAULT_DENY,
	.ranges = {
		{
			.flags = KVM_MSR_FILTER_READ,
			.base = 0x00000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_00000000,
		}, {
			.flags = KVM_MSR_FILTER_WRITE,
			.base = 0x00000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_00000000_write,
		}, {
			.flags = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE,
			.base = 0x40000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_40000000,
		}, {
			.flags = KVM_MSR_FILTER_READ,
			.base = 0xc0000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_c0000000_read,
		}, {
			.flags = KVM_MSR_FILTER_WRITE,
			.base = 0xc0000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_c0000000,
		}, {
			.flags = KVM_MSR_FILTER_WRITE | KVM_MSR_FILTER_READ,
			.base = 0xdeadbeef,
			.nmsrs = 1,
			.bitmap = bitmap_deadbeef,
		},
	},
};

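/* An allow-everything filter, used to switch filtering off mid-test. */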
struct kvm_msr_filter no_filter = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
};

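/*
 * Issue the same MSR accesses twice: once while the deny filter is active
 * (@trapped == true) and once after it has been dropped. The user space
 * rdmsr handler echoes the MSR index back as the read value, which is
 * what the GUEST_ASSERTs below key on.
 */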
static void guest_msr_calls(bool trapped)
{
	/* This goes into the in-kernel emulation */
	wrmsr(MSR_SYSCALL_MASK, 0);

	if (trapped) {
		/* This goes into user space emulation */
		GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) == MSR_SYSCALL_MASK);
		GUEST_ASSERT(rdmsr(MSR_GS_BASE) == MSR_GS_BASE);
	} else {
		GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) != MSR_SYSCALL_MASK);
		GUEST_ASSERT(rdmsr(MSR_GS_BASE) != MSR_GS_BASE);
	}

	/* If trapped == true, this goes into user space emulation */
	wrmsr(MSR_IA32_POWER_CTL, 0x1234);

	/* This goes into the in-kernel emulation */
	rdmsr(MSR_IA32_POWER_CTL);

	/* Invalid MSR, should always be handled by user space exit */
	GUEST_ASSERT(rdmsr(0xdeadbeef) == 0xdeadbeef);
	wrmsr(0xdeadbeef, 0x1234);
}

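/*
 * Guest entry point: run the MSR accesses with filtering enabled, sync
 * with the host so it can drop the filter, then run them again.
 */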
static void guest_code(void)
{
	guest_msr_calls(true);

	/*
	 * Disable MSR filtering, so that the kernel
	 * handles everything in the next round
	 */
	GUEST_SYNC(0);

	guest_msr_calls(false);

	GUEST_DONE();
}

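/*
 * Process a ucall from the guest. On UCALL_SYNC, swap in the empty filter
 * so the second round of guest accesses stays in the kernel. Returns 1
 * once the guest signals that it is done.
 */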
static int handle_ucall(struct kvm_vm *vm)
{
	struct ucall uc;

	switch (get_ucall(vm, VCPU_ID, &uc)) {
	case UCALL_ABORT:
		TEST_FAIL("Guest assertion not met");
		break;
	case UCALL_SYNC:
		vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &no_filter);
		break;
	case UCALL_DONE:
		return 1;
	default:
		TEST_FAIL("Unknown ucall %lu", uc.cmd);
	}

	return 0;
}

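/*
 * User space rdmsr handler: return the MSR index as the read value, and
 * check that each exit carries the expected reason, FILTER for the denied
 * MSRs and UNKNOWN for the nonexistent 0xdeadbeef.
 */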
static void handle_rdmsr(struct kvm_run *run)
{
	run->msr.data = run->msr.index;
	msr_reads++;

	if (run->msr.index == MSR_SYSCALL_MASK ||
	    run->msr.index == MSR_GS_BASE) {
		TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
			    "MSR read trap w/o access fault");
	}

	if (run->msr.index == 0xdeadbeef) {
		TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
			    "MSR deadbeef read trap w/o inval fault");
	}
}

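/*
 * User space wrmsr handler: discard the written value, but verify that
 * the data and exit reason match what the guest and the filter produce.
 */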
static void handle_wrmsr(struct kvm_run *run)
{
	/* ignore */
	msr_writes++;

	if (run->msr.index == MSR_IA32_POWER_CTL) {
		TEST_ASSERT(run->msr.data == 0x1234,
			    "MSR data for MSR_IA32_POWER_CTL incorrect");
		TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
			    "MSR_IA32_POWER_CTL trap w/o access fault");
	}

	if (run->msr.index == 0xdeadbeef) {
		TEST_ASSERT(run->msr.data == 0x1234,
			    "MSR data for deadbeef incorrect");
		TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
			    "deadbeef trap w/o inval fault");
	}
}

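/*
 * Host side: enable user space MSR handling for all three exit reasons,
 * install the deny-by-default filter, and run the guest until UCALL_DONE,
 * counting the rdmsr/wrmsr exits that reach user space along the way.
 */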
int main(int argc, char *argv[])
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_X86_USER_SPACE_MSR,
		.args[0] = KVM_MSR_EXIT_REASON_INVAL |
			   KVM_MSR_EXIT_REASON_UNKNOWN |
			   KVM_MSR_EXIT_REASON_FILTER,
	};
	struct kvm_vm *vm;
	struct kvm_run *run;
	int rc;

	/* Tell stdout not to buffer its content */
	setbuf(stdout, NULL);

	/* Create VM */
	vm = vm_create_default(VCPU_ID, 0, guest_code);
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
	run = vcpu_state(vm, VCPU_ID);

	rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
	TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is not available");
	vm_enable_cap(vm, &cap);

	rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
	TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is not available");

	prepare_bitmaps();
	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter);

	while (1) {
		rc = _vcpu_run(vm, VCPU_ID);

		TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);

		switch (run->exit_reason) {
		case KVM_EXIT_X86_RDMSR:
			handle_rdmsr(run);
			break;
		case KVM_EXIT_X86_WRMSR:
			handle_wrmsr(run);
			break;
		case KVM_EXIT_IO:
			if (handle_ucall(vm))
				goto done;
			break;
		}
	}

done:
	TEST_ASSERT(msr_reads == 4, "Handled 4 rdmsr in user space");
	TEST_ASSERT(msr_writes == 3, "Handled 3 wrmsr in user space");

	kvm_vm_free(vm);

	return 0;
}