1 /*
2 * tools/testing/selftests/kvm/lib/kvm_util.c
3 *
4 * Copyright (C) 2018, Google LLC.
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2.
7 */
8
9 #include "test_util.h"
10 #include "kvm_util.h"
11 #include "kvm_util_internal.h"
12
13 #include <assert.h>
14 #include <sys/mman.h>
15 #include <sys/types.h>
16 #include <sys/stat.h>
17 #include <linux/kernel.h>
18
19 #define KVM_DEV_PATH "/dev/kvm"
20
21 #define KVM_UTIL_PGS_PER_HUGEPG 512
22 #define KVM_UTIL_MIN_PADDR 0x2000
23
/*
 * Round the pointer x up to the next multiple of size, which must be a
 * power of 2.  Returns x unchanged when it is already aligned.
 */
static void *align(void *x, size_t size)
{
	TEST_ASSERT(size != 0 && !(size & (size - 1)),
		"size not a power of 2: %lu", size);

	/* Adding (size - 1) then masking the low bits rounds up. */
	return (void *) (((size_t) x + (size - 1)) & ~(size - 1));
}
32
33 /* Capability
34 *
35 * Input Args:
36 * cap - Capability
37 *
38 * Output Args: None
39 *
40 * Return:
41 * On success, the Value corresponding to the capability (KVM_CAP_*)
42 * specified by the value of cap. On failure a TEST_ASSERT failure
43 * is produced.
44 *
45 * Looks up and returns the value corresponding to the capability
46 * (KVM_CAP_*) given by cap.
47 */
kvm_check_cap(long cap)48 int kvm_check_cap(long cap)
49 {
50 int ret;
51 int kvm_fd;
52
53 kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
54 if (kvm_fd < 0)
55 exit(KSFT_SKIP);
56
57 ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
58 TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n"
59 " rc: %i errno: %i", ret, errno);
60
61 close(kvm_fd);
62
63 return ret;
64 }
65
66 /* VM Enable Capability
67 *
68 * Input Args:
69 * vm - Virtual Machine
70 * cap - Capability
71 *
72 * Output Args: None
73 *
74 * Return: On success, 0. On failure a TEST_ASSERT failure is produced.
75 *
76 * Enables a capability (KVM_CAP_*) on the VM.
77 */
vm_enable_cap(struct kvm_vm * vm,struct kvm_enable_cap * cap)78 int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
79 {
80 int ret;
81
82 ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap);
83 TEST_ASSERT(ret == 0, "KVM_ENABLE_CAP IOCTL failed,\n"
84 " rc: %i errno: %i", ret, errno);
85
86 return ret;
87 }
88
/*
 * Open /dev/kvm with the permissions given by perm (e.g. O_RDWR) and
 * create a new VM on it, storing the two file descriptors in vm->kvm_fd
 * and vm->fd respectively.  Skips the whole test (KSFT_SKIP) when
 * /dev/kvm is unavailable; asserts if KVM_CREATE_VM fails.
 */
static void vm_open(struct kvm_vm *vm, int perm)
{
	vm->kvm_fd = open(KVM_DEV_PATH, perm);
	if (vm->kvm_fd < 0)
		exit(KSFT_SKIP);

	/* Create VM. */
	vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, NULL);
	TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
		"rc: %i errno: %i", vm->fd, errno);
}
100
101 /* VM Create
102 *
103 * Input Args:
104 * mode - VM Mode (e.g. VM_MODE_FLAT48PG)
105 * phy_pages - Physical memory pages
106 * perm - permission
107 *
108 * Output Args: None
109 *
110 * Return:
111 * Pointer to opaque structure that describes the created VM.
112 *
113 * Creates a VM with the mode specified by mode (e.g. VM_MODE_FLAT48PG).
114 * When phy_pages is non-zero, a memory region of phy_pages physical pages
115 * is created and mapped starting at guest physical address 0. The file
116 * descriptor to control the created VM is created with the permissions
117 * given by perm (e.g. O_RDWR).
118 */
vm_create(enum vm_guest_mode mode,uint64_t phy_pages,int perm)119 struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
120 {
121 struct kvm_vm *vm;
122 int kvm_fd;
123
124 /* Allocate memory. */
125 vm = calloc(1, sizeof(*vm));
126 TEST_ASSERT(vm != NULL, "Insufficent Memory");
127
128 vm->mode = mode;
129 vm_open(vm, perm);
130
131 /* Setup mode specific traits. */
132 switch (vm->mode) {
133 case VM_MODE_FLAT48PG:
134 vm->page_size = 0x1000;
135 vm->page_shift = 12;
136
137 /* Limit to 48-bit canonical virtual addresses. */
138 vm->vpages_valid = sparsebit_alloc();
139 sparsebit_set_num(vm->vpages_valid,
140 0, (1ULL << (48 - 1)) >> vm->page_shift);
141 sparsebit_set_num(vm->vpages_valid,
142 (~((1ULL << (48 - 1)) - 1)) >> vm->page_shift,
143 (1ULL << (48 - 1)) >> vm->page_shift);
144
145 /* Limit physical addresses to 52-bits. */
146 vm->max_gfn = ((1ULL << 52) >> vm->page_shift) - 1;
147 break;
148
149 default:
150 TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", mode);
151 }
152
153 /* Allocate and setup memory for guest. */
154 vm->vpages_mapped = sparsebit_alloc();
155 if (phy_pages != 0)
156 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
157 0, 0, phy_pages, 0);
158
159 return vm;
160 }
161
162 /* VM Restart
163 *
164 * Input Args:
165 * vm - VM that has been released before
166 * perm - permission
167 *
168 * Output Args: None
169 *
170 * Reopens the file descriptors associated to the VM and reinstates the
171 * global state, such as the irqchip and the memory regions that are mapped
172 * into the guest.
173 */
kvm_vm_restart(struct kvm_vm * vmp,int perm)174 void kvm_vm_restart(struct kvm_vm *vmp, int perm)
175 {
176 struct userspace_mem_region *region;
177
178 vm_open(vmp, perm);
179 if (vmp->has_irqchip)
180 vm_create_irqchip(vmp);
181
182 for (region = vmp->userspace_mem_region_head; region;
183 region = region->next) {
184 int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, ®ion->region);
185 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
186 " rc: %i errno: %i\n"
187 " slot: %u flags: 0x%x\n"
188 " guest_phys_addr: 0x%lx size: 0x%lx",
189 ret, errno, region->region.slot, region->region.flags,
190 region->region.guest_phys_addr,
191 region->region.memory_size);
192 }
193 }
194
kvm_vm_get_dirty_log(struct kvm_vm * vm,int slot,void * log)195 void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
196 {
197 struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };
198 int ret;
199
200 ret = ioctl(vm->fd, KVM_GET_DIRTY_LOG, &args);
201 TEST_ASSERT(ret == 0, "%s: KVM_GET_DIRTY_LOG failed: %s",
202 strerror(-ret));
203 }
204
205 /* Userspace Memory Region Find
206 *
207 * Input Args:
208 * vm - Virtual Machine
209 * start - Starting VM physical address
210 * end - Ending VM physical address, inclusive.
211 *
212 * Output Args: None
213 *
214 * Return:
215 * Pointer to overlapping region, NULL if no such region.
216 *
217 * Searches for a region with any physical memory that overlaps with
218 * any portion of the guest physical addresses from start to end
219 * inclusive. If multiple overlapping regions exist, a pointer to any
220 * of the regions is returned. Null is returned only when no overlapping
221 * region exists.
222 */
userspace_mem_region_find(struct kvm_vm * vm,uint64_t start,uint64_t end)223 static struct userspace_mem_region *userspace_mem_region_find(
224 struct kvm_vm *vm, uint64_t start, uint64_t end)
225 {
226 struct userspace_mem_region *region;
227
228 for (region = vm->userspace_mem_region_head; region;
229 region = region->next) {
230 uint64_t existing_start = region->region.guest_phys_addr;
231 uint64_t existing_end = region->region.guest_phys_addr
232 + region->region.memory_size - 1;
233 if (start <= existing_end && end >= existing_start)
234 return region;
235 }
236
237 return NULL;
238 }
239
240 /* KVM Userspace Memory Region Find
241 *
242 * Input Args:
243 * vm - Virtual Machine
244 * start - Starting VM physical address
245 * end - Ending VM physical address, inclusive.
246 *
247 * Output Args: None
248 *
249 * Return:
250 * Pointer to overlapping region, NULL if no such region.
251 *
252 * Public interface to userspace_mem_region_find. Allows tests to look up
253 * the memslot datastructure for a given range of guest physical memory.
254 */
255 struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm * vm,uint64_t start,uint64_t end)256 kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
257 uint64_t end)
258 {
259 struct userspace_mem_region *region;
260
261 region = userspace_mem_region_find(vm, start, end);
262 if (!region)
263 return NULL;
264
265 return ®ion->region;
266 }
267
268 /* VCPU Find
269 *
270 * Input Args:
271 * vm - Virtual Machine
272 * vcpuid - VCPU ID
273 *
274 * Output Args: None
275 *
276 * Return:
277 * Pointer to VCPU structure
278 *
279 * Locates a vcpu structure that describes the VCPU specified by vcpuid and
280 * returns a pointer to it. Returns NULL if the VM doesn't contain a VCPU
281 * for the specified vcpuid.
282 */
vcpu_find(struct kvm_vm * vm,uint32_t vcpuid)283 struct vcpu *vcpu_find(struct kvm_vm *vm,
284 uint32_t vcpuid)
285 {
286 struct vcpu *vcpup;
287
288 for (vcpup = vm->vcpu_head; vcpup; vcpup = vcpup->next) {
289 if (vcpup->id == vcpuid)
290 return vcpup;
291 }
292
293 return NULL;
294 }
295
296 /* VM VCPU Remove
297 *
298 * Input Args:
299 * vm - Virtual Machine
300 * vcpuid - VCPU ID
301 *
302 * Output Args: None
303 *
304 * Return: None, TEST_ASSERT failures for all error conditions
305 *
306 * Within the VM specified by vm, removes the VCPU given by vcpuid.
307 */
vm_vcpu_rm(struct kvm_vm * vm,uint32_t vcpuid)308 static void vm_vcpu_rm(struct kvm_vm *vm, uint32_t vcpuid)
309 {
310 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
311 int ret;
312
313 ret = munmap(vcpu->state, sizeof(*vcpu->state));
314 TEST_ASSERT(ret == 0, "munmap of VCPU fd failed, rc: %i "
315 "errno: %i", ret, errno);
316 close(vcpu->fd);
317 TEST_ASSERT(ret == 0, "Close of VCPU fd failed, rc: %i "
318 "errno: %i", ret, errno);
319
320 if (vcpu->next)
321 vcpu->next->prev = vcpu->prev;
322 if (vcpu->prev)
323 vcpu->prev->next = vcpu->next;
324 else
325 vm->vcpu_head = vcpu->next;
326 free(vcpu);
327 }
328
kvm_vm_release(struct kvm_vm * vmp)329 void kvm_vm_release(struct kvm_vm *vmp)
330 {
331 int ret;
332
333 /* Free VCPUs. */
334 while (vmp->vcpu_head)
335 vm_vcpu_rm(vmp, vmp->vcpu_head->id);
336
337 /* Close file descriptor for the VM. */
338 ret = close(vmp->fd);
339 TEST_ASSERT(ret == 0, "Close of vm fd failed,\n"
340 " vmp->fd: %i rc: %i errno: %i", vmp->fd, ret, errno);
341
342 close(vmp->kvm_fd);
343 TEST_ASSERT(ret == 0, "Close of /dev/kvm fd failed,\n"
344 " vmp->kvm_fd: %i rc: %i errno: %i", vmp->kvm_fd, ret, errno);
345 }
346
347 /* Destroys and frees the VM pointed to by vmp.
348 */
kvm_vm_free(struct kvm_vm * vmp)349 void kvm_vm_free(struct kvm_vm *vmp)
350 {
351 int ret;
352
353 if (vmp == NULL)
354 return;
355
356 /* Free userspace_mem_regions. */
357 while (vmp->userspace_mem_region_head) {
358 struct userspace_mem_region *region
359 = vmp->userspace_mem_region_head;
360
361 region->region.memory_size = 0;
362 ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION,
363 ®ion->region);
364 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed, "
365 "rc: %i errno: %i", ret, errno);
366
367 vmp->userspace_mem_region_head = region->next;
368 sparsebit_free(®ion->unused_phy_pages);
369 ret = munmap(region->mmap_start, region->mmap_size);
370 TEST_ASSERT(ret == 0, "munmap failed, rc: %i errno: %i",
371 ret, errno);
372
373 free(region);
374 }
375
376 /* Free sparsebit arrays. */
377 sparsebit_free(&vmp->vpages_valid);
378 sparsebit_free(&vmp->vpages_mapped);
379
380 kvm_vm_release(vmp);
381
382 /* Free the structure describing the VM. */
383 free(vmp);
384 }
385
/* Memory Compare, host virtual to guest virtual
 *
 * Input Args:
 *   hva - Starting host virtual address
 *   vm - Virtual Machine
 *   gva - Starting guest virtual address
 *   len - number of bytes to compare
 *
 * Output Args: None
 *
 * Input/Output Args: None
 *
 * Return:
 *   Returns 0 if the bytes starting at hva for a length of len
 *   are equal the guest virtual bytes starting at gva.  Returns
 *   a value < 0, if bytes at hva are less than those at gva.
 *   Otherwise a value > 0 is returned.
 *
 * Compares the bytes starting at the host virtual address hva, for
 * a length of len, to the guest bytes starting at the guest virtual
 * address given by gva.  The guest side is translated page by page via
 * addr_gva2hva(), so comparisons never assume the guest range is
 * contiguous in host memory.
 */
int kvm_memcmp_hva_gva(void *hva,
	struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
{
	size_t amt;

	/* Compare a batch of bytes until either a match is found
	 * or all the bytes have been compared.
	 */
	for (uintptr_t offset = 0; offset < len; offset += amt) {
		uintptr_t ptr1 = (uintptr_t)hva + offset;

		/* Determine host address for guest virtual address
		 * at offset.
		 */
		uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);

		/* Determine amount to compare on this pass.
		 * Don't allow the comparsion to cross a page boundary,
		 * because the gva->hva translation is only valid within
		 * a single guest page.  Clamp amt to whichever of the two
		 * pointers hits its page boundary first.
		 */
		amt = len - offset;
		if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr1 % vm->page_size);
		if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr2 % vm->page_size);

		assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
		assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));

		/* Perform the comparison.  If there is a difference
		 * return that result to the caller, otherwise need
		 * to continue on looking for a mismatch.
		 */
		int ret = memcmp((void *)ptr1, (void *)ptr2, amt);
		if (ret != 0)
			return ret;
	}

	/* No mismatch found.  Let the caller know the two memory
	 * areas are equal.
	 */
	return 0;
}
450
451 /* Allocate an instance of struct kvm_cpuid2
452 *
453 * Input Args: None
454 *
455 * Output Args: None
456 *
457 * Return: A pointer to the allocated struct. The caller is responsible
458 * for freeing this struct.
459 *
460 * Since kvm_cpuid2 uses a 0-length array to allow a the size of the
461 * array to be decided at allocation time, allocation is slightly
462 * complicated. This function uses a reasonable default length for
463 * the array and performs the appropriate allocation.
464 */
allocate_kvm_cpuid2(void)465 static struct kvm_cpuid2 *allocate_kvm_cpuid2(void)
466 {
467 struct kvm_cpuid2 *cpuid;
468 int nent = 100;
469 size_t size;
470
471 size = sizeof(*cpuid);
472 size += nent * sizeof(struct kvm_cpuid_entry2);
473 cpuid = malloc(size);
474 if (!cpuid) {
475 perror("malloc");
476 abort();
477 }
478
479 cpuid->nent = nent;
480
481 return cpuid;
482 }
483
484 /* KVM Supported CPUID Get
485 *
486 * Input Args: None
487 *
488 * Output Args:
489 *
490 * Return: The supported KVM CPUID
491 *
492 * Get the guest CPUID supported by KVM.
493 */
kvm_get_supported_cpuid(void)494 struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
495 {
496 static struct kvm_cpuid2 *cpuid;
497 int ret;
498 int kvm_fd;
499
500 if (cpuid)
501 return cpuid;
502
503 cpuid = allocate_kvm_cpuid2();
504 kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
505 if (kvm_fd < 0)
506 exit(KSFT_SKIP);
507
508 ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
509 TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n",
510 ret, errno);
511
512 close(kvm_fd);
513 return cpuid;
514 }
515
516 /* Locate a cpuid entry.
517 *
518 * Input Args:
519 * cpuid: The cpuid.
520 * function: The function of the cpuid entry to find.
521 *
522 * Output Args: None
523 *
524 * Return: A pointer to the cpuid entry. Never returns NULL.
525 */
526 struct kvm_cpuid_entry2 *
kvm_get_supported_cpuid_index(uint32_t function,uint32_t index)527 kvm_get_supported_cpuid_index(uint32_t function, uint32_t index)
528 {
529 struct kvm_cpuid2 *cpuid;
530 struct kvm_cpuid_entry2 *entry = NULL;
531 int i;
532
533 cpuid = kvm_get_supported_cpuid();
534 for (i = 0; i < cpuid->nent; i++) {
535 if (cpuid->entries[i].function == function &&
536 cpuid->entries[i].index == index) {
537 entry = &cpuid->entries[i];
538 break;
539 }
540 }
541
542 TEST_ASSERT(entry, "Guest CPUID entry not found: (EAX=%x, ECX=%x).",
543 function, index);
544 return entry;
545 }
546
547 /* VM Userspace Memory Region Add
548 *
549 * Input Args:
550 * vm - Virtual Machine
551 * backing_src - Storage source for this region.
552 * NULL to use anonymous memory.
553 * guest_paddr - Starting guest physical address
554 * slot - KVM region slot
555 * npages - Number of physical pages
556 * flags - KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES)
557 *
558 * Output Args: None
559 *
560 * Return: None
561 *
562 * Allocates a memory area of the number of pages specified by npages
563 * and maps it to the VM specified by vm, at a starting physical address
564 * given by guest_paddr. The region is created with a KVM region slot
565 * given by slot, which must be unique and < KVM_MEM_SLOTS_NUM. The
566 * region is created with the flags given by flags.
567 */
vm_userspace_mem_region_add(struct kvm_vm * vm,enum vm_mem_backing_src_type src_type,uint64_t guest_paddr,uint32_t slot,uint64_t npages,uint32_t flags)568 void vm_userspace_mem_region_add(struct kvm_vm *vm,
569 enum vm_mem_backing_src_type src_type,
570 uint64_t guest_paddr, uint32_t slot, uint64_t npages,
571 uint32_t flags)
572 {
573 int ret;
574 unsigned long pmem_size = 0;
575 struct userspace_mem_region *region;
576 size_t huge_page_size = KVM_UTIL_PGS_PER_HUGEPG * vm->page_size;
577
578 TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
579 "address not on a page boundary.\n"
580 " guest_paddr: 0x%lx vm->page_size: 0x%x",
581 guest_paddr, vm->page_size);
582 TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)
583 <= vm->max_gfn, "Physical range beyond maximum "
584 "supported physical address,\n"
585 " guest_paddr: 0x%lx npages: 0x%lx\n"
586 " vm->max_gfn: 0x%lx vm->page_size: 0x%x",
587 guest_paddr, npages, vm->max_gfn, vm->page_size);
588
589 /* Confirm a mem region with an overlapping address doesn't
590 * already exist.
591 */
592 region = (struct userspace_mem_region *) userspace_mem_region_find(
593 vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
594 if (region != NULL)
595 TEST_ASSERT(false, "overlapping userspace_mem_region already "
596 "exists\n"
597 " requested guest_paddr: 0x%lx npages: 0x%lx "
598 "page_size: 0x%x\n"
599 " existing guest_paddr: 0x%lx size: 0x%lx",
600 guest_paddr, npages, vm->page_size,
601 (uint64_t) region->region.guest_phys_addr,
602 (uint64_t) region->region.memory_size);
603
604 /* Confirm no region with the requested slot already exists. */
605 for (region = vm->userspace_mem_region_head; region;
606 region = region->next) {
607 if (region->region.slot == slot)
608 break;
609 }
610 if (region != NULL)
611 TEST_ASSERT(false, "A mem region with the requested slot "
612 "already exists.\n"
613 " requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
614 " existing slot: %u paddr: 0x%lx size: 0x%lx",
615 slot, guest_paddr, npages,
616 region->region.slot,
617 (uint64_t) region->region.guest_phys_addr,
618 (uint64_t) region->region.memory_size);
619
620 /* Allocate and initialize new mem region structure. */
621 region = calloc(1, sizeof(*region));
622 TEST_ASSERT(region != NULL, "Insufficient Memory");
623 region->mmap_size = npages * vm->page_size;
624
625 /* Enough memory to align up to a huge page. */
626 if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
627 region->mmap_size += huge_page_size;
628 region->mmap_start = mmap(NULL, region->mmap_size,
629 PROT_READ | PROT_WRITE,
630 MAP_PRIVATE | MAP_ANONYMOUS
631 | (src_type == VM_MEM_SRC_ANONYMOUS_HUGETLB ? MAP_HUGETLB : 0),
632 -1, 0);
633 TEST_ASSERT(region->mmap_start != MAP_FAILED,
634 "test_malloc failed, mmap_start: %p errno: %i",
635 region->mmap_start, errno);
636
637 /* Align THP allocation up to start of a huge page. */
638 region->host_mem = align(region->mmap_start,
639 src_type == VM_MEM_SRC_ANONYMOUS_THP ? huge_page_size : 1);
640
641 /* As needed perform madvise */
642 if (src_type == VM_MEM_SRC_ANONYMOUS || src_type == VM_MEM_SRC_ANONYMOUS_THP) {
643 ret = madvise(region->host_mem, npages * vm->page_size,
644 src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
645 TEST_ASSERT(ret == 0, "madvise failed,\n"
646 " addr: %p\n"
647 " length: 0x%lx\n"
648 " src_type: %x",
649 region->host_mem, npages * vm->page_size, src_type);
650 }
651
652 region->unused_phy_pages = sparsebit_alloc();
653 sparsebit_set_num(region->unused_phy_pages,
654 guest_paddr >> vm->page_shift, npages);
655 region->region.slot = slot;
656 region->region.flags = flags;
657 region->region.guest_phys_addr = guest_paddr;
658 region->region.memory_size = npages * vm->page_size;
659 region->region.userspace_addr = (uintptr_t) region->host_mem;
660 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, ®ion->region);
661 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
662 " rc: %i errno: %i\n"
663 " slot: %u flags: 0x%x\n"
664 " guest_phys_addr: 0x%lx size: 0x%lx",
665 ret, errno, slot, flags,
666 guest_paddr, (uint64_t) region->region.memory_size);
667
668 /* Add to linked-list of memory regions. */
669 if (vm->userspace_mem_region_head)
670 vm->userspace_mem_region_head->prev = region;
671 region->next = vm->userspace_mem_region_head;
672 vm->userspace_mem_region_head = region;
673 }
674
675 /* Memslot to region
676 *
677 * Input Args:
678 * vm - Virtual Machine
679 * memslot - KVM memory slot ID
680 *
681 * Output Args: None
682 *
683 * Return:
684 * Pointer to memory region structure that describe memory region
685 * using kvm memory slot ID given by memslot. TEST_ASSERT failure
686 * on error (e.g. currently no memory region using memslot as a KVM
687 * memory slot ID).
688 */
memslot2region(struct kvm_vm * vm,uint32_t memslot)689 static struct userspace_mem_region *memslot2region(struct kvm_vm *vm,
690 uint32_t memslot)
691 {
692 struct userspace_mem_region *region;
693
694 for (region = vm->userspace_mem_region_head; region;
695 region = region->next) {
696 if (region->region.slot == memslot)
697 break;
698 }
699 if (region == NULL) {
700 fprintf(stderr, "No mem region with the requested slot found,\n"
701 " requested slot: %u\n", memslot);
702 fputs("---- vm dump ----\n", stderr);
703 vm_dump(stderr, vm, 2);
704 TEST_ASSERT(false, "Mem region not found");
705 }
706
707 return region;
708 }
709
710 /* VM Memory Region Flags Set
711 *
712 * Input Args:
713 * vm - Virtual Machine
714 * flags - Starting guest physical address
715 *
716 * Output Args: None
717 *
718 * Return: None
719 *
720 * Sets the flags of the memory region specified by the value of slot,
721 * to the values given by flags.
722 */
vm_mem_region_set_flags(struct kvm_vm * vm,uint32_t slot,uint32_t flags)723 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
724 {
725 int ret;
726 struct userspace_mem_region *region;
727
728 /* Locate memory region. */
729 region = memslot2region(vm, slot);
730
731 region->region.flags = flags;
732
733 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, ®ion->region);
734
735 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
736 " rc: %i errno: %i slot: %u flags: 0x%x",
737 ret, errno, slot, flags);
738 }
739
740 /* VCPU mmap Size
741 *
742 * Input Args: None
743 *
744 * Output Args: None
745 *
746 * Return:
747 * Size of VCPU state
748 *
749 * Returns the size of the structure pointed to by the return value
750 * of vcpu_state().
751 */
vcpu_mmap_sz(void)752 static int vcpu_mmap_sz(void)
753 {
754 int dev_fd, ret;
755
756 dev_fd = open(KVM_DEV_PATH, O_RDONLY);
757 if (dev_fd < 0)
758 exit(KSFT_SKIP);
759
760 ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
761 TEST_ASSERT(ret >= sizeof(struct kvm_run),
762 "%s KVM_GET_VCPU_MMAP_SIZE ioctl failed, rc: %i errno: %i",
763 __func__, ret, errno);
764
765 close(dev_fd);
766
767 return ret;
768 }
769
/* VM VCPU Add
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   pgd_memslot - Memory region slot for new virtual translation tables
 *   gdt_memslot - Memory region slot for the GDT
 *
 * Output Args: None
 *
 * Return: None
 *
 * Creates and adds to the VM specified by vm and virtual CPU with
 * the ID given by vcpuid, mmaps its run state, links it at the head
 * of the VM's VCPU list and performs arch-specific setup via
 * vcpu_setup().
 */
void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot, int gdt_memslot)
{
	struct vcpu *vcpu;

	/* Confirm a vcpu with the specified id doesn't already exist. */
	vcpu = vcpu_find(vm, vcpuid);
	if (vcpu != NULL)
		TEST_ASSERT(false, "vcpu with the specified id "
			"already exists,\n"
			" requested vcpuid: %u\n"
			" existing vcpuid: %u state: %p",
			vcpuid, vcpu->id, vcpu->state);

	/* Allocate and initialize new vcpu structure. */
	vcpu = calloc(1, sizeof(*vcpu));
	TEST_ASSERT(vcpu != NULL, "Insufficient Memory");
	vcpu->id = vcpuid;
	vcpu->fd = ioctl(vm->fd, KVM_CREATE_VCPU, vcpuid);
	TEST_ASSERT(vcpu->fd >= 0, "KVM_CREATE_VCPU failed, rc: %i errno: %i",
		vcpu->fd, errno);

	/* The kernel's mmap size must cover at least struct kvm_run. */
	TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->state), "vcpu mmap size "
		"smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
		vcpu_mmap_sz(), sizeof(*vcpu->state));
	vcpu->state = (struct kvm_run *) mmap(NULL, sizeof(*vcpu->state),
		PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
	TEST_ASSERT(vcpu->state != MAP_FAILED, "mmap vcpu_state failed, "
		"vcpu id: %u errno: %i", vcpuid, errno);

	/* Add to linked-list of VCPUs. */
	if (vm->vcpu_head)
		vm->vcpu_head->prev = vcpu;
	vcpu->next = vm->vcpu_head;
	vm->vcpu_head = vcpu;

	vcpu_setup(vm, vcpuid, pgd_memslot, gdt_memslot);
}
820
/* VM Virtual Address Unused Gap
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size (bytes)
 *   vaddr_min - Minimum Virtual Address
 *
 * Output Args: None
 *
 * Return:
 *   Lowest virtual address at or above vaddr_min, with at least
 *   sz unused bytes.  TEST_ASSERT failure if no area of at least
 *   size sz is available.
 *
 * Within the VM specified by vm, locates the lowest starting virtual
 * address >= vaddr_min, that has at least sz unallocated bytes.  A
 * TEST_ASSERT failure occurs for invalid input or no area of at least
 * sz unallocated bytes >= vaddr_min is available.  The search walks
 * the vpages_valid (canonical address ranges) and vpages_mapped
 * (already allocated) sparsebits in tandem.
 */
static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
	vm_vaddr_t vaddr_min)
{
	/* Number of pages needed, rounding sz up to a page multiple. */
	uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;

	/* Determine lowest permitted virtual page index. */
	uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
	/* Rounding up overflowed: no usable start address exists. */
	if ((pgidx_start * vm->page_size) < vaddr_min)
		goto no_va_found;

	/* Loop over section with enough valid virtual page indexes. */
	if (!sparsebit_is_set_num(vm->vpages_valid,
		pgidx_start, pages))
		pgidx_start = sparsebit_next_set_num(vm->vpages_valid,
			pgidx_start, pages);
	do {
		/*
		 * Are there enough unused virtual pages available at
		 * the currently proposed starting virtual page index.
		 * If not, adjust proposed starting index to next
		 * possible.
		 */
		if (sparsebit_is_clear_num(vm->vpages_mapped,
			pgidx_start, pages))
			goto va_found;
		pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped,
			pgidx_start, pages);
		if (pgidx_start == 0)
			goto no_va_found;

		/*
		 * If needed, adjust proposed starting virtual address,
		 * to next range of valid virtual addresses.
		 */
		if (!sparsebit_is_set_num(vm->vpages_valid,
			pgidx_start, pages)) {
			pgidx_start = sparsebit_next_set_num(
				vm->vpages_valid, pgidx_start, pages);
			if (pgidx_start == 0)
				goto no_va_found;
		}
	} while (pgidx_start != 0);

no_va_found:
	TEST_ASSERT(false, "No vaddr of specified pages available, "
		"pages: 0x%lx", pages);

	/* NOT REACHED */
	return -1;

va_found:
	/* Sanity-check the candidate range before returning it. */
	TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid,
		pgidx_start, pages),
		"Unexpected, invalid virtual page index range,\n"
		"  pgidx_start: 0x%lx\n"
		"  pages: 0x%lx",
		pgidx_start, pages);
	TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped,
		pgidx_start, pages),
		"Unexpected, pages already mapped,\n"
		"  pgidx_start: 0x%lx\n"
		"  pages: 0x%lx",
		pgidx_start, pages);

	return pgidx_start * vm->page_size;
}
906
/* VM Virtual Address Allocate
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size in bytes
 *   vaddr_min - Minimum starting virtual address
 *   data_memslot - Memory region slot for data pages
 *   pgd_memslot - Memory region slot for new virtual translation tables
 *
 * Output Args: None
 *
 * Return:
 *   Starting guest virtual address
 *
 * Allocates at least sz bytes within the virtual address space of the vm
 * given by vm.  The allocated bytes are mapped to a virtual address >=
 * the address given by vaddr_min.  Note that each allocation uses a
 * a unique set of pages, with the minimum real allocation being at least
 * a page.  Each page is backed by a freshly allocated guest physical
 * page and recorded in vm->vpages_mapped.
 */
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
	uint32_t data_memslot, uint32_t pgd_memslot)
{
	/* Round sz up to a whole number of pages. */
	uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);

	/* Ensure the top-level page tables exist before mapping. */
	virt_pgd_alloc(vm, pgd_memslot);

	/* Find an unused range of virtual page addresses of at least
	 * pages in length.
	 */
	vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);

	/* Map the virtual pages. */
	for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
		pages--, vaddr += vm->page_size) {
		vm_paddr_t paddr;

		paddr = vm_phy_page_alloc(vm, KVM_UTIL_MIN_PADDR, data_memslot);

		virt_pg_map(vm, vaddr, paddr, pgd_memslot);

		/* Record the page so future allocations skip it. */
		sparsebit_set(vm->vpages_mapped,
			vaddr >> vm->page_shift);
	}

	return vaddr_start;
}
954
955 /*
956 * Map a range of VM virtual address to the VM's physical address
957 *
958 * Input Args:
959 * vm - Virtual Machine
960 * vaddr - Virtuall address to map
961 * paddr - VM Physical Address
962 * size - The size of the range to map
963 * pgd_memslot - Memory region slot for new virtual translation tables
964 *
965 * Output Args: None
966 *
967 * Return: None
968 *
969 * Within the VM given by vm, creates a virtual translation for the
970 * page range starting at vaddr to the page range starting at paddr.
971 */
virt_map(struct kvm_vm * vm,uint64_t vaddr,uint64_t paddr,size_t size,uint32_t pgd_memslot)972 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
973 size_t size, uint32_t pgd_memslot)
974 {
975 size_t page_size = vm->page_size;
976 size_t npages = size / page_size;
977
978 TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
979 TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
980
981 while (npages--) {
982 virt_pg_map(vm, vaddr, paddr, pgd_memslot);
983 vaddr += page_size;
984 paddr += page_size;
985 }
986 }
987
988 /* Address VM Physical to Host Virtual
989 *
990 * Input Args:
991 * vm - Virtual Machine
992 * gpa - VM physical address
993 *
994 * Output Args: None
995 *
996 * Return:
997 * Equivalent host virtual address
998 *
999 * Locates the memory region containing the VM physical address given
1000 * by gpa, within the VM given by vm. When found, the host virtual
1001 * address providing the memory to the vm physical address is returned.
1002 * A TEST_ASSERT failure occurs if no region containing gpa exists.
1003 */
addr_gpa2hva(struct kvm_vm * vm,vm_paddr_t gpa)1004 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
1005 {
1006 struct userspace_mem_region *region;
1007 for (region = vm->userspace_mem_region_head; region;
1008 region = region->next) {
1009 if ((gpa >= region->region.guest_phys_addr)
1010 && (gpa <= (region->region.guest_phys_addr
1011 + region->region.memory_size - 1)))
1012 return (void *) ((uintptr_t) region->host_mem
1013 + (gpa - region->region.guest_phys_addr));
1014 }
1015
1016 TEST_ASSERT(false, "No vm physical memory at 0x%lx", gpa);
1017 return NULL;
1018 }
1019
1020 /* Address Host Virtual to VM Physical
1021 *
1022 * Input Args:
1023 * vm - Virtual Machine
1024 * hva - Host virtual address
1025 *
1026 * Output Args: None
1027 *
1028 * Return:
1029 * Equivalent VM physical address
1030 *
1031 * Locates the memory region containing the host virtual address given
1032 * by hva, within the VM given by vm. When found, the equivalent
1033 * VM physical address is returned. A TEST_ASSERT failure occurs if no
1034 * region containing hva exists.
1035 */
addr_hva2gpa(struct kvm_vm * vm,void * hva)1036 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
1037 {
1038 struct userspace_mem_region *region;
1039 for (region = vm->userspace_mem_region_head; region;
1040 region = region->next) {
1041 if ((hva >= region->host_mem)
1042 && (hva <= (region->host_mem
1043 + region->region.memory_size - 1)))
1044 return (vm_paddr_t) ((uintptr_t)
1045 region->region.guest_phys_addr
1046 + (hva - (uintptr_t) region->host_mem));
1047 }
1048
1049 TEST_ASSERT(false, "No mapping to a guest physical address, "
1050 "hva: %p", hva);
1051 return -1;
1052 }
1053
1054 /* VM Create IRQ Chip
1055 *
1056 * Input Args:
1057 * vm - Virtual Machine
1058 *
1059 * Output Args: None
1060 *
1061 * Return: None
1062 *
1063 * Creates an interrupt controller chip for the VM specified by vm.
1064 */
vm_create_irqchip(struct kvm_vm * vm)1065 void vm_create_irqchip(struct kvm_vm *vm)
1066 {
1067 int ret;
1068
1069 ret = ioctl(vm->fd, KVM_CREATE_IRQCHIP, 0);
1070 TEST_ASSERT(ret == 0, "KVM_CREATE_IRQCHIP IOCTL failed, "
1071 "rc: %i errno: %i", ret, errno);
1072
1073 vm->has_irqchip = true;
1074 }
1075
1076 /* VM VCPU State
1077 *
1078 * Input Args:
1079 * vm - Virtual Machine
1080 * vcpuid - VCPU ID
1081 *
1082 * Output Args: None
1083 *
1084 * Return:
1085 * Pointer to structure that describes the state of the VCPU.
1086 *
1087 * Locates and returns a pointer to a structure that describes the
1088 * state of the VCPU with the given vcpuid.
1089 */
vcpu_state(struct kvm_vm * vm,uint32_t vcpuid)1090 struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid)
1091 {
1092 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1093 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1094
1095 return vcpu->state;
1096 }
1097
/* VM VCPU Run
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return: None
 *
 * Asserting wrapper around _vcpu_run(): enters guest code on the
 * VCPU given by vcpuid and fails the test if KVM_RUN reports an error.
 */
void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
{
	int rc = _vcpu_run(vm, vcpuid);

	TEST_ASSERT(rc == 0, "KVM_RUN IOCTL failed, "
		"rc: %i errno: %i", rc, errno);
}
1117
_vcpu_run(struct kvm_vm * vm,uint32_t vcpuid)1118 int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
1119 {
1120 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1121 int rc;
1122
1123 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1124 do {
1125 rc = ioctl(vcpu->fd, KVM_RUN, NULL);
1126 } while (rc == -1 && errno == EINTR);
1127 return rc;
1128 }
1129
1130 /* VM VCPU Set MP State
1131 *
1132 * Input Args:
1133 * vm - Virtual Machine
1134 * vcpuid - VCPU ID
1135 * mp_state - mp_state to be set
1136 *
1137 * Output Args: None
1138 *
1139 * Return: None
1140 *
1141 * Sets the MP state of the VCPU given by vcpuid, to the state given
1142 * by mp_state.
1143 */
vcpu_set_mp_state(struct kvm_vm * vm,uint32_t vcpuid,struct kvm_mp_state * mp_state)1144 void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
1145 struct kvm_mp_state *mp_state)
1146 {
1147 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1148 int ret;
1149
1150 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1151
1152 ret = ioctl(vcpu->fd, KVM_SET_MP_STATE, mp_state);
1153 TEST_ASSERT(ret == 0, "KVM_SET_MP_STATE IOCTL failed, "
1154 "rc: %i errno: %i", ret, errno);
1155 }
1156
1157 /* VM VCPU Regs Get
1158 *
1159 * Input Args:
1160 * vm - Virtual Machine
1161 * vcpuid - VCPU ID
1162 *
1163 * Output Args:
1164 * regs - current state of VCPU regs
1165 *
1166 * Return: None
1167 *
1168 * Obtains the current register state for the VCPU specified by vcpuid
1169 * and stores it at the location given by regs.
1170 */
vcpu_regs_get(struct kvm_vm * vm,uint32_t vcpuid,struct kvm_regs * regs)1171 void vcpu_regs_get(struct kvm_vm *vm,
1172 uint32_t vcpuid, struct kvm_regs *regs)
1173 {
1174 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1175 int ret;
1176
1177 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1178
1179 /* Get the regs. */
1180 ret = ioctl(vcpu->fd, KVM_GET_REGS, regs);
1181 TEST_ASSERT(ret == 0, "KVM_GET_REGS failed, rc: %i errno: %i",
1182 ret, errno);
1183 }
1184
1185 /* VM VCPU Regs Set
1186 *
1187 * Input Args:
1188 * vm - Virtual Machine
1189 * vcpuid - VCPU ID
1190 * regs - Values to set VCPU regs to
1191 *
1192 * Output Args: None
1193 *
1194 * Return: None
1195 *
1196 * Sets the regs of the VCPU specified by vcpuid to the values
1197 * given by regs.
1198 */
vcpu_regs_set(struct kvm_vm * vm,uint32_t vcpuid,struct kvm_regs * regs)1199 void vcpu_regs_set(struct kvm_vm *vm,
1200 uint32_t vcpuid, struct kvm_regs *regs)
1201 {
1202 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1203 int ret;
1204
1205 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1206
1207 /* Set the regs. */
1208 ret = ioctl(vcpu->fd, KVM_SET_REGS, regs);
1209 TEST_ASSERT(ret == 0, "KVM_SET_REGS failed, rc: %i errno: %i",
1210 ret, errno);
1211 }
1212
/* VM VCPU Events Get
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args:
 *   events - current pending-event state of the VCPU
 *
 * Return: None
 *
 * Reads the pending vcpu event state (KVM_GET_VCPU_EVENTS) of the VCPU
 * given by vcpuid into the structure pointed to by events.
 */
void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
	struct kvm_vcpu_events *events)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	/* Get the events (previous comment said "regs" — copy-paste). */
	ret = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, events);
	TEST_ASSERT(ret == 0, "KVM_GET_VCPU_EVENTS, failed, rc: %i errno: %i",
		ret, errno);
}
1226
/* VM VCPU Events Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   events - event state to install on the VCPU
 *
 * Output Args: None
 *
 * Return: None
 *
 * Writes the vcpu event state given by events (KVM_SET_VCPU_EVENTS)
 * into the VCPU given by vcpuid.
 */
void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
	struct kvm_vcpu_events *events)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	/* Set the events (previous comment said "regs" — copy-paste). */
	ret = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, events);
	TEST_ASSERT(ret == 0, "KVM_SET_VCPU_EVENTS, failed, rc: %i errno: %i",
		ret, errno);
}
1240
1241 /* VCPU Get MSR
1242 *
1243 * Input Args:
1244 * vm - Virtual Machine
1245 * vcpuid - VCPU ID
1246 * msr_index - Index of MSR
1247 *
1248 * Output Args: None
1249 *
1250 * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced.
1251 *
1252 * Get value of MSR for VCPU.
1253 */
vcpu_get_msr(struct kvm_vm * vm,uint32_t vcpuid,uint64_t msr_index)1254 uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index)
1255 {
1256 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1257 struct {
1258 struct kvm_msrs header;
1259 struct kvm_msr_entry entry;
1260 } buffer = {};
1261 int r;
1262
1263 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1264 buffer.header.nmsrs = 1;
1265 buffer.entry.index = msr_index;
1266 r = ioctl(vcpu->fd, KVM_GET_MSRS, &buffer.header);
1267 TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n"
1268 " rc: %i errno: %i", r, errno);
1269
1270 return buffer.entry.data;
1271 }
1272
1273 /* VCPU Set MSR
1274 *
1275 * Input Args:
1276 * vm - Virtual Machine
1277 * vcpuid - VCPU ID
1278 * msr_index - Index of MSR
1279 * msr_value - New value of MSR
1280 *
1281 * Output Args: None
1282 *
1283 * Return: On success, nothing. On failure a TEST_ASSERT is produced.
1284 *
1285 * Set value of MSR for VCPU.
1286 */
vcpu_set_msr(struct kvm_vm * vm,uint32_t vcpuid,uint64_t msr_index,uint64_t msr_value)1287 void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
1288 uint64_t msr_value)
1289 {
1290 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1291 struct {
1292 struct kvm_msrs header;
1293 struct kvm_msr_entry entry;
1294 } buffer = {};
1295 int r;
1296
1297 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1298 memset(&buffer, 0, sizeof(buffer));
1299 buffer.header.nmsrs = 1;
1300 buffer.entry.index = msr_index;
1301 buffer.entry.data = msr_value;
1302 r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header);
1303 TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n"
1304 " rc: %i errno: %i", r, errno);
1305 }
1306
1307 /* VM VCPU Args Set
1308 *
1309 * Input Args:
1310 * vm - Virtual Machine
1311 * vcpuid - VCPU ID
1312 * num - number of arguments
1313 * ... - arguments, each of type uint64_t
1314 *
1315 * Output Args: None
1316 *
1317 * Return: None
1318 *
1319 * Sets the first num function input arguments to the values
1320 * given as variable args. Each of the variable args is expected to
1321 * be of type uint64_t.
1322 */
vcpu_args_set(struct kvm_vm * vm,uint32_t vcpuid,unsigned int num,...)1323 void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
1324 {
1325 va_list ap;
1326 struct kvm_regs regs;
1327
1328 TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n"
1329 " num: %u\n",
1330 num);
1331
1332 va_start(ap, num);
1333 vcpu_regs_get(vm, vcpuid, ®s);
1334
1335 if (num >= 1)
1336 regs.rdi = va_arg(ap, uint64_t);
1337
1338 if (num >= 2)
1339 regs.rsi = va_arg(ap, uint64_t);
1340
1341 if (num >= 3)
1342 regs.rdx = va_arg(ap, uint64_t);
1343
1344 if (num >= 4)
1345 regs.rcx = va_arg(ap, uint64_t);
1346
1347 if (num >= 5)
1348 regs.r8 = va_arg(ap, uint64_t);
1349
1350 if (num >= 6)
1351 regs.r9 = va_arg(ap, uint64_t);
1352
1353 vcpu_regs_set(vm, vcpuid, ®s);
1354 va_end(ap);
1355 }
1356
/* VM VCPU System Regs Get
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args:
 *   sregs - current state of VCPU system regs
 *
 * Return: None
 *
 * Obtains the current system register state for the VCPU specified by
 * vcpuid and stores it at the location given by sregs.
 */
void vcpu_sregs_get(struct kvm_vm *vm,
	uint32_t vcpuid, struct kvm_sregs *sregs)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	/* Get the sregs (was a duplicated "Get the regs." comment). */
	ret = ioctl(vcpu->fd, KVM_GET_SREGS, sregs);
	TEST_ASSERT(ret == 0, "KVM_GET_SREGS failed, rc: %i errno: %i",
		ret, errno);
}
1385
/* VM VCPU System Regs Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   sregs - Values to set VCPU system regs to
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the system regs of the VCPU specified by vcpuid to the values
 * given by sregs. Asserts on failure.
 */
void vcpu_sregs_set(struct kvm_vm *vm,
	uint32_t vcpuid, struct kvm_sregs *sregs)
{
	int ret = _vcpu_sregs_set(vm, vcpuid, sregs);

	/* Fix: message previously claimed KVM_RUN failed. */
	TEST_ASSERT(ret == 0, "KVM_SET_SREGS IOCTL failed, "
		"rc: %i errno: %i", ret, errno);
}
1407
_vcpu_sregs_set(struct kvm_vm * vm,uint32_t vcpuid,struct kvm_sregs * sregs)1408 int _vcpu_sregs_set(struct kvm_vm *vm,
1409 uint32_t vcpuid, struct kvm_sregs *sregs)
1410 {
1411 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1412 int ret;
1413
1414 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1415
1416 /* Get the regs. */
1417 return ioctl(vcpu->fd, KVM_SET_SREGS, sregs);
1418 }
1419
1420 /* VCPU Ioctl
1421 *
1422 * Input Args:
1423 * vm - Virtual Machine
1424 * vcpuid - VCPU ID
1425 * cmd - Ioctl number
1426 * arg - Argument to pass to the ioctl
1427 *
1428 * Return: None
1429 *
1430 * Issues an arbitrary ioctl on a VCPU fd.
1431 */
vcpu_ioctl(struct kvm_vm * vm,uint32_t vcpuid,unsigned long cmd,void * arg)1432 void vcpu_ioctl(struct kvm_vm *vm,
1433 uint32_t vcpuid, unsigned long cmd, void *arg)
1434 {
1435 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1436 int ret;
1437
1438 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1439
1440 ret = ioctl(vcpu->fd, cmd, arg);
1441 TEST_ASSERT(ret == 0, "vcpu ioctl %lu failed, rc: %i errno: %i (%s)",
1442 cmd, ret, errno, strerror(errno));
1443 }
1444
1445 /* VM Ioctl
1446 *
1447 * Input Args:
1448 * vm - Virtual Machine
1449 * cmd - Ioctl number
1450 * arg - Argument to pass to the ioctl
1451 *
1452 * Return: None
1453 *
1454 * Issues an arbitrary ioctl on a VM fd.
1455 */
vm_ioctl(struct kvm_vm * vm,unsigned long cmd,void * arg)1456 void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
1457 {
1458 int ret;
1459
1460 ret = ioctl(vm->fd, cmd, arg);
1461 TEST_ASSERT(ret == 0, "vm ioctl %lu failed, rc: %i errno: %i (%s)",
1462 cmd, ret, errno, strerror(errno));
1463 }
1464
/* VM Dump
 *
 * Input Args:
 *   vm - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the current state of the VM given by vm, to the FILE stream
 * given by stream: mode, fd, page size, each memory region with its
 * unused-physical-page bitmap, the mapped-virtual-page bitmap, the
 * page tables (if created), and every VCPU.
 */
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	struct userspace_mem_region *region;
	struct vcpu *vcpu;

	fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
	fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
	fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
	fprintf(stream, "%*sMem Regions:\n", indent, "");
	/* One line per region plus its free-page bitmap. */
	for (region = vm->userspace_mem_region_head; region;
		region = region->next) {
		fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
			"host_virt: %p\n", indent + 2, "",
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size,
			region->host_mem);
		fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
		sparsebit_dump(stream, region->unused_phy_pages, 0);
	}
	fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
	sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
	fprintf(stream, "%*spgd_created: %u\n", indent, "",
		vm->pgd_created);
	/* Translation tables are only dumped once a pgd exists. */
	if (vm->pgd_created) {
		fprintf(stream, "%*sVirtual Translation Tables:\n",
			indent + 2, "");
		virt_dump(stream, vm, indent + 4);
	}
	fprintf(stream, "%*sVCPUs:\n", indent, "");
	for (vcpu = vm->vcpu_head; vcpu; vcpu = vcpu->next)
		vcpu_dump(stream, vm, vcpu->id, indent + 2);
}
1511
1512 /* VM VCPU Dump
1513 *
1514 * Input Args:
1515 * vm - Virtual Machine
1516 * vcpuid - VCPU ID
1517 * indent - Left margin indent amount
1518 *
1519 * Output Args:
1520 * stream - Output FILE stream
1521 *
1522 * Return: None
1523 *
1524 * Dumps the current state of the VCPU specified by vcpuid, within the VM
1525 * given by vm, to the FILE stream given by stream.
1526 */
vcpu_dump(FILE * stream,struct kvm_vm * vm,uint32_t vcpuid,uint8_t indent)1527 void vcpu_dump(FILE *stream, struct kvm_vm *vm,
1528 uint32_t vcpuid, uint8_t indent)
1529 {
1530 struct kvm_regs regs;
1531 struct kvm_sregs sregs;
1532
1533 fprintf(stream, "%*scpuid: %u\n", indent, "", vcpuid);
1534
1535 fprintf(stream, "%*sregs:\n", indent + 2, "");
1536 vcpu_regs_get(vm, vcpuid, ®s);
1537 regs_dump(stream, ®s, indent + 4);
1538
1539 fprintf(stream, "%*ssregs:\n", indent + 2, "");
1540 vcpu_sregs_get(vm, vcpuid, &sregs);
1541 sregs_dump(stream, &sregs, indent + 4);
1542 }
1543
/* Known KVM exit reasons
 *
 * Table mapping KVM_EXIT_* numeric codes to printable names; consumed
 * by exit_reason_str() below. Codes absent from this table are
 * reported as "Unknown".
 */
static struct exit_reason {
	unsigned int reason;	/* KVM_EXIT_* code from kvm_run->exit_reason */
	const char *name;	/* human-readable label */
} exit_reasons_known[] = {
	{KVM_EXIT_UNKNOWN, "UNKNOWN"},
	{KVM_EXIT_EXCEPTION, "EXCEPTION"},
	{KVM_EXIT_IO, "IO"},
	{KVM_EXIT_HYPERCALL, "HYPERCALL"},
	{KVM_EXIT_DEBUG, "DEBUG"},
	{KVM_EXIT_HLT, "HLT"},
	{KVM_EXIT_MMIO, "MMIO"},
	{KVM_EXIT_IRQ_WINDOW_OPEN, "IRQ_WINDOW_OPEN"},
	{KVM_EXIT_SHUTDOWN, "SHUTDOWN"},
	{KVM_EXIT_FAIL_ENTRY, "FAIL_ENTRY"},
	{KVM_EXIT_INTR, "INTR"},
	{KVM_EXIT_SET_TPR, "SET_TPR"},
	{KVM_EXIT_TPR_ACCESS, "TPR_ACCESS"},
	{KVM_EXIT_S390_SIEIC, "S390_SIEIC"},
	{KVM_EXIT_S390_RESET, "S390_RESET"},
	{KVM_EXIT_DCR, "DCR"},
	{KVM_EXIT_NMI, "NMI"},
	{KVM_EXIT_INTERNAL_ERROR, "INTERNAL_ERROR"},
	{KVM_EXIT_OSI, "OSI"},
	{KVM_EXIT_PAPR_HCALL, "PAPR_HCALL"},
#ifdef KVM_EXIT_MEMORY_NOT_PRESENT
	{KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"},
#endif
};
1573
1574 /* Exit Reason String
1575 *
1576 * Input Args:
1577 * exit_reason - Exit reason
1578 *
1579 * Output Args: None
1580 *
1581 * Return:
1582 * Constant string pointer describing the exit reason.
1583 *
1584 * Locates and returns a constant string that describes the KVM exit
1585 * reason given by exit_reason. If no such string is found, a constant
1586 * string of "Unknown" is returned.
1587 */
exit_reason_str(unsigned int exit_reason)1588 const char *exit_reason_str(unsigned int exit_reason)
1589 {
1590 unsigned int n1;
1591
1592 for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) {
1593 if (exit_reason == exit_reasons_known[n1].reason)
1594 return exit_reasons_known[n1].name;
1595 }
1596
1597 return "Unknown";
1598 }
1599
1600 /* Physical Page Allocate
1601 *
1602 * Input Args:
1603 * vm - Virtual Machine
1604 * paddr_min - Physical address minimum
1605 * memslot - Memory region to allocate page from
1606 *
1607 * Output Args: None
1608 *
1609 * Return:
1610 * Starting physical address
1611 *
1612 * Within the VM specified by vm, locates an available physical page
1613 * at or above paddr_min. If found, the page is marked as in use
1614 * and its address is returned. A TEST_ASSERT failure occurs if no
1615 * page is available at or above paddr_min.
1616 */
vm_phy_page_alloc(struct kvm_vm * vm,vm_paddr_t paddr_min,uint32_t memslot)1617 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm,
1618 vm_paddr_t paddr_min, uint32_t memslot)
1619 {
1620 struct userspace_mem_region *region;
1621 sparsebit_idx_t pg;
1622
1623 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
1624 "not divisible by page size.\n"
1625 " paddr_min: 0x%lx page_size: 0x%x",
1626 paddr_min, vm->page_size);
1627
1628 /* Locate memory region. */
1629 region = memslot2region(vm, memslot);
1630
1631 /* Locate next available physical page at or above paddr_min. */
1632 pg = paddr_min >> vm->page_shift;
1633
1634 if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
1635 pg = sparsebit_next_set(region->unused_phy_pages, pg);
1636 if (pg == 0) {
1637 fprintf(stderr, "No guest physical page available, "
1638 "paddr_min: 0x%lx page_size: 0x%x memslot: %u",
1639 paddr_min, vm->page_size, memslot);
1640 fputs("---- vm dump ----\n", stderr);
1641 vm_dump(stderr, vm, 2);
1642 abort();
1643 }
1644 }
1645
1646 /* Specify page as in use and return its address. */
1647 sparsebit_clear(region->unused_phy_pages, pg);
1648
1649 return pg * vm->page_size;
1650 }
1651
1652 /* Address Guest Virtual to Host Virtual
1653 *
1654 * Input Args:
1655 * vm - Virtual Machine
1656 * gva - VM virtual address
1657 *
1658 * Output Args: None
1659 *
1660 * Return:
1661 * Equivalent host virtual address
1662 */
addr_gva2hva(struct kvm_vm * vm,vm_vaddr_t gva)1663 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
1664 {
1665 return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
1666 }
1667
guest_args_read(struct kvm_vm * vm,uint32_t vcpu_id,struct guest_args * args)1668 void guest_args_read(struct kvm_vm *vm, uint32_t vcpu_id,
1669 struct guest_args *args)
1670 {
1671 struct kvm_run *run = vcpu_state(vm, vcpu_id);
1672 struct kvm_regs regs;
1673
1674 memset(®s, 0, sizeof(regs));
1675 vcpu_regs_get(vm, vcpu_id, ®s);
1676
1677 args->port = run->io.port;
1678 args->arg0 = regs.rdi;
1679 args->arg1 = regs.rsi;
1680 }
1681