Lines Matching refs:vm
78 int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap) in vm_enable_cap() argument
82 ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap); in vm_enable_cap()
102 int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id, in vcpu_enable_cap() argument
105 struct vcpu *vcpu = vcpu_find(vm, vcpu_id); in vcpu_enable_cap()
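Both wrappers above funnel KVM_ENABLE_CAP to the VM or vCPU fd and TEST_ASSERT on failure. A minimal sketch of their use, assuming the kvm_util.h declarations match the signatures shown; the capability constant is illustrative:

        struct kvm_enable_cap cap = {
                .cap = KVM_CAP_HYPERV_SYNIC,    /* illustrative vCPU-scoped cap */
        };

        vcpu_enable_cap(vm, 0, &cap);   /* VM-scoped caps go via vm_enable_cap(vm, &cap) */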
117 static void vm_open(struct kvm_vm *vm, int perm) in vm_open() argument
119 vm->kvm_fd = open(KVM_DEV_PATH, perm); in vm_open()
120 if (vm->kvm_fd < 0) in vm_open()
128 vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, vm->type); in vm_open()
129 TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, " in vm_open()
130 "rc: %i errno: %i", vm->fd, errno); in vm_open()
185 struct kvm_vm *vm; in vm_create() local
190 vm = calloc(1, sizeof(*vm)); in vm_create()
191 TEST_ASSERT(vm != NULL, "Insufficient Memory"); in vm_create()
193 INIT_LIST_HEAD(&vm->vcpus); in vm_create()
194 INIT_LIST_HEAD(&vm->userspace_mem_regions); in vm_create()
196 vm->mode = mode; in vm_create()
197 vm->type = 0; in vm_create()
199 vm->pa_bits = vm_guest_mode_params[mode].pa_bits; in vm_create()
200 vm->va_bits = vm_guest_mode_params[mode].va_bits; in vm_create()
201 vm->page_size = vm_guest_mode_params[mode].page_size; in vm_create()
202 vm->page_shift = vm_guest_mode_params[mode].page_shift; in vm_create()
205 switch (vm->mode) { in vm_create()
207 vm->pgtable_levels = 4; in vm_create()
210 vm->pgtable_levels = 3; in vm_create()
213 vm->pgtable_levels = 4; in vm_create()
216 vm->pgtable_levels = 3; in vm_create()
219 vm->pgtable_levels = 4; in vm_create()
222 vm->pgtable_levels = 3; in vm_create()
226 kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits); in vm_create()
232 TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57, in vm_create()
234 vm->va_bits); in vm_create()
236 vm->pa_bits); in vm_create()
237 vm->pgtable_levels = 4; in vm_create()
238 vm->va_bits = 48; in vm_create()
248 if (vm->pa_bits != 40) in vm_create()
249 vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits); in vm_create()
252 vm_open(vm, perm); in vm_create()
255 vm->vpages_valid = sparsebit_alloc(); in vm_create()
256 sparsebit_set_num(vm->vpages_valid, in vm_create()
257 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift); in vm_create()
258 sparsebit_set_num(vm->vpages_valid, in vm_create()
259 (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift, in vm_create()
260 (1ULL << (vm->va_bits - 1)) >> vm->page_shift); in vm_create()
263 vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; in vm_create()
266 vm->vpages_mapped = sparsebit_alloc(); in vm_create()
268 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, in vm_create()
271 return vm; in vm_create()
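vm_create() above is the usual entry point for a test: it sizes the address space from the mode table, opens /dev/kvm, and adds memslot 0 for page tables and stack. A sketch of typical setup, assuming the conventional vm_create(mode, phy_pages, perm) signature and the VM_MODE_DEFAULT / DEFAULT_GUEST_PHY_PAGES constants from kvm_util.h; later sketches continue in this context:

        #include <fcntl.h>
        #include "kvm_util.h"
        #include "test_util.h"

        struct kvm_vm *vm;

        /* VM_MODE_DEFAULT picks suitable pa/va bits for the host arch. */
        vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);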
308 void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log) in kvm_vm_get_dirty_log() argument
313 ret = ioctl(vm->fd, KVM_GET_DIRTY_LOG, &args); in kvm_vm_get_dirty_log()
318 void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log, in kvm_vm_clear_dirty_log() argument
326 ret = ioctl(vm->fd, KVM_CLEAR_DIRTY_LOG, &args); in kvm_vm_clear_dirty_log()
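Both dirty-log helpers take a caller-supplied bitmap sized at one bit per page in the slot. A sketch, where slot and host_num_pages are hypothetical test parameters:

        unsigned long *bmap = calloc((host_num_pages + 8 * sizeof(*bmap) - 1) /
                                     (8 * sizeof(*bmap)), sizeof(*bmap));

        kvm_vm_get_dirty_log(vm, slot, bmap);
        /* The clear variant additionally takes the page range to reset. */
        kvm_vm_clear_dirty_log(vm, slot, bmap, 0, host_num_pages);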
351 userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end) in userspace_mem_region_find() argument
355 list_for_each_entry(region, &vm->userspace_mem_regions, list) { in userspace_mem_region_find()
383 kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start, in kvm_userspace_memory_region_find() argument
388 region = userspace_mem_region_find(vm, start, end); in kvm_userspace_memory_region_find()
411 struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid) in vcpu_find() argument
415 list_for_each_entry(vcpu, &vm->vcpus, list) { in vcpu_find()
467 static void __vm_mem_region_delete(struct kvm_vm *vm, in __vm_mem_region_delete() argument
475 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in __vm_mem_region_delete()
533 int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len) in kvm_memcmp_hva_gva() argument
548 uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset); in kvm_memcmp_hva_gva()
555 if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift)) in kvm_memcmp_hva_gva()
556 amt = vm->page_size - (ptr1 % vm->page_size); in kvm_memcmp_hva_gva()
557 if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift)) in kvm_memcmp_hva_gva()
558 amt = vm->page_size - (ptr2 % vm->page_size); in kvm_memcmp_hva_gva()
560 assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift)); in kvm_memcmp_hva_gva()
561 assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift)); in kvm_memcmp_hva_gva()
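kvm_memcmp_hva_gva() compares a host range against a guest-virtual range memcmp-style; the chunking and assertions above keep each compared piece within a single page on both sides. Typical use, with host_buf, guest_gva and len standing in for test-specific values:

        TEST_ASSERT(!kvm_memcmp_hva_gva(host_buf, vm, guest_gva, len),
                    "Guest memory does not match the host pattern");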
602 void vm_userspace_mem_region_add(struct kvm_vm *vm, in vm_userspace_mem_region_add() argument
609 size_t huge_page_size = KVM_UTIL_PGS_PER_HUGEPG * vm->page_size; in vm_userspace_mem_region_add()
612 TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages, in vm_userspace_mem_region_add()
614 "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages)); in vm_userspace_mem_region_add()
616 TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical " in vm_userspace_mem_region_add()
619 guest_paddr, vm->page_size); in vm_userspace_mem_region_add()
620 TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1) in vm_userspace_mem_region_add()
621 <= vm->max_gfn, "Physical range beyond maximum " in vm_userspace_mem_region_add()
625 guest_paddr, npages, vm->max_gfn, vm->page_size); in vm_userspace_mem_region_add()
632 vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1); in vm_userspace_mem_region_add()
639 guest_paddr, npages, vm->page_size, in vm_userspace_mem_region_add()
644 list_for_each_entry(region, &vm->userspace_mem_regions, list) { in vm_userspace_mem_region_add()
661 region->mmap_size = npages * vm->page_size; in vm_userspace_mem_region_add()
701 ret = madvise(region->host_mem, npages * vm->page_size, in vm_userspace_mem_region_add()
704 region->host_mem, npages * vm->page_size, src_type); in vm_userspace_mem_region_add()
710 guest_paddr >> vm->page_shift, npages); in vm_userspace_mem_region_add()
714 region->region.memory_size = npages * vm->page_size; in vm_userspace_mem_region_add()
716 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_userspace_mem_region_add()
725 list_add(&region->list, &vm->userspace_mem_regions); in vm_userspace_mem_region_add()
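A sketch of adding a dirty-log-enabled memslot backed by anonymous memory; slot number, GPA and page count are illustrative and reused in later sketches:

        #define TEST_SLOT       1
        #define TEST_GPA        0xc0000000UL
        #define TEST_PAGES      512

        vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, TEST_GPA,
                                    TEST_SLOT, TEST_PAGES,
                                    KVM_MEM_LOG_DIRTY_PAGES);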
744 memslot2region(struct kvm_vm *vm, uint32_t memslot) in memslot2region() argument
748 list_for_each_entry(region, &vm->userspace_mem_regions, list) { in memslot2region()
756 vm_dump(stderr, vm, 2); in memslot2region()
775 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags) in vm_mem_region_set_flags() argument
780 region = memslot2region(vm, slot); in vm_mem_region_set_flags()
784 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_mem_region_set_flags()
805 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa) in vm_mem_region_move() argument
810 region = memslot2region(vm, slot); in vm_mem_region_move()
814 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_mem_region_move()
834 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot) in vm_mem_region_delete() argument
836 __vm_mem_region_delete(vm, memslot2region(vm, slot)); in vm_mem_region_delete()
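All three helpers above route through KVM_SET_USER_MEMORY_REGION: set_flags rewrites the flags, move rewrites guest_phys_addr, and delete shrinks memory_size to zero before unlinking the region. Continuing the memslot sketch:

        vm_mem_region_set_flags(vm, TEST_SLOT, 0);              /* stop dirty logging */
        vm_mem_region_move(vm, TEST_SLOT, TEST_GPA + 0x200000); /* hypothetical new GPA */
        vm_mem_region_delete(vm, TEST_SLOT);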
884 void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid) in vm_vcpu_add() argument
889 vcpu = vcpu_find(vm, vcpuid); in vm_vcpu_add()
901 vcpu->fd = ioctl(vm->fd, KVM_CREATE_VCPU, vcpuid); in vm_vcpu_add()
914 list_add(&vcpu->list, &vm->vcpus); in vm_vcpu_add()
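vm_vcpu_add() only issues KVM_CREATE_VCPU and maps the run structure; register and page-table setup is left to the per-arch wrappers (e.g. vm_vcpu_add_default()). A bare sketch, with VCPU_ID a hypothetical test define:

        #define VCPU_ID 0

        vm_vcpu_add(vm, VCPU_ID);       /* no architecture state initialized yet */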
937 static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, in vm_vaddr_unused_gap() argument
940 uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift; in vm_vaddr_unused_gap()
943 uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift; in vm_vaddr_unused_gap()
944 if ((pgidx_start * vm->page_size) < vaddr_min) in vm_vaddr_unused_gap()
948 if (!sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
950 pgidx_start = sparsebit_next_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
959 if (sparsebit_is_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
962 pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
971 if (!sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
974 vm->vpages_valid, pgidx_start, pages); in vm_vaddr_unused_gap()
987 TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
993 TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1000 return pgidx_start * vm->page_size; in vm_vaddr_unused_gap()
1024 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, in vm_vaddr_alloc() argument
1027 uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); in vm_vaddr_alloc()
1029 virt_pgd_alloc(vm, pgd_memslot); in vm_vaddr_alloc()
1035 vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min); in vm_vaddr_alloc()
1039 pages--, vaddr += vm->page_size) { in vm_vaddr_alloc()
1042 paddr = vm_phy_page_alloc(vm, in vm_vaddr_alloc()
1043 KVM_UTIL_MIN_PFN * vm->page_size, data_memslot); in vm_vaddr_alloc()
1045 virt_pg_map(vm, vaddr, paddr, pgd_memslot); in vm_vaddr_alloc()
1047 sparsebit_set(vm->vpages_mapped, in vm_vaddr_alloc()
1048 vaddr >> vm->page_shift); in vm_vaddr_alloc()
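vm_vaddr_alloc() finds an unused virtual range with vm_vaddr_unused_gap(), backs it page by page via vm_phy_page_alloc(), and maps it with virt_pg_map(). A sketch allocating one page, assuming memslot 0 holds both data and page tables and that kvm_util.h provides the usual KVM_UTIL_MIN_VADDR floor:

        vm_vaddr_t gva = vm_vaddr_alloc(vm, vm_get_page_size(vm),
                                        KVM_UTIL_MIN_VADDR, 0, 0);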
1071 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, in virt_map() argument
1074 size_t page_size = vm->page_size; in virt_map()
1081 virt_pg_map(vm, vaddr, paddr, pgd_memslot); in virt_map()
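virt_map() is the bulk variant: it loops virt_pg_map() across the range without allocating backing pages, so the physical side must already exist. Assuming the trailing parameters are npages and pgd_memslot, as in the selftests header, this identity-maps the slot from the earlier sketch:

        virt_map(vm, TEST_GPA, TEST_GPA, TEST_PAGES, 0);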
1104 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) in addr_gpa2hva() argument
1108 list_for_each_entry(region, &vm->userspace_mem_regions, list) { in addr_gpa2hva()
1137 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) in addr_hva2gpa() argument
1141 list_for_each_entry(region, &vm->userspace_mem_regions, list) { in addr_hva2gpa()
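The two translators walk the userspace_mem_regions list in opposite directions. A sketch that pokes guest-physical memory from the host and checks the round trip:

        #include <string.h>

        void *hva = addr_gpa2hva(vm, TEST_GPA);

        memset(hva, 0xaa, vm_get_page_size(vm));
        TEST_ASSERT(addr_hva2gpa(vm, hva) == TEST_GPA, "HVA/GPA round trip failed");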
1166 void vm_create_irqchip(struct kvm_vm *vm) in vm_create_irqchip() argument
1170 ret = ioctl(vm->fd, KVM_CREATE_IRQCHIP, 0); in vm_create_irqchip()
1174 vm->has_irqchip = true; in vm_create_irqchip()
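On x86 the in-kernel irqchip must exist before interrupt-injection ioctls; tests simply call:

        vm_create_irqchip(vm);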
1192 struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid) in vcpu_state() argument
1194 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_state()
1214 void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid) in vcpu_run() argument
1216 int ret = _vcpu_run(vm, vcpuid); in vcpu_run()
1221 int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid) in _vcpu_run() argument
1223 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in _vcpu_run()
1231 assert_on_unhandled_exception(vm, vcpuid); in _vcpu_run()
1236 void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid) in vcpu_run_complete_io() argument
1238 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_run_complete_io()
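vcpu_run() asserts on _vcpu_run(), which retries KVM_RUN on EINTR and checks for unhandled guest exceptions. A sketch of the canonical run loop for an x86 guest that signals completion over ucall (the KVM_EXIT_IO check is x86-specific, since ucalls arrive there as PIO exits):

        struct kvm_run *run = vcpu_state(vm, VCPU_ID);

        for (;;) {
                vcpu_run(vm, VCPU_ID);
                TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
                            "Unexpected exit reason: %u (%s)",
                            run->exit_reason, exit_reason_str(run->exit_reason));
                if (get_ucall(vm, VCPU_ID, NULL) == UCALL_DONE)
                        break;
        }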
1252 void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid, in vcpu_set_guest_debug() argument
1255 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_set_guest_debug()
1276 void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid, in vcpu_set_mp_state() argument
1279 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_set_mp_state()
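A sketch of marking a vCPU runnable through the mp_state setter (x86-flavoured; other states include KVM_MP_STATE_HALTED and friends):

        struct kvm_mp_state mp_state = { .mp_state = KVM_MP_STATE_RUNNABLE };

        vcpu_set_mp_state(vm, VCPU_ID, &mp_state);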
1305 struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid) in vcpu_get_reg_list() argument
1310 ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, &reg_list_n); in vcpu_get_reg_list()
1314 vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, reg_list); in vcpu_get_reg_list()
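vcpu_get_reg_list() issues KVM_GET_REG_LIST twice: once with n = 0 to learn the register count, then again with a buffer sized to fit. The caller frees the result:

        #include <stdio.h>
        #include <stdlib.h>

        struct kvm_reg_list *list = vcpu_get_reg_list(vm, VCPU_ID);
        uint64_t i;

        for (i = 0; i < list->n; i++)
                printf("reg id: 0x%llx\n", (unsigned long long)list->reg[i]);
        free(list);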
1333 void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs) in vcpu_regs_get() argument
1335 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_regs_get()
1360 void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs) in vcpu_regs_set() argument
1362 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_regs_set()
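The usual pattern with the register accessors is read-modify-write; the rip adjustment below assumes an x86 guest:

        struct kvm_regs regs;

        vcpu_regs_get(vm, VCPU_ID, &regs);
        regs.rip += 2;          /* e.g. skip over an emulated instruction */
        vcpu_regs_set(vm, VCPU_ID, &regs);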
1373 void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid, in vcpu_events_get() argument
1376 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_events_get()
1386 void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid, in vcpu_events_set() argument
1389 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_events_set()
1401 void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid, in vcpu_nested_state_get() argument
1404 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_nested_state_get()
1415 int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid, in vcpu_nested_state_set() argument
1418 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_nested_state_set()
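The events and nested-state accessors follow the same get/modify/set shape. An x86-flavoured events sketch; treat the field choice as illustrative:

        struct kvm_vcpu_events events;

        vcpu_events_get(vm, VCPU_ID, &events);
        events.nmi.pending = 1;         /* hypothetical: queue an NMI */
        vcpu_events_set(vm, VCPU_ID, &events);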
1449 void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs) in vcpu_sregs_get() argument
1451 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_sregs_get()
1476 void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs) in vcpu_sregs_set() argument
1478 int ret = _vcpu_sregs_set(vm, vcpuid, sregs); in vcpu_sregs_set()
1483 int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs) in _vcpu_sregs_set() argument
1485 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in _vcpu_sregs_set()
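vcpu_sregs_set() asserts on failure, while _vcpu_sregs_set() returns the raw ioctl result for tests that expect rejection. An x86-flavoured sketch, assuming processor.h's CR4 defines:

        struct kvm_sregs sregs;

        vcpu_sregs_get(vm, VCPU_ID, &sregs);
        sregs.cr4 |= X86_CR4_OSXSAVE;
        vcpu_sregs_set(vm, VCPU_ID, &sregs);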
1492 void vcpu_fpu_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_fpu *fpu) in vcpu_fpu_get() argument
1496 ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_FPU, fpu); in vcpu_fpu_get()
1501 void vcpu_fpu_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_fpu *fpu) in vcpu_fpu_set() argument
1505 ret = _vcpu_ioctl(vm, vcpuid, KVM_SET_FPU, fpu); in vcpu_fpu_set()
1510 void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg) in vcpu_get_reg() argument
1514 ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, reg); in vcpu_get_reg()
1519 void vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg) in vcpu_set_reg() argument
1523 ret = _vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, reg); in vcpu_set_reg()
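vcpu_get_reg()/vcpu_set_reg() wrap KVM_GET/SET_ONE_REG; struct kvm_one_reg carries a register id plus a userspace pointer to the value. A generic sketch with a hypothetical, architecture-specific REG_ID:

        uint64_t val = 0;
        struct kvm_one_reg reg = {
                .id   = REG_ID,         /* hypothetical register id */
                .addr = (uint64_t)&val,
        };

        vcpu_get_reg(vm, VCPU_ID, &reg);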
1541 void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, in vcpu_ioctl() argument
1546 ret = _vcpu_ioctl(vm, vcpuid, cmd, arg); in vcpu_ioctl()
1551 int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, in _vcpu_ioctl() argument
1554 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in _vcpu_ioctl()
1576 void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg) in vm_ioctl() argument
1580 ret = ioctl(vm->fd, cmd, arg); in vm_ioctl()
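vm_ioctl() and vcpu_ioctl() are the asserting catch-alls for ioctls without dedicated wrappers; the _-prefixed variants return the raw result instead. A sketch raising an interrupt line, assuming the in-kernel irqchip created earlier:

        struct kvm_irq_level irq = { .irq = 0, .level = 1 };

        vm_ioctl(vm, KVM_IRQ_LINE, &irq);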
1600 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) in vm_dump() argument
1605 fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode); in vm_dump()
1606 fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd); in vm_dump()
1607 fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size); in vm_dump()
1609 list_for_each_entry(region, &vm->userspace_mem_regions, list) { in vm_dump()
1619 sparsebit_dump(stream, vm->vpages_mapped, indent + 2); in vm_dump()
1621 vm->pgd_created); in vm_dump()
1622 if (vm->pgd_created) { in vm_dump()
1625 virt_dump(stream, vm, indent + 4); in vm_dump()
1628 list_for_each_entry(vcpu, &vm->vcpus, list) in vm_dump()
1629 vcpu_dump(stream, vm, vcpu->id, indent + 2); in vm_dump()
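vm_dump() is handy when an assertion trips; it prints the mode, memslots, mapped-page bitmap, page tables, and per-vCPU state:

        vm_dump(stderr, vm, 0);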
1708 vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, in vm_phy_pages_alloc() argument
1716 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " in vm_phy_pages_alloc()
1719 paddr_min, vm->page_size); in vm_phy_pages_alloc()
1721 region = memslot2region(vm, memslot); in vm_phy_pages_alloc()
1722 base = pg = paddr_min >> vm->page_shift; in vm_phy_pages_alloc()
1736 paddr_min, vm->page_size, memslot); in vm_phy_pages_alloc()
1738 vm_dump(stderr, vm, 2); in vm_phy_pages_alloc()
1745 return base * vm->page_size; in vm_phy_pages_alloc()
1748 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, in vm_phy_page_alloc() argument
1751 return vm_phy_pages_alloc(vm, 1, paddr_min, memslot); in vm_phy_page_alloc()
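vm_phy_pages_alloc() hands out contiguous guest-physical pages from a slot's pool of unused pages; vm_phy_page_alloc() is the single-page convenience. A sketch with an illustrative page-aligned floor:

        vm_paddr_t gpa = vm_phy_page_alloc(vm, 0x1000 /* hypothetical paddr_min */, 0);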
1766 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva) in addr_gva2hva() argument
1768 return addr_gpa2hva(vm, addr_gva2gpa(vm, gva)); in addr_gva2hva()
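addr_gva2hva() composes the guest page-table walk (addr_gva2gpa()) with the memslot lookup above, letting host code dereference guest-virtual addresses directly:

        *(uint64_t *)addr_gva2hva(vm, gva) = 42;        /* gva from the earlier sketch */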
1783 bool vm_is_unrestricted_guest(struct kvm_vm *vm) in vm_is_unrestricted_guest() argument
1789 if (vm == NULL) { in vm_is_unrestricted_guest()
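vm_is_unrestricted_guest() is x86-only; with a NULL vm it just checks that /dev/kvm is usable and reads the kvm_intel unrestricted_guest module parameter. A skip-guard sketch, assuming kselftest.h's KSFT_SKIP:

        if (!vm_is_unrestricted_guest(NULL))
                exit(KSFT_SKIP);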
1807 unsigned int vm_get_page_size(struct kvm_vm *vm) in vm_get_page_size() argument
1809 return vm->page_size; in vm_get_page_size()
1812 unsigned int vm_get_page_shift(struct kvm_vm *vm) in vm_get_page_shift() argument
1814 return vm->page_shift; in vm_get_page_shift()
1817 unsigned int vm_get_max_gfn(struct kvm_vm *vm) in vm_get_max_gfn() argument
1819 return vm->max_gfn; in vm_get_max_gfn()
1822 int vm_get_fd(struct kvm_vm *vm) in vm_get_fd() argument
1824 return vm->fd; in vm_get_fd()
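The accessors above exist so tests need not reach into struct kvm_vm directly. For instance, the size of the guest-physical address space falls out of two of them:

        uint64_t gpa_bytes = ((uint64_t)vm_get_max_gfn(vm) + 1)
                             << vm_get_page_shift(vm);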