Searched refs:vm (Results 1 – 25 of 39) sorted by relevance

/tools/testing/selftests/kvm/lib/aarch64/
processor.c
19 static uint64_t page_align(struct kvm_vm *vm, uint64_t v) in page_align() argument
21 return (v + vm->page_size) & ~(vm->page_size - 1); in page_align()
24 static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva) in pgd_index() argument
26 unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift; in pgd_index()
27 uint64_t mask = (1UL << (vm->va_bits - shift)) - 1; in pgd_index()
32 static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva) in pud_index() argument
34 unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift; in pud_index()
35 uint64_t mask = (1UL << (vm->page_shift - 3)) - 1; in pud_index()
37 TEST_ASSERT(vm->pgtable_levels == 4, in pud_index()
38 "Mode %d does not have 4 page table levels", vm->mode); in pud_index()
[all …]
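
The index helpers above derive page-table indices purely from the VM's geometry: with 4K pages each level resolves page_shift - 3 = 9 bits, and the top-level shift is (pgtable_levels - 1) * 9 + page_shift. Below is a standalone sketch of that arithmetic, assuming 4K pages, 48-bit VAs, and four levels (the GVA is illustrative). Note also that page_align() as quoted adds page_size without subtracting 1, so an already-aligned value is rounded up a full extra page; the conventional round-up is shown for contrast.

    #include <stdint.h>
    #include <stdio.h>

    /* Standalone sketch of the index math quoted above, assuming 4K pages
     * (page_shift = 12), 48-bit VAs, and four translation levels, so each
     * level resolves page_shift - 3 = 9 bits of the address. */
    int main(void)
    {
        const unsigned int page_shift = 12, levels = 4, va_bits = 48;
        const uint64_t page_size = 1ULL << page_shift;
        const uint64_t gva = 0x0000aabbccddee00ULL;   /* illustrative GVA */

        unsigned int pgd_shift = (levels - 1) * (page_shift - 3) + page_shift;
        uint64_t pgd_mask = (1ULL << (va_bits - pgd_shift)) - 1;
        unsigned int pud_shift = 2 * (page_shift - 3) + page_shift;
        uint64_t pud_mask = (1ULL << (page_shift - 3)) - 1;

        printf("pgd index: %llu\n", (unsigned long long)((gva >> pgd_shift) & pgd_mask));
        printf("pud index: %llu\n", (unsigned long long)((gva >> pud_shift) & pud_mask));

        /* Conventional power-of-two round-up; the quoted page_align() omits
         * the "- 1", so an already-aligned value gains one extra page. */
        uint64_t v = 0x1000;
        printf("aligned: 0x%llx\n",
               (unsigned long long)((v + page_size - 1) & ~(page_size - 1)));
        return 0;
    }
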
ucall.c
12 static bool ucall_mmio_init(struct kvm_vm *vm, vm_paddr_t gpa) in ucall_mmio_init() argument
14 if (kvm_userspace_memory_region_find(vm, gpa, gpa + 1)) in ucall_mmio_init()
17 virt_pg_map(vm, gpa, gpa, 0); in ucall_mmio_init()
20 sync_global_to_guest(vm, ucall_exit_mmio_addr); in ucall_mmio_init()
25 void ucall_init(struct kvm_vm *vm, void *arg) in ucall_init() argument
33 ret = ucall_mmio_init(vm, gpa); in ucall_init()
54 bits = vm->va_bits - 1; in ucall_init()
55 bits = vm->pa_bits < bits ? vm->pa_bits : bits; in ucall_init()
60 if (ucall_mmio_init(vm, start - offset)) in ucall_init()
62 if (ucall_mmio_init(vm, start + offset)) in ucall_init()
[all …]
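
ucall_mmio_init() only succeeds when no memslot backs the candidate address, so ucall_init() probes outward from a starting point until it finds unbacked guest-physical space for the MMIO doorbell. A hypothetical sketch of that search pattern; try_addr() is an assumed stand-in for ucall_mmio_init(), not the library's API:

    #include <stdbool.h>
    #include <stdint.h>

    /* Step outward from 'start' in both directions until the callback finds
     * a guest-physical page with no memslot behind it. The caller must pick
     * start/step so that start - offset never underflows. */
    static uint64_t find_unbacked_gpa(uint64_t start, uint64_t end, uint64_t step,
                                      bool (*try_addr)(uint64_t gpa))
    {
        for (uint64_t offset = 0; offset < end - start; offset += step) {
            if (try_addr(start - offset))
                return start - offset;
            if (try_addr(start + offset))
                return start + offset;
        }
        return 0;    /* no hole found */
    }
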
/tools/testing/selftests/kvm/lib/s390x/
processor.c
18 void virt_pgd_alloc(struct kvm_vm *vm, uint32_t memslot) in virt_pgd_alloc() argument
22 TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x", in virt_pgd_alloc()
23 vm->page_size); in virt_pgd_alloc()
25 if (vm->pgd_created) in virt_pgd_alloc()
28 paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION, in virt_pgd_alloc()
30 memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size); in virt_pgd_alloc()
32 vm->pgd = paddr; in virt_pgd_alloc()
33 vm->pgd_created = true; in virt_pgd_alloc()
41 static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri, uint32_t memslot) in virt_alloc_region() argument
45 taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1, in virt_alloc_region()
[all …]
ucall.c
9 void ucall_init(struct kvm_vm *vm, void *arg) in ucall_init() argument
13 void ucall_uninit(struct kvm_vm *vm) in ucall_uninit() argument
36 uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc) in get_ucall() argument
38 struct kvm_run *run = vcpu_state(vm, vcpu_id); in get_ucall()
47 memcpy(&ucall, addr_gva2hva(vm, run->s.regs.gprs[reg]), in get_ucall()
50 vcpu_run_complete_io(vm, vcpu_id); in get_ucall()
/tools/testing/selftests/kvm/lib/
kvm_util.c
77 int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap) in vm_enable_cap() argument
81 ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap); in vm_enable_cap()
88 static void vm_open(struct kvm_vm *vm, int perm) in vm_open() argument
90 vm->kvm_fd = open(KVM_DEV_PATH, perm); in vm_open()
91 if (vm->kvm_fd < 0) in vm_open()
99 vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, vm->type); in vm_open()
100 TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, " in vm_open()
101 "rc: %i errno: %i", vm->fd, errno); in vm_open()
137 struct kvm_vm *vm; in _vm_create() local
141 vm = calloc(1, sizeof(*vm)); in _vm_create()
[all …]
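
vm_open() wraps the first two steps of any KVM client: open /dev/kvm, sanity-check the API version, then issue KVM_CREATE_VM to get a VM file descriptor. A minimal self-contained sketch using only the raw KVM ioctl interface (no selftest helpers):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Open /dev/kvm, check the API version, and create a VM fd
     * (type 0 selects the default machine type on most architectures). */
    int main(void)
    {
        int kvm_fd = open("/dev/kvm", O_RDWR);
        if (kvm_fd < 0) {
            perror("open /dev/kvm");
            return 1;
        }
        if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION) {
            fprintf(stderr, "unexpected KVM API version\n");
            return 1;
        }
        int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
        if (vm_fd < 0) {
            perror("KVM_CREATE_VM");
            return 1;
        }
        printf("vm fd: %d\n", vm_fd);
        return 0;
    }
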
/tools/testing/selftests/kvm/include/
kvm_util.h
69 int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
76 void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log);
77 void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
80 int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
83 void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename,
86 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
87 void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid,
90 void vm_create_irqchip(struct kvm_vm *vm);
92 void vm_userspace_mem_region_add(struct kvm_vm *vm,
97 void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl,
[all …]
/tools/testing/selftests/kvm/x86_64/
platform_info_test.c
38 static void set_msr_platform_info_enabled(struct kvm_vm *vm, bool enable) in set_msr_platform_info_enabled() argument
45 vm_enable_cap(vm, &cap); in set_msr_platform_info_enabled()
48 static void test_msr_platform_info_enabled(struct kvm_vm *vm) in test_msr_platform_info_enabled() argument
50 struct kvm_run *run = vcpu_state(vm, VCPU_ID); in test_msr_platform_info_enabled()
53 set_msr_platform_info_enabled(vm, true); in test_msr_platform_info_enabled()
54 vcpu_run(vm, VCPU_ID); in test_msr_platform_info_enabled()
59 get_ucall(vm, VCPU_ID, &uc); in test_msr_platform_info_enabled()
69 static void test_msr_platform_info_disabled(struct kvm_vm *vm) in test_msr_platform_info_disabled() argument
71 struct kvm_run *run = vcpu_state(vm, VCPU_ID); in test_msr_platform_info_disabled()
73 set_msr_platform_info_enabled(vm, false); in test_msr_platform_info_disabled()
[all …]
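
set_msr_platform_info_enabled() funnels into vm_enable_cap() from kvm_util.c above; the excerpt elides the lines that build the argument. A plausible sketch, assuming the test toggles KVM_CAP_MSR_PLATFORM_INFO with the enable flag in args[0] (reconstructed, not the verified original):

    /* Fragment: depends on the selftest library's struct kvm_vm and
     * vm_enable_cap(); KVM_CAP_MSR_PLATFORM_INFO is from <linux/kvm.h>. */
    static void set_msr_platform_info_enabled_sketch(struct kvm_vm *vm, bool enable)
    {
        struct kvm_enable_cap cap = {
            .cap = KVM_CAP_MSR_PLATFORM_INFO,
            .args[0] = enable,    /* assumed argument layout */
        };

        vm_enable_cap(vm, &cap);
    }
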
vmx_set_nested_state_test.c
30 void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state) in test_nested_state() argument
32 vcpu_nested_state_set(vm, VCPU_ID, state, false); in test_nested_state()
35 void test_nested_state_expect_errno(struct kvm_vm *vm, in test_nested_state_expect_errno() argument
41 rv = vcpu_nested_state_set(vm, VCPU_ID, state, true); in test_nested_state_expect_errno()
48 void test_nested_state_expect_einval(struct kvm_vm *vm, in test_nested_state_expect_einval() argument
51 test_nested_state_expect_errno(vm, state, EINVAL); in test_nested_state_expect_einval()
54 void test_nested_state_expect_efault(struct kvm_vm *vm, in test_nested_state_expect_efault() argument
57 test_nested_state_expect_errno(vm, state, EFAULT); in test_nested_state_expect_efault()
91 void test_vmx_nested_state(struct kvm_vm *vm) in test_vmx_nested_state() argument
101 test_nested_state_expect_einval(vm, state); in test_vmx_nested_state()
[all …]
smm_test.c
93 struct kvm_vm *vm; in main() local
99 vm = vm_create_default(VCPU_ID, 0, guest_code); in main()
101 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); in main()
103 run = vcpu_state(vm, VCPU_ID); in main()
105 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA, in main()
107 TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT) in main()
110 memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE); in main()
111 memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler, in main()
114 vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA); in main()
117 vcpu_alloc_vmx(vm, &vmx_pages_gva); in main()
[all …]
evmcs_test.c
77 struct kvm_vm *vm; in main() local
84 vm = vm_create_default(VCPU_ID, 0, guest_code); in main()
86 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); in main()
94 vcpu_enable_evmcs(vm, VCPU_ID); in main()
96 run = vcpu_state(vm, VCPU_ID); in main()
98 vcpu_regs_get(vm, VCPU_ID, &regs1); in main()
100 vcpu_alloc_vmx(vm, &vmx_pages_gva); in main()
101 vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva); in main()
104 _vcpu_run(vm, VCPU_ID); in main()
110 switch (get_ucall(vm, VCPU_ID, &uc)) { in main()
[all …]
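
This test, like most on this page, drives the guest with the same dispatch loop: run the vCPU, decode the last ucall, and either continue (UCALL_SYNC), fail (UCALL_ABORT), or finish (UCALL_DONE). A sketch of that shared pattern (fragment: relies on the selftest library's _vcpu_run(), get_ucall(), struct ucall, and TEST_ASSERT; the ABORT argument layout follows the tests here):

    for (;;) {
        struct ucall uc;

        _vcpu_run(vm, VCPU_ID);
        switch (get_ucall(vm, VCPU_ID, &uc)) {
        case UCALL_SYNC:        /* guest checkpoint; keep running */
            break;
        case UCALL_ABORT:       /* guest assertion failed */
            TEST_ASSERT(false, "guest abort: %s", (const char *)uc.args[0]);
        case UCALL_DONE:        /* guest finished cleanly */
            goto done;
        }
    }
    done:
        kvm_vm_free(vm);
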
vmx_dirty_log_test.c
76 struct kvm_vm *vm; in main() local
84 vm = vm_create_default(VCPU_ID, 0, l1_guest_code); in main()
85 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); in main()
86 vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva); in main()
87 vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva); in main()
88 run = vcpu_state(vm, VCPU_ID); in main()
91 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, in main()
101 virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM, in main()
111 prepare_eptp(vmx, vm, 0); in main()
112 nested_map_memslot(vmx, vm, 0, 0); in main()
[all …]
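
Between runs, a dirty-log test harvests the per-slot bitmap and checks which guest pages were written. A small sketch of the bit test, assuming one bit per page in host-endian 64-bit words; bmap and page are assumed names, not the test's:

    #include <stdint.h>

    /* After kvm_vm_get_dirty_log(vm, slot, bmap), page n of the slot is
     * dirty iff bit n of the bitmap is set. */
    static int page_is_dirty(const void *bmap, uint64_t page)
    {
        return (((const uint64_t *)bmap)[page / 64] >> (page % 64)) & 1;
    }
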
state_test.c
125 struct kvm_vm *vm; in main() local
132 vm = vm_create_default(VCPU_ID, 0, guest_code); in main()
133 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); in main()
134 run = vcpu_state(vm, VCPU_ID); in main()
136 vcpu_regs_get(vm, VCPU_ID, &regs1); in main()
139 vcpu_alloc_vmx(vm, &vmx_pages_gva); in main()
140 vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva); in main()
143 vcpu_args_set(vm, VCPU_ID, 1, 0); in main()
147 _vcpu_run(vm, VCPU_ID); in main()
153 switch (get_ucall(vm, VCPU_ID, &uc)) { in main()
[all …]
sync_regs_test.c
82 struct kvm_vm *vm; in main() local
103 vm = vm_create_default(VCPU_ID, 0, guest_code); in main()
105 run = vcpu_state(vm, VCPU_ID); in main()
109 rv = _vcpu_run(vm, VCPU_ID); in main()
113 vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0; in main()
116 rv = _vcpu_run(vm, VCPU_ID); in main()
120 vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0; in main()
124 rv = _vcpu_run(vm, VCPU_ID); in main()
128 vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0; in main()
131 rv = _vcpu_run(vm, VCPU_ID); in main()
[all …]
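
The test toggles run->kvm_valid_regs and run->kvm_dirty_regs to exercise the synced-register protocol: KVM exports registers into kvm_run on exit, userspace edits them in place, and marking them dirty makes KVM load them back on the next KVM_RUN, avoiding KVM_GET_REGS/KVM_SET_REGS round-trips. A fragment sketching that flow on x86 (names as in the test; KVM_SYNC_* flags are from the KVM uapi):

    run->kvm_valid_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS;
    rv = _vcpu_run(vm, VCPU_ID);

    run->s.regs.regs.rbx += 1;               /* edit the exported copy in place */
    run->kvm_dirty_regs = KVM_SYNC_X86_REGS; /* load it back on the next run */
    rv = _vcpu_run(vm, VCPU_ID);
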
vmx_close_while_nested_test.c
28 static struct kvm_vm *vm; variable
59 vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code); in main()
60 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); in main()
63 vcpu_alloc_vmx(vm, &vmx_pages_gva); in main()
64 vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva); in main()
67 volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID); in main()
70 vcpu_run(vm, VCPU_ID); in main()
79 switch (get_ucall(vm, VCPU_ID, &uc)) { in main()
set_sregs_test.c
30 struct kvm_vm *vm; in main() local
37 vm = vm_create_default(VCPU_ID, 0, NULL); in main()
39 vcpu_sregs_get(vm, VCPU_ID, &sregs); in main()
41 rc = _vcpu_sregs_set(vm, VCPU_ID, &sregs); in main()
45 rc = _vcpu_sregs_set(vm, VCPU_ID, &sregs); in main()
49 kvm_vm_free(vm); in main()
hyperv_cpuid.c
100 void test_hv_cpuid_e2big(struct kvm_vm *vm) in test_hv_cpuid_e2big() argument
105 ret = _vcpu_ioctl(vm, VCPU_ID, KVM_GET_SUPPORTED_HV_CPUID, &cpuid); in test_hv_cpuid_e2big()
113 struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(struct kvm_vm *vm) in kvm_get_supported_hv_cpuid() argument
127 vcpu_ioctl(vm, VCPU_ID, KVM_GET_SUPPORTED_HV_CPUID, cpuid); in kvm_get_supported_hv_cpuid()
135 struct kvm_vm *vm; in main() local
150 vm = vm_create_default(VCPU_ID, 0, guest_code); in main()
152 test_hv_cpuid_e2big(vm); in main()
154 hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm); in main()
168 vcpu_enable_evmcs(vm, VCPU_ID); in main()
170 hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm); in main()
[all …]
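
test_hv_cpuid_e2big() exercises the kernel's buffer-size check: a struct kvm_cpuid2 with nent = 0 cannot hold any entries, so the ioctl should fail with E2BIG rather than write past the buffer. A sketch of the shape suggested by the excerpt (reconstructed, not the verified original):

    /* Fragment: _vcpu_ioctl() and TEST_ASSERT come from the selftest
     * library; errno/E2BIG from <errno.h>. */
    static struct kvm_cpuid2 cpuid = { .nent = 0 };
    int ret = _vcpu_ioctl(vm, VCPU_ID, KVM_GET_SUPPORTED_HV_CPUID, &cpuid);

    TEST_ASSERT(ret == -1 && errno == E2BIG,
                "KVM_GET_SUPPORTED_HV_CPUID should fail with E2BIG");
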
cr4_cpuid_sync_test.c
67 struct kvm_vm *vm; in main() local
83 vm = vm_create_default(VCPU_ID, 0, guest_code); in main()
84 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); in main()
85 run = vcpu_state(vm, VCPU_ID); in main()
88 rc = _vcpu_run(vm, VCPU_ID); in main()
96 switch (get_ucall(vm, VCPU_ID, &uc)) { in main()
99 vcpu_sregs_get(vm, VCPU_ID, &sregs); in main()
101 vcpu_sregs_set(vm, VCPU_ID, &sregs); in main()
113 kvm_vm_free(vm); in main()
vmx_tsc_adjust_test.c
61 static struct kvm_vm *vm; variable
134 vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code); in main()
135 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); in main()
138 vcpu_alloc_vmx(vm, &vmx_pages_gva); in main()
139 vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva); in main()
142 volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID); in main()
145 vcpu_run(vm, VCPU_ID); in main()
151 switch (get_ucall(vm, VCPU_ID, &uc)) { in main()
165 kvm_vm_free(vm); in main()
/tools/testing/selftests/kvm/lib/x86_64/
vmx.c
43 int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id) in vcpu_enable_evmcs() argument
52 vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_evmcs_cap); in vcpu_enable_evmcs()
75 vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva) in vcpu_alloc_vmx() argument
77 vm_vaddr_t vmx_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0); in vcpu_alloc_vmx()
78 struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva); in vcpu_alloc_vmx()
81 vmx->vmxon = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0); in vcpu_alloc_vmx()
82 vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon); in vcpu_alloc_vmx()
83 vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon); in vcpu_alloc_vmx()
86 vmx->vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0); in vcpu_alloc_vmx()
87 vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs); in vcpu_alloc_vmx()
[all …]
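
vcpu_enable_evmcs() enables Hyper-V enlightened VMCS through KVM_ENABLE_CAP. A sketch of the likely argument setup, under the assumption that args[0] carries a pointer KVM fills with the supported eVMCS version (consistent with the excerpt, but reconstructed):

    /* Fragment: vcpu_ioctl() is the selftest wrapper; the capability
     * constant is from <linux/kvm.h>. */
    uint16_t evmcs_ver;
    struct kvm_enable_cap enable_evmcs_cap = {
        .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
        .args[0] = (unsigned long)&evmcs_ver,   /* assumed: out-param for version */
    };

    vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_evmcs_cap);
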
processor.c
229 void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot) in virt_pgd_alloc() argument
231 TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use " in virt_pgd_alloc()
232 "unknown or unsupported guest mode, mode: 0x%x", vm->mode); in virt_pgd_alloc()
235 if (!vm->pgd_created) { in virt_pgd_alloc()
236 vm_paddr_t paddr = vm_phy_page_alloc(vm, in virt_pgd_alloc()
238 vm->pgd = paddr; in virt_pgd_alloc()
239 vm->pgd_created = true; in virt_pgd_alloc()
258 void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, in virt_pg_map() argument
264 TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use " in virt_pg_map()
265 "unknown or unsupported guest mode, mode: 0x%x", vm->mode); in virt_pg_map()
[all …]
ucall.c
11 void ucall_init(struct kvm_vm *vm, void *arg) in ucall_init() argument
15 void ucall_uninit(struct kvm_vm *vm) in ucall_uninit() argument
38 uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc) in get_ucall() argument
40 struct kvm_run *run = vcpu_state(vm, vcpu_id); in get_ucall()
46 vcpu_regs_get(vm, vcpu_id, &regs); in get_ucall()
47 memcpy(&ucall, addr_gva2hva(vm, (vm_vaddr_t)regs.rdi), in get_ucall()
50 vcpu_run_complete_io(vm, vcpu_id); in get_ucall()
/tools/testing/selftests/kvm/
dirty_log_test.c
151 struct kvm_vm *vm = data; in vcpu_worker() local
156 run = vcpu_state(vm, VCPU_ID); in vcpu_worker()
158 guest_array = addr_gva2hva(vm, (vm_vaddr_t)random_array); in vcpu_worker()
163 ret = _vcpu_run(vm, VCPU_ID); in vcpu_worker()
165 if (get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC) { in vcpu_worker()
252 struct kvm_vm *vm; in create_vm() local
255 vm = _vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR); in create_vm()
256 kvm_vm_elf_load(vm, program_invocation_name, 0, 0); in create_vm()
258 vm_create_irqchip(vm); in create_vm()
260 vm_vcpu_add_default(vm, vcpuid, guest_code); in create_vm()
[all …]
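
vcpu_worker() receives the VM pointer as its thread argument and keeps the guest writing while the main thread collects dirty logs. A fragment sketching the usual pthread plumbing around it (the verification loop is elided in the excerpt and only summarized here):

    #include <pthread.h>

    /* vcpu_worker has the void *(*)(void *) signature pthreads require;
     * the VM pointer doubles as its argument. */
    pthread_t vcpu_thread;

    pthread_create(&vcpu_thread, NULL, vcpu_worker, vm);
    /* ... kvm_vm_get_dirty_log() / verification loop runs here ... */
    pthread_join(vcpu_thread, NULL);
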
/tools/testing/selftests/kvm/s390x/
sync_regs_test.c
68 struct kvm_vm *vm; in main() local
84 vm = vm_create_default(VCPU_ID, 0, guest_code); in main()
86 run = vcpu_state(vm, VCPU_ID); in main()
90 rv = _vcpu_run(vm, VCPU_ID); in main()
94 vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0; in main()
97 rv = _vcpu_run(vm, VCPU_ID); in main()
101 vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0; in main()
105 rv = _vcpu_run(vm, VCPU_ID); in main()
109 vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0; in main()
112 rv = _vcpu_run(vm, VCPU_ID); in main()
[all …]
memop.c
34 struct kvm_vm *vm; in main() local
50 vm = vm_create_default(VCPU_ID, 0, guest_code); in main()
51 run = vcpu_state(vm, VCPU_ID); in main()
57 ksmo.gaddr = addr_gva2gpa(vm, (uintptr_t)mem1); in main()
63 vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); in main()
66 vcpu_run(vm, VCPU_ID); in main()
81 vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); in main()
93 rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); in main()
103 rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); in main()
114 rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); in main()
[all …]
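
The test builds a struct kvm_s390_mem_op describing a guest access and hands it to the vCPU ioctl. A sketch of a logical write, mirroring the fields the excerpt fills (struct and constants from <linux/kvm.h>; mem1 and VCPU_ID as in the test):

    struct kvm_s390_mem_op ksmo = {
        .gaddr = addr_gva2gpa(vm, (uintptr_t)mem1), /* guest address */
        .size  = sizeof(mem1),                      /* bytes to copy */
        .op    = KVM_S390_MEMOP_LOGICAL_WRITE,      /* host -> guest */
        .buf   = (uintptr_t)mem1,                   /* host buffer */
        .ar    = 0,                                 /* access register 0 */
    };

    vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
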
/tools/testing/selftests/kvm/include/aarch64/
processor.h
39 static inline void get_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, uint64_t *addr) in get_reg() argument
44 vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, &reg); in get_reg()
47 static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, uint64_t val) in set_reg() argument
52 vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, &reg); in set_reg()
55 void aarch64_vcpu_setup(struct kvm_vm *vm, int vcpuid, struct kvm_vcpu_init *init);
56 void aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid,
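
get_reg()/set_reg() wrap KVM_GET_ONE_REG/KVM_SET_ONE_REG, whose argument pairs a register id with a userspace address to copy the value through; the struct initializer itself is elided in the excerpt. A plausible sketch, reconstructed from the KVM uapi rather than copied from the file:

    #include <linux/kvm.h>

    /* struct kvm_one_reg carries the register id and a pointer to the
     * value buffer; the same struct serves both get and set. */
    struct kvm_one_reg reg = {
        .id   = id,
        .addr = (uint64_t)addr,
    };

    vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, &reg);
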
