Searched for refs:vm (results 1 – 25 of 58, sorted by relevance).

/tools/testing/selftests/kvm/lib/aarch64/
processor.c
     19  static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
     21          return (v + vm->page_size) & ~(vm->page_size - 1);
     24  static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
     26          unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
     27          uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;
     32  static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
     34          unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
     35          uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
     37          TEST_ASSERT(vm->pgtable_levels == 4,
     38                      "Mode %d does not have 4 page table levels", vm->mode);
    [all …]
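Worth noting: each translation level resolves page_shift - 3 bits of the virtual address, because a page holds 2^(page_shift - 3) eight-byte descriptors. A standalone sanity check of the shift formulas above (a sketch assuming 4 KiB pages and a 48-bit, 4-level configuration; the real code reads these values from the vm structure):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int page_shift = 12;      /* 4 KiB pages */
            unsigned int pgtable_levels = 4;   /* 48-bit VA space, 4 levels */
            unsigned int va_bits = 48;

            /* Same formulas as pgd_index()/pud_index() above. */
            unsigned int pgd_shift = (pgtable_levels - 1) * (page_shift - 3) + page_shift;
            unsigned int pud_shift = 2 * (page_shift - 3) + page_shift;

            assert(pgd_shift == 39);   /* PGD covers VA bits 47:39 */
            assert(pud_shift == 30);   /* PUD covers VA bits 38:30 */

            /* 1 << (va_bits - pgd_shift) = 512 top-level entries. */
            printf("pgd entries: %lu\n", 1UL << (va_bits - pgd_shift));
            return 0;
    }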
ucall.c
     12  static bool ucall_mmio_init(struct kvm_vm *vm, vm_paddr_t gpa)
     14          if (kvm_userspace_memory_region_find(vm, gpa, gpa + 1))
     17          virt_pg_map(vm, gpa, gpa, 0);
     20          sync_global_to_guest(vm, ucall_exit_mmio_addr);
     25  void ucall_init(struct kvm_vm *vm, void *arg)
     33          ret = ucall_mmio_init(vm, gpa);
     54          bits = vm->va_bits - 1;
     55          bits = vm->pa_bits < bits ? vm->pa_bits : bits;
     60                  if (ucall_mmio_init(vm, start - offset))
     62                  if (ucall_mmio_init(vm, start + offset))
    [all …]
/tools/testing/selftests/kvm/lib/s390x/
processor.c
     18  void virt_pgd_alloc(struct kvm_vm *vm, uint32_t memslot)
     22          TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
     23                      vm->page_size);
     25          if (vm->pgd_created)
     28          paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
     30          memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);
     32          vm->pgd = paddr;
     33          vm->pgd_created = true;
     41  static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri, uint32_t memslot)
     45          taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1,
    [all …]
/tools/testing/selftests/kvm/lib/
kvm_util.c
     78  int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
     82          ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap);
    102  int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
    105          struct vcpu *vcpu = vcpu_find(vm, vcpu_id);
    117  static void vm_open(struct kvm_vm *vm, int perm)
    119          vm->kvm_fd = open(KVM_DEV_PATH, perm);
    120          if (vm->kvm_fd < 0)
    128          vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, vm->type);
    129          TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
    130                      "rc: %i errno: %i", vm->fd, errno);
    [all …]
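vm_open() above is a thin wrapper around the stock /dev/kvm handshake. Outside the harness, the same two steps look roughly like this (a minimal sketch against the stable KVM ioctl ABI; error handling abbreviated):

    #include <fcntl.h>
    #include <linux/kvm.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            /* Open the KVM device, as vm_open() does with KVM_DEV_PATH. */
            int kvm_fd = open("/dev/kvm", O_RDWR);
            if (kvm_fd < 0) {
                    perror("open /dev/kvm");
                    return 1;
            }

            /* Sanity-check the ABI version before creating a VM. */
            if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
                    return 1;

            /* Type 0 is the default VM type on most architectures. */
            int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
            if (vm_fd < 0) {
                    perror("KVM_CREATE_VM");
                    return 1;
            }

            printf("created VM fd %d\n", vm_fd);
            close(vm_fd);
            close(kvm_fd);
            return 0;
    }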
/tools/testing/selftests/kvm/include/
kvm_util.h
     65  int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
     66  int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
     68  void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
     74  void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log);
     75  void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
     78  int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
     81  void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename,
     84  void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
    102  void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid,
    105  void vm_create_irqchip(struct kvm_vm *vm);
    [all …]
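These declarations make up the core harness API. Pieced together from the call sites elsewhere in these results, a minimal test has roughly this shape (a sketch only; VCPU_ID and guest_code are per-test definitions, and the exact helper set varies by kernel version):

    #include "test_util.h"
    #include "kvm_util.h"

    #define VCPU_ID 0

    /* Runs inside the guest and reports completion to the host. */
    static void guest_code(void)
    {
            GUEST_DONE();
    }

    int main(void)
    {
            struct kvm_vm *vm;
            struct ucall uc;

            /* One vCPU, zero extra pages, guest entry point guest_code(). */
            vm = vm_create_default(VCPU_ID, 0, guest_code);

            vcpu_run(vm, VCPU_ID);

            /* GUEST_DONE() in the guest surfaces here as UCALL_DONE. */
            TEST_ASSERT(get_ucall(vm, VCPU_ID, &uc) == UCALL_DONE,
                        "unexpected ucall %lu", uc.cmd);

            kvm_vm_free(vm);
            return 0;
    }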
perf_test_util.h
     52  struct kvm_vm *vm;                  (struct member)
    in create_vm():
     96          struct kvm_vm *vm;
    116          vm = vm_create(mode, pages, O_RDWR);
    117          kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
    119          vm_create_irqchip(vm);
    122          perf_test_args.vm = vm;
    123          perf_test_args.guest_page_size = vm_get_page_size(vm);
    137          TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm),
    140                      guest_num_pages, vm_get_max_gfn(vm), vcpus,
    146          guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) *
    [all …]
/tools/testing/selftests/kvm/x86_64/
platform_info_test.c
     38  static void set_msr_platform_info_enabled(struct kvm_vm *vm, bool enable)
     45          vm_enable_cap(vm, &cap);
     48  static void test_msr_platform_info_enabled(struct kvm_vm *vm)
     50          struct kvm_run *run = vcpu_state(vm, VCPU_ID);
     53          set_msr_platform_info_enabled(vm, true);
     54          vcpu_run(vm, VCPU_ID);
     59          get_ucall(vm, VCPU_ID, &uc);
     68  static void test_msr_platform_info_disabled(struct kvm_vm *vm)
     70          struct kvm_run *run = vcpu_state(vm, VCPU_ID);
     72          set_msr_platform_info_enabled(vm, false);
    [all …]
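The capability setup between lines 38 and 45 is elided by the excerpt. For context, toggling a VM capability with this API generally looks like the sketch below; per the KVM API documentation, KVM_CAP_MSR_PLATFORM_INFO takes the on/off flag in args[0]. The helper name is illustrative, not the test's:

    #include <stdbool.h>
    #include "kvm_util.h"

    /* Illustrative helper (not the test's elided body): toggle the
     * MSR_PLATFORM_INFO capability; args[0] carries the on/off flag. */
    static void toggle_msr_platform_info(struct kvm_vm *vm, bool enable)
    {
            struct kvm_enable_cap cap = {
                    .cap = KVM_CAP_MSR_PLATFORM_INFO,
                    .args[0] = enable,
            };

            vm_enable_cap(vm, &cap);
    }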
vmx_set_nested_state_test.c
     30  void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state)
     32          vcpu_nested_state_set(vm, VCPU_ID, state, false);
     35  void test_nested_state_expect_errno(struct kvm_vm *vm,
     41          rv = vcpu_nested_state_set(vm, VCPU_ID, state, true);
     48  void test_nested_state_expect_einval(struct kvm_vm *vm,
     51          test_nested_state_expect_errno(vm, state, EINVAL);
     54  void test_nested_state_expect_efault(struct kvm_vm *vm,
     57          test_nested_state_expect_errno(vm, state, EFAULT);
     89  void test_vmx_nested_state(struct kvm_vm *vm)
     99          test_nested_state_expect_einval(vm, state);
    [all …]
smm_test.c (all hits in main()):
     97          struct kvm_vm *vm;
    103          vm = vm_create_default(VCPU_ID, 0, guest_code);
    105          vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
    107          run = vcpu_state(vm, VCPU_ID);
    109          vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
    111          TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT)
    114          memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE);
    115          memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler,
    118          vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA);
    122          vcpu_alloc_svm(vm, &nested_gva);
    [all …]
evmcs_test.c (all hits in main()):
     86          struct kvm_vm *vm;
     93          vm = vm_create_default(VCPU_ID, 0, guest_code);
     95          vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
    104          vcpu_enable_evmcs(vm, VCPU_ID);
    106          run = vcpu_state(vm, VCPU_ID);
    108          vcpu_regs_get(vm, VCPU_ID, &regs1);
    110          vcpu_alloc_vmx(vm, &vmx_pages_gva);
    111          vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
    114          _vcpu_run(vm, VCPU_ID);
    120          switch (get_ucall(vm, VCPU_ID, &uc)) {
    [all …]
vmx_dirty_log_test.c (all hits in main()):
     76          struct kvm_vm *vm;
     84          vm = vm_create_default(VCPU_ID, 0, l1_guest_code);
     85          vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
     86          vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
     87          vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
     88          run = vcpu_state(vm, VCPU_ID);
     91          vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
    101          virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM, TEST_MEM_PAGES, 0);
    110          prepare_eptp(vmx, vm, 0);
    111          nested_map_memslot(vmx, vm, 0, 0);
    [all …]
sync_regs_test.c (all hits in main()):
     82          struct kvm_vm *vm;
    103          vm = vm_create_default(VCPU_ID, 0, guest_code);
    105          run = vcpu_state(vm, VCPU_ID);
    109          rv = _vcpu_run(vm, VCPU_ID);
    113          vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;
    116          rv = _vcpu_run(vm, VCPU_ID);
    120          vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;
    124          rv = _vcpu_run(vm, VCPU_ID);
    128          vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0;
    131          rv = _vcpu_run(vm, VCPU_ID);
    [all …]
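The kvm_valid_regs/kvm_dirty_regs writes above toggle the sync-regs mechanism, which shares register state through the kvm_run page instead of separate KVM_GET_REGS/KVM_SET_REGS calls. A hedged sketch of the usual x86 round trip (assuming the harness helpers shown elsewhere on this page; sync_regs_round_trip is an illustrative name):

    #include <linux/kvm.h>
    #include "kvm_util.h"

    /* Sketch: one sync-regs round trip. KVM fills run->s.regs on exit when
     * the region is in kvm_valid_regs, and consumes userspace edits on the
     * next run when the region is in kvm_dirty_regs. */
    static void sync_regs_round_trip(struct kvm_vm *vm, uint32_t vcpu_id)
    {
            struct kvm_run *run = vcpu_state(vm, vcpu_id);

            run->kvm_valid_regs = KVM_SYNC_X86_REGS;
            vcpu_run(vm, vcpu_id);

            run->s.regs.regs.rbx += 1;                /* edit a synced GPR */
            run->kvm_dirty_regs = KVM_SYNC_X86_REGS;  /* push the edit back */
            vcpu_run(vm, vcpu_id);
    }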
tsc_msrs_test.c
     19  #define rounded_host_rdmsr(x) ROUND(vcpu_get_msr(vm, 0, x))
     78  static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid, int stage)
     82          vcpu_args_set(vm, vcpuid, 1, vcpuid);
     84          vcpu_ioctl(vm, vcpuid, KVM_RUN, NULL);
     86          switch (get_ucall(vm, vcpuid, &uc)) {
    100                  exit_reason_str(vcpu_state(vm, vcpuid)->exit_reason));
    in main():
    106          struct kvm_vm *vm;
    109          vm = vm_create_default(VCPU_ID, 0, guest_code);
    110          vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
    117          run_vcpu(vm, VCPU_ID, 1);
    [all …]
svm_vmcall_test.c
     17  static struct kvm_vm *vm;          (file-scope variable)
    in main():
     46          vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
     47          vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
     49          vcpu_alloc_svm(vm, &svm_gva);
     50          vcpu_args_set(vm, VCPU_ID, 1, svm_gva);
     53          volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
     56          vcpu_run(vm, VCPU_ID);
     62          switch (get_ucall(vm, VCPU_ID, &uc)) {
     75          kvm_vm_free(vm);
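The switch on get_ucall() at line 62 is the standard host-side dispatch for guest ucalls; its case bodies are elided in the excerpt. The common idiom in these tests looks roughly like this (a sketch; handle_ucall is an illustrative name, not the test's):

    #include "test_util.h"
    #include "kvm_util.h"

    /* Sketch of the usual dispatch; returns true while the guest
     * should keep running. */
    static bool handle_ucall(struct kvm_vm *vm, uint32_t vcpu_id)
    {
            struct ucall uc;

            switch (get_ucall(vm, vcpu_id, &uc)) {
            case UCALL_SYNC:
                    return true;    /* guest checkpoint; stage in uc.args[1] */
            case UCALL_ABORT:
                    /* guest-side GUEST_ASSERT failure; args[0] is the message */
                    TEST_ASSERT(false, "guest abort: %s", (const char *)uc.args[0]);
                    return false;
            case UCALL_DONE:
                    return false;
            default:
                    TEST_ASSERT(false, "unexpected ucall");
                    return false;
            }
    }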
vmx_preemption_timer_test.c (all hits in main()):
    160          struct kvm_vm *vm;
    173          vm = vm_create_default(VCPU_ID, 0, guest_code);
    174          vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
    175          run = vcpu_state(vm, VCPU_ID);
    177          vcpu_regs_get(vm, VCPU_ID, &regs1);
    180          vcpu_alloc_vmx(vm, &vmx_pages_gva);
    181          vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
    188          _vcpu_run(vm, VCPU_ID);
    194          switch (get_ucall(vm, VCPU_ID, &uc)) {
    236          state = vcpu_save_state(vm, VCPU_ID);
    [all …]
state_test.c (all hits in main()):
    160          struct kvm_vm *vm;
    167          vm = vm_create_default(VCPU_ID, 0, guest_code);
    168          vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
    169          run = vcpu_state(vm, VCPU_ID);
    171          vcpu_regs_get(vm, VCPU_ID, &regs1);
    175          vcpu_alloc_svm(vm, &nested_gva);
    177          vcpu_alloc_vmx(vm, &nested_gva);
    183          vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
    186          _vcpu_run(vm, VCPU_ID);
    192          switch (get_ucall(vm, VCPU_ID, &uc)) {
    [all …]
vmx_close_while_nested_test.c
     28  static struct kvm_vm *vm;          (file-scope variable)
    in main():
     59          vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
     60          vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
     63          vcpu_alloc_vmx(vm, &vmx_pages_gva);
     64          vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
     67          volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
     70          vcpu_run(vm, VCPU_ID);
     79          switch (get_ucall(vm, VCPU_ID, &uc)) {
/tools/testing/selftests/kvm/
set_memory_region_test.c
    in vcpu_worker():
     57          struct kvm_vm *vm = data;
     67          run = vcpu_state(vm, VCPU_ID);
     70          vcpu_run(vm, VCPU_ID);
     73          cmd = get_ucall(vm, VCPU_ID, &uc);
    in spawn_vm():
    118          struct kvm_vm *vm;
    122          vm = vm_create_default(VCPU_ID, 0, guest_code);
    124          vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
    126          vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
    134          gpa = vm_phy_pages_alloc(vm, 2, MEM_REGION_GPA, MEM_REGION_SLOT);
    137          virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2, 0);
    [all …]
dirty_log_test.c
    154  static void clear_log_create_vm_done(struct kvm_vm *vm)
    165          vm_enable_cap(vm, &cap);
    168  static void dirty_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
    171          kvm_vm_get_dirty_log(vm, slot, bitmap);
    174  static void clear_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
    177          kvm_vm_get_dirty_log(vm, slot, bitmap);
    178          kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);
    186  void (*create_vm_done)(struct kvm_vm *vm);
    188  void (*collect_dirty_pages) (struct kvm_vm *vm, int slot,
    232  static void log_mode_create_vm_done(struct kvm_vm *vm)
    [all …]
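dirty_log_collect_dirty_pages() and clear_log_collect_dirty_pages() above differ by one call: under KVM's manual dirty-log protection (the capability enabled in clear_log_create_vm_done(), whose setup the excerpt elides), harvested pages must be explicitly re-protected with KVM_CLEAR_DIRTY_LOG. A sketch of the two-step flow using the wrappers declared in kvm_util.h (collect_and_clear is an illustrative name):

    #include "kvm_util.h"

    /* Sketch: harvest one memslot's dirty bitmap, then re-protect the
     * harvested pages. The clear step only exists under manual
     * dirty-log protection. */
    static void collect_and_clear(struct kvm_vm *vm, int slot,
                                  void *bitmap, uint32_t num_pages)
    {
            kvm_vm_get_dirty_log(vm, slot, bitmap);                  /* KVM_GET_DIRTY_LOG */
            kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);  /* KVM_CLEAR_DIRTY_LOG */
    }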
steal_time.c
     63  static void steal_time_init(struct kvm_vm *vm)
     76          vcpu_set_cpuid(vm, i, kvm_get_supported_cpuid());
     80          sync_global_to_guest(vm, st_gva[i]);
     82          ret = _vcpu_set_msr(vm, i, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_STEAL_RESERVED_MASK);
     85          vcpu_set_msr(vm, i, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED);
     89  static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpuid)
     91          struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpuid]);
    172  static void steal_time_init(struct kvm_vm *vm)
    180          ret = _vcpu_ioctl(vm, 0, KVM_HAS_DEVICE_ATTR, &dev);
    189          vcpu_ioctl(vm, i, KVM_HAS_DEVICE_ATTR, &dev);
    [all …]
/tools/testing/selftests/kvm/lib/x86_64/
processor.c
    212  void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
    214          TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
    215                      "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
    218          if (!vm->pgd_created) {
    219                  vm_paddr_t paddr = vm_phy_page_alloc(vm,
    221                  vm->pgd = paddr;
    222                  vm->pgd_created = true;
    226  void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
    232          TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
    233                      "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
    [all …]
vmx.c
     46  int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
     55          vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_evmcs_cap);
     78  vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
     80          vm_vaddr_t vmx_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
     81          struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);
     84          vmx->vmxon = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
     85          vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);
     86          vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);
     89          vmx->vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
     90          vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);
    [all …]
/tools/testing/selftests/kvm/s390x/
resets.c
     21  struct kvm_vm *vm;                 (file-scope variable)
    in test_one_reg():
     68          vcpu_get_reg(vm, VCPU_ID, &reg);
    in assert_noirq():
     79          irqs = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_GET_IRQ_STATE, &irq_state);
    in assert_clear():
     95          vcpu_regs_get(vm, VCPU_ID, &regs);
     98          vcpu_sregs_get(vm, VCPU_ID, &sregs);
    101          vcpu_fpu_get(vm, VCPU_ID, &fpu);
    in assert_initial():
    136          vcpu_sregs_get(vm, VCPU_ID, &sregs);
    162          vcpu_fpu_get(vm, VCPU_ID, &fpu);
    in inject_irq():
    199          irqs = _vcpu_ioctl(vm, cpu_id, KVM_S390_SET_IRQ_STATE, &irq_state);
    in test_normal():
    207          vm = vm_create_default(VCPU_ID, 0, guest_code_initial);
    [all …]
sync_regs_test.c (all hits in main()):
     78          struct kvm_vm *vm;
     94          vm = vm_create_default(VCPU_ID, 0, guest_code);
     96          run = vcpu_state(vm, VCPU_ID);
    100          rv = _vcpu_run(vm, VCPU_ID);
    104          vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;
    107          rv = _vcpu_run(vm, VCPU_ID);
    111          vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;
    115          rv = _vcpu_run(vm, VCPU_ID);
    119          vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0;
    122          rv = _vcpu_run(vm, VCPU_ID);
    [all …]
memop.c (all hits in main()):
     34          struct kvm_vm *vm;
     50          vm = vm_create_default(VCPU_ID, 0, guest_code);
     51          run = vcpu_state(vm, VCPU_ID);
     57          ksmo.gaddr = addr_gva2gpa(vm, (uintptr_t)mem1);
     63          vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
     66          vcpu_run(vm, VCPU_ID);
     81          vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
     93          rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
    103          rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
    114          rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
    [all …]
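KVM_S390_MEM_OP copies data between a host buffer and guest memory through the vCPU, honoring s390 address translation. A hedged sketch of populating the argument block, mirroring the gaddr setup the excerpt shows (guest_write is an illustrative name; field names follow struct kvm_s390_mem_op from <linux/kvm.h>):

    #include <linux/kvm.h>
    #include "kvm_util.h"

    /* Sketch: write `size` bytes from host buffer `buf` into guest
     * logical memory at `gaddr` via the KVM_S390_MEM_OP vcpu ioctl. */
    static void guest_write(struct kvm_vm *vm, uint32_t vcpu_id,
                            uint64_t gaddr, void *buf, uint32_t size)
    {
            struct kvm_s390_mem_op ksmo = {
                    .gaddr = gaddr,                        /* guest address */
                    .size  = size,                         /* bytes to copy */
                    .op    = KVM_S390_MEMOP_LOGICAL_WRITE, /* host -> guest */
                    .buf   = (uintptr_t)buf,               /* host buffer */
                    .ar    = 0,                            /* access register 0 */
            };

            vcpu_ioctl(vm, vcpu_id, KVM_S390_MEM_OP, &ksmo);
    }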
