/tools/testing/selftests/kvm/lib/ |
D | kvm_util_internal.h |
    65  vm_vaddr_t gdt;
    66  vm_vaddr_t tss;
    67  vm_vaddr_t idt;
    68  vm_vaddr_t handlers;
|
D | elf.c |
    160  vm_vaddr_t seg_vstart = phdr.p_vaddr;  in kvm_vm_elf_load()
    161  seg_vstart &= ~(vm_vaddr_t)(vm->page_size - 1);  in kvm_vm_elf_load()
    162  vm_vaddr_t seg_vend = phdr.p_vaddr + phdr.p_memsz - 1;  in kvm_vm_elf_load()
    166  vm_vaddr_t vaddr = vm_vaddr_alloc(vm, seg_size, seg_vstart);  in kvm_vm_elf_load()
|
D | kvm_util.c |
    689   int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)  in kvm_memcmp_hva_gva()
    1168  static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,  in vm_vaddr_unused_gap()
    1169          vm_vaddr_t vaddr_min)  in vm_vaddr_unused_gap()
    1255  vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)  in vm_vaddr_alloc()
    1267  vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);  in vm_vaddr_alloc()
    1270  for (vm_vaddr_t vaddr = vaddr_start; pages > 0;  in vm_vaddr_alloc()
    1296  vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages)  in vm_vaddr_alloc_pages()
    1315  vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm)  in vm_vaddr_alloc_page()
    2237  void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)  in addr_gva2hva()
|
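The lib/ entries above are the core of the allocator: vm_vaddr_unused_gap() finds a free stretch of guest virtual address space, vm_vaddr_alloc() maps it page by page, and addr_gva2hva() lets the host reach the same memory. A minimal sketch of how a test typically uses that pair; only vm_vaddr_alloc(), KVM_UTIL_MIN_VADDR and addr_gva2hva() come from the listing, the helper name and the two-page size are illustrative:

#include <string.h>
#include "kvm_util.h"

/*
 * Sketch: reserve guest virtual address space and initialise it from
 * the host before the guest ever runs. Only vm_vaddr_alloc(),
 * KVM_UTIL_MIN_VADDR and addr_gva2hva() are taken from the listing;
 * the helper name and the 0x2000 size are illustrative.
 */
static vm_vaddr_t setup_guest_buffer(struct kvm_vm *vm)
{
	/*
	 * Two pages of guest virtual address space, no lower than the
	 * library's usual minimum, backed by freshly allocated guest
	 * physical pages.
	 */
	vm_vaddr_t gva = vm_vaddr_alloc(vm, 0x2000, KVM_UTIL_MIN_VADDR);

	/* addr_gva2hva() resolves the new mapping to a host pointer. */
	memset(addr_gva2hva(vm, gva), 0, 0x2000);

	return gva;
}

kvm_memcmp_hva_gva() (kvm_util.c line 689) can then compare the guest-visible contents against a host buffer without another explicit translation.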
/tools/testing/selftests/kvm/lib/aarch64/ |
D | ucall.c |
    10   static vm_vaddr_t *ucall_exit_mmio_addr;
    19   ucall_exit_mmio_addr = (vm_vaddr_t *)gpa;  in ucall_mmio_init()
    88   WRITE_ONCE(*ucall_exit_mmio_addr, (vm_vaddr_t)&uc);  in ucall()
    101  vm_vaddr_t gva;  in get_ucall()
|
D | processor.c |
    17   static vm_vaddr_t exception_handlers;
    24   static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)  in pgd_index()
    32   static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)  in pud_index()
    43   static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)  in pmd_index()
    54   static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)  in pte_index()
    141  vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)  in addr_gva2gpa()
    406  *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;  in vm_init_descriptor_tables()
|
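Line 406 of lib/aarch64/processor.c writes a guest global from the host: the address of the host-side exception_handlers symbol is cast to vm_vaddr_t, translated with addr_gva2hva(), and the handler table's address is stored through the resulting host pointer. This works because the selftest ELF is loaded into the guest at its link-time addresses, so host and guest agree on the symbol's virtual address. A sketch of the same pattern with the table address passed in explicitly; publish_handlers() is an illustrative name, not part of the library:

#include "kvm_util.h"

/*
 * Guest-side global, mirroring the static exception_handlers variable
 * at line 17 of lib/aarch64/processor.c; the guest reads it to find
 * its handler table.
 */
static vm_vaddr_t exception_handlers;

/*
 * Host-side sketch of the line 406 pattern: resolve the guest virtual
 * address of the global (identical to its host address, because the
 * test ELF is loaded at its link addresses) and store the table's GVA
 * through the host mapping.
 */
static void publish_handlers(struct kvm_vm *vm, vm_vaddr_t handlers_gva)
{
	*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)&exception_handlers) =
		handlers_gva;
}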
/tools/testing/selftests/kvm/include/ |
D | kvm_util.h |
    29   typedef uint64_t vm_vaddr_t;  /* Virtual Machine (Guest) virtual address */
    100  int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
    145  vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
    146  vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
    147  vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
    152  void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
    171  vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
    341  typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
    346  typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
|
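Line 29 makes vm_vaddr_t a bare uint64_t, which is what allows the casts above, and lines 341/346 use exactly that inside two host/guest synchronisation macros whose bodies translate &(g) on the fly. A sketch of such a macro, assuming (as the library does) that host and guest share link-time addresses; the macro name is illustrative rather than necessarily the one used in kvm_util.h:

#include <string.h>
#include "kvm_util.h"

/*
 * Copy the host's current value of a global into the guest's copy of
 * the same symbol. &(g) doubles as the guest virtual address because
 * the selftest ELF is loaded into the guest at its link-time
 * addresses. The macro name is illustrative.
 */
#define sync_global_to_guest_sketch(vm, g) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(_p, &(g), sizeof(g));				\
})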
/tools/testing/selftests/kvm/x86_64/ |
D | get_cpuid_test.c |
    145  struct kvm_cpuid2 *vcpu_alloc_cpuid(struct kvm_vm *vm, vm_vaddr_t *p_gva, struct kvm_cpuid2 *cpuid)  in vcpu_alloc_cpuid()
    148  vm_vaddr_t gva = vm_vaddr_alloc(vm, size, KVM_UTIL_MIN_VADDR);  in vcpu_alloc_cpuid()
    160  vm_vaddr_t cpuid_gva;  in main()
|
D | vmx_close_while_nested_test.c | 55 vm_vaddr_t vmx_pages_gva; in main()
|
D | svm_vmcall_test.c | 42 vm_vaddr_t svm_gva; in main()
|
D | hyperv_features.c |
    52   static inline u64 hypercall(u64 control, vm_vaddr_t input_address,  in hypercall()
    53           vm_vaddr_t output_address)  in hypercall()
    125  static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)  in guest_hcall()
    629  vm_vaddr_t msr_gva, hcall_page, hcall_params;  in main()
|
D | svm_int_ctl_test.c | 91 vm_vaddr_t svm_gva; in main()
|
D | vmx_tsc_adjust_test.c | 130 vm_vaddr_t vmx_pages_gva; in main()
|
D | vmx_apic_access_test.c | 82 vm_vaddr_t vmx_pages_gva; in main()
|
D | vmx_dirty_log_test.c | 71 vm_vaddr_t vmx_pages_gva = 0; in main()
|
D | smm_test.c | 135 vm_vaddr_t nested_gva = 0; in main()
|
D | vmx_nested_tsc_scaling_test.c | 154 vm_vaddr_t vmx_pages_gva; in main()
|
D | evmcs_test.c | 143 vm_vaddr_t vmx_pages_gva = 0; in main()
|
D | vmx_preemption_timer_test.c | 157 vm_vaddr_t vmx_pages_gva = 0; in main()
|
D | state_test.c | 157 vm_vaddr_t nested_gva = 0; in main()
|
D | hyperv_clock.c | 213 vm_vaddr_t tsc_page_gva; in main()
|
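Every x86_64 test above follows the same shape: main() declares a vm_vaddr_t, a helper allocates guest memory, fills it through the host mapping, and reports the guest virtual address back through an out-parameter (vcpu_alloc_cpuid() and vcpu_alloc_svm() in this listing both do exactly that). A sketch of the out-parameter pattern; struct test_params and the helper name are illustrative, only vm_vaddr_alloc_page() and addr_gva2hva() come from the listing:

#include <stdint.h>
#include <string.h>
#include "kvm_util.h"

/* Illustrative data block handed to the guest; not from the listing. */
struct test_params {
	uint64_t token;
};

/*
 * Allocate one guest page for the params, report its guest virtual
 * address through *p_gva, and hand back the host mapping so main()
 * can fill it in, the same shape as vcpu_alloc_cpuid() and
 * vcpu_alloc_svm() above. Names are illustrative.
 */
static struct test_params *alloc_test_params(struct kvm_vm *vm,
					     vm_vaddr_t *p_gva)
{
	vm_vaddr_t gva = vm_vaddr_alloc_page(vm);
	struct test_params *params = addr_gva2hva(vm, gva);

	memset(params, 0, sizeof(*params));
	*p_gva = gva;
	return params;
}

main() would then pass the reported GVA on to the guest entry point, typically as a guest_code() argument (the tests usually do that with vcpu_args_set(), which does not itself appear in this listing).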
/tools/testing/selftests/kvm/include/x86_64/ |
D | svm_util.h | 33 struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva);
|
/tools/testing/selftests/kvm/lib/x86_64/ |
D | svm.c |
    31  vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva)  in vcpu_alloc_svm()
    33  vm_vaddr_t svm_gva = vm_vaddr_alloc_page(vm);  in vcpu_alloc_svm()
|
D | ucall.c | 50 memcpy(&ucall, addr_gva2hva(vm, (vm_vaddr_t)regs.rdi), in get_ucall()
|
D | processor.c |
    20    vm_vaddr_t exception_handlers;
    520   vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)  in addr_gva2gpa()
    619   vm_vaddr_t stack_vaddr;  in vm_vcpu_add_default()
    1281  *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;  in vcpu_init_descriptor_tables()
    1287  vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers);  in vm_install_exception_handler()
    1289  handlers[vector] = (vm_vaddr_t)handler;  in vm_install_exception_handler()
|
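Lines 1281-1289 of lib/x86_64/processor.c show the two halves of exception-handler installation: the guest global exception_handlers is pointed at the handler table kept in vm->handlers, and vm_install_exception_handler() stores a handler's code address, cast to vm_vaddr_t, into the slot for its vector. A sketch of that second half with the table's GVA passed explicitly instead of read from the internal struct kvm_vm; the function and typedef names are illustrative:

#include "kvm_util.h"

/*
 * The real handlers take a struct ex_regs frame; the forward
 * declaration keeps this sketch self-contained.
 */
struct ex_regs;
typedef void (*handler_fn)(struct ex_regs *);

/*
 * Host-side sketch of lines 1287-1289: resolve the guest handler
 * table, then store the handler's code address (valid in the guest
 * for the same link-address reason as above) into the slot for the
 * vector. handlers_gva stands in for the vm->handlers field kept in
 * kvm_util_internal.h; the function name is illustrative.
 */
static void install_handler_sketch(struct kvm_vm *vm,
				   vm_vaddr_t handlers_gva,
				   int vector, handler_fn handler)
{
	vm_vaddr_t *handlers = addr_gva2hva(vm, handlers_gva);

	handlers[vector] = (vm_vaddr_t)handler;
}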
/tools/testing/selftests/kvm/lib/s390x/ |
D | processor.c | 89 vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) in addr_gva2gpa()
|
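addr_gva2gpa() is the one routine in this listing that every architecture reimplements (aarch64 line 141, x86_64 line 520, s390x line 89) behind the single prototype at kvm_util.h line 171, each walking its own guest page-table format. A short sketch, assuming only that vm_vaddr_alloc_page() maps the page it returns, of translating a fresh guest page the same way on all three architectures:

#include "kvm_util.h"

/*
 * Sketch: the same two calls translate a guest virtual address on
 * aarch64, x86_64 and s390x, since each architecture implements
 * addr_gva2gpa() behind the common prototype at kvm_util.h line 171.
 */
static vm_paddr_t translate_fresh_page(struct kvm_vm *vm)
{
	vm_vaddr_t gva = vm_vaddr_alloc_page(vm);

	/* Walk the guest page tables the library itself built. */
	return addr_gva2gpa(vm, gva);
}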