
Searched full:vm (Results 1 – 25 of 1877) sorted by relevance


/kernel/linux/linux-5.10/tools/testing/selftests/kvm/lib/aarch64/
processor.c
19: static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
21:         return (v + vm->page_size) & ~(vm->page_size - 1);
24: static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
26:         unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
27:         uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;
32: static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
34:         unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
35:         uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
37:         TEST_ASSERT(vm->pgtable_levels == 4,
38:                     "Mode %d does not have 4 page table levels", vm->mode);
[all …]
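
The index math above is worth unpacking. Below is a standalone sketch under assumed parameters (4 KiB pages, 48-bit guest VAs, 4 translation levels; none of these values come from the excerpt). Note that page_align() as quoted adds a full page_size before masking, so an already-aligned value advances to the next page boundary; the conventional round-up adds page_size - 1.

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t page_size = 4096;        /* assumed */
        const unsigned int page_shift = 12;     /* log2(page_size) */
        const unsigned int pgtable_levels = 4;  /* assumed */
        const unsigned int va_bits = 48;        /* assumed */

        uint64_t v = 8192;  /* already page-aligned */
        printf("as quoted:    %" PRIu64 "\n", (v + page_size) & ~(page_size - 1));      /* 12288 */
        printf("conventional: %" PRIu64 "\n", (v + page_size - 1) & ~(page_size - 1));  /* 8192 */

        /* pgd_index(): each level resolves page_shift - 3 = 9 index bits
         * (512 eight-byte entries per 4 KiB page), so the top level starts
         * at bit (4 - 1) * 9 + 12 = 39 and spans va_bits - 39 = 9 bits. */
        unsigned int shift = (pgtable_levels - 1) * (page_shift - 3) + page_shift;
        uint64_t mask = (1UL << (va_bits - shift)) - 1;
        uint64_t gva = 0x0000aabbccddee00ULL;   /* arbitrary example address */
        printf("pgd index:    %" PRIu64 "\n", (gva >> shift) & mask);  /* 341 */
        return 0;
    }
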
/kernel/linux/linux-5.10/drivers/virtio/
virtio_mem.c
168: static int register_virtio_mem_device(struct virtio_mem *vm)
177:         list_add_rcu(&vm->next, &virtio_mem_devices);
187: static void unregister_virtio_mem_device(struct virtio_mem *vm)
191:         list_del_rcu(&vm->next);
218: static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm,
224:         return (addr - mb_addr) / vm->subblock_size;
230: static void virtio_mem_mb_set_state(struct virtio_mem *vm, unsigned long mb_id,
233:         const unsigned long idx = mb_id - vm->first_mb_id;
236:         old_state = vm->mb_state[idx];
237:         vm->mb_state[idx] = state;
[all …]
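
Both helpers above are plain offset arithmetic: a physical address maps to a subblock index within its memory block, and a block id maps to a slot in the device-relative mb_state[] array. A minimal userspace sketch (the block and subblock sizes and the base address are illustrative assumptions, not read from the driver):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        /* Assumed geometry: 128 MiB memory blocks, 4 MiB subblocks. */
        const uint64_t mb_size       = 128ULL << 20;
        const uint64_t subblock_size = 4ULL << 20;
        const uint64_t first_mb_addr = 1ULL << 32;   /* hypothetical base */
        const unsigned long first_mb_id = first_mb_addr / mb_size;

        uint64_t addr = first_mb_addr + (9ULL << 20);   /* 9 MiB into block 0 */
        uint64_t mb_addr = (addr / mb_size) * mb_size;  /* start of its block */
        unsigned long mb_id = addr / mb_size;

        /* virtio_mem_phys_to_sb_id(): subblock index within the block. */
        printf("sb_id     = %" PRIu64 "\n", (addr - mb_addr) / subblock_size);  /* 2 */

        /* virtio_mem_mb_set_state(): mb_state[] is indexed relative to the
         * first block this device manages. */
        printf("state idx = %lu\n", mb_id - first_mb_id);                       /* 0 */
        return 0;
    }
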
/kernel/linux/linux-5.10/tools/testing/selftests/kvm/include/
kvm_util.h
22:  * structure kvm_util is using to maintain the state of a VM.
65: int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
66: int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
68: void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
74: void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log);
75: void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
78: int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
81: void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename,
84: void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
87:  * VM VCPU Dump
[all …]
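
These declarations sketch the life cycle of a selftest VM. A hedged usage example follows; vm_create(), VM_MODE_DEFAULT, and the memslot arguments are assumptions from the wider selftest library and do not appear in this excerpt:

    #include <fcntl.h>
    #include <stdio.h>
    #include "kvm_util.h"

    int main(void)
    {
        /* vm_create() is assumed to exist alongside these declarations. */
        struct kvm_vm *vm = vm_create(VM_MODE_DEFAULT, 512 /* phy pages */, O_RDWR);

        kvm_vm_elf_load(vm, "guest_payload.elf", 0, 0);  /* hypothetical binary */
        vm_enable_dirty_ring(vm, 4096);                  /* illustrative size */
        vm_dump(stdout, vm, 0 /* indent */);
        return 0;
    }
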
/kernel/linux/linux-5.10/tools/testing/selftests/kvm/lib/
kvm_util.c
66: /* VM Enable Capability
69:  * vm - Virtual Machine
76:  * Enables a capability (KVM_CAP_*) on the VM.
78: int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
82:         ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap);
92:  * vm - Virtual Machine
102: int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
105:         struct vcpu *vcpu = vcpu_find(vm, vcpu_id);
117: static void vm_open(struct kvm_vm *vm, int perm)
119:         vm->kvm_fd = open(KVM_DEV_PATH, perm);
[all …]
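
Beneath the wrappers this is a handful of raw KVM ioctls. A minimal standalone sketch (the capability is chosen for illustration and may be unsupported on a given kernel):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvm_fd = open("/dev/kvm", O_RDWR);        /* what vm_open() does */
        if (kvm_fd < 0) { perror("/dev/kvm"); return 1; }

        int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);  /* fd representing the VM */
        if (vm_fd < 0) { perror("KVM_CREATE_VM"); return 1; }

        /* vm_enable_cap() boils down to KVM_ENABLE_CAP on the VM fd. */
        struct kvm_enable_cap cap = { .cap = KVM_CAP_DIRTY_LOG_RING,
                                      .args = { 4096 } };  /* illustrative */
        if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
            perror("KVM_ENABLE_CAP");
        return 0;
    }
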
/kernel/linux/linux-5.10/tools/testing/selftests/kvm/lib/s390x/
processor.c
18: void virt_pgd_alloc(struct kvm_vm *vm, uint32_t memslot)
22:         TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
23:                     vm->page_size);
25:         if (vm->pgd_created)
28:         paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
30:         memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);
32:         vm->pgd = paddr;
33:         vm->pgd_created = true;
41: static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri, uint32_t memslot)
45:         taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1,
[all …]
/kernel/linux/linux-4.19/tools/testing/selftests/kvm/lib/
kvm_util.c
66: /* VM Enable Capability
69:  * vm - Virtual Machine
76:  * Enables a capability (KVM_CAP_*) on the VM.
78: int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
82:         ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap);
89: static void vm_open(struct kvm_vm *vm, int perm)
91:         vm->kvm_fd = open(KVM_DEV_PATH, perm);
92:         if (vm->kvm_fd < 0)
95:         /* Create VM. */
96:         vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, NULL);
[all …]
x86.c
230: void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
234:         TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use "
235:                     "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
238:         if (!vm->pgd_created) {
239:                 vm_paddr_t paddr = vm_phy_page_alloc(vm,
241:                 vm->pgd = paddr;
242:                 vm->pgd_created = true;
246: /* VM Virtual Page Map
249:  * vm - Virtual Machine
250:  * vaddr - VM Virtual Address
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/lima/
lima_vm.c
18:         struct lima_vm *vm;
35: static void lima_vm_unmap_range(struct lima_vm *vm, u32 start, u32 end)
43:                 vm->bts[pbe].cpu[bte] = 0;
47: static int lima_vm_map_page(struct lima_vm *vm, dma_addr_t pa, u32 va)
52:         if (!vm->bts[pbe].cpu) {
57:                 vm->bts[pbe].cpu = dma_alloc_wc(
58:                         vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
59:                         &vm->bts[pbe].dma, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
60:                 if (!vm->bts[pbe].cpu)
63:                 pts = vm->bts[pbe].dma;
[all …]
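
lima_vm_map_page() splits the GPU VA into a block-of-page-tables index (pbe) and an entry index within that block (bte), allocating each block of page tables lazily on first touch via dma_alloc_wc(). A sketch of that decomposition under assumed widths (4 KiB pages, 1024 four-byte PTEs per table, 8 tables per block, consistent with LIMA_VM_NUM_PT_PER_BT_SHIFT = 3 above but otherwise guessed):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Assumed layout: 12-bit page offset, 10 PTE-index bits per table,
         * tables grouped 8 per lazily allocated block, so each block maps
         * 2^(12+10+3) = 32 MiB of GPU VA. */
        const unsigned int page_shift = 12;
        const unsigned int pte_bits   = 10;
        const unsigned int bt_shift   = page_shift + pte_bits + 3;  /* 25 */

        uint32_t va  = 0x0345f000;
        uint32_t pbe = va >> bt_shift;                              /* which block */
        uint32_t bte = (va >> page_shift) & ((1u << (pte_bits + 3)) - 1);

        printf("pbe=%u bte=%u\n", pbe, bte);  /* pbe=1 bte=5215 */
        return 0;
    }
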
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdgpu/
amdgpu_vm.c
46:  * for the entire GPU, there are multiple VM page tables active
47:  * at any given time. The VM page tables can contain a mix
51:  * Each VM has an ID associated with it and there is a page table
88:  * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
92: static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
94:         mutex_lock(&vm->eviction_lock);
95:         vm->saved_flags = memalloc_nofs_save();
98: static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
100:         if (mutex_trylock(&vm->eviction_lock)) {
101:                 vm->saved_flags = memalloc_nofs_save();
[all …]
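
The matching unlock is outside the excerpt; to keep the reclaim-FS window bracketed by the mutex it presumably restores the saved flags before releasing the lock, in reverse order of the lock side. A sketch, not quoted from the file:

    static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
    {
        /* Undo memalloc_nofs_save() first, then drop the mutex, so the
         * NOFS allocation scope exactly covers the locked region. */
        memalloc_nofs_restore(vm->saved_flags);
        mutex_unlock(&vm->eviction_lock);
    }
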
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/selftests/
mock_gtt.c
27: static void mock_insert_page(struct i915_address_space *vm,
35: static void mock_insert_entries(struct i915_address_space *vm,
41: static void mock_bind_ppgtt(struct i915_address_space *vm,
51: static void mock_unbind_ppgtt(struct i915_address_space *vm,
56: static void mock_cleanup(struct i915_address_space *vm)
60: static void mock_clear_range(struct i915_address_space *vm,
73:         ppgtt->vm.gt = &i915->gt;
74:         ppgtt->vm.i915 = i915;
75:         ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
76:         ppgtt->vm.file = ERR_PTR(-ENODEV);
[all …]
i915_gem_gtt.c
160:         if (!ppgtt->vm.allocate_va_range)
171:         limit = min(ppgtt->vm.total, limit);
177:         err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
181:         err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
183:                 i915_vm_free_pt_stash(&ppgtt->vm, &stash);
187:         ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
190:         ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
192:         i915_vm_free_pt_stash(&ppgtt->vm, &stash);
199:         err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last);
203:         err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
[all …]
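
The quoted calls follow a fixed allocate/pin/use/free pattern. Reconstructed control flow around them (the error label and its target are assumptions, not quoted):

    err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
    if (err)
            goto err_ppgtt_cleanup;

    err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
    if (err) {
            i915_vm_free_pt_stash(&ppgtt->vm, &stash);
            goto err_ppgtt_cleanup;
    }

    /* Preallocated pages in the stash back the new page-table levels. */
    ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
    ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
    i915_vm_free_pt_stash(&ppgtt->vm, &stash);
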
/kernel/linux/linux-4.19/drivers/gpu/drm/radeon/
radeon_vm.c
37:  * for the entire GPU, there are multiple VM page tables active
38:  * at any given time. The VM page tables can contain a mix
42:  * Each VM has an ID associated with it and there is a page table
78:  * radeon_vm_manager_init - init the vm manager
82:  * Init the vm manager (cayman+).
100:  * radeon_vm_manager_fini - tear down the vm manager
104:  * Tear down the VM manager (cayman+).
120:  * radeon_vm_get_bos - add the vm BOs to a validation list
122:  * @vm: vm providing the BOs
129:                              struct radeon_vm *vm,
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/radeon/
radeon_vm.c
37:  * for the entire GPU, there are multiple VM page tables active
38:  * at any given time. The VM page tables can contain a mix
42:  * Each VM has an ID associated with it and there is a page table
78:  * radeon_vm_manager_init - init the vm manager
82:  * Init the vm manager (cayman+).
100:  * radeon_vm_manager_fini - tear down the vm manager
104:  * Tear down the VM manager (cayman+).
120:  * radeon_vm_get_bos - add the vm BOs to a validation list
122:  * @vm: vm providing the BOs
129:                              struct radeon_vm *vm,
[all …]
/kernel/linux/linux-4.19/drivers/gpu/drm/i915/
i915_gem_gtt.c
202:         err = vma->vm->allocate_va_range(vma->vm,
213:         vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
220:         vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
417: static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
422:         if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
423:                 i915_gem_shrink_all(vm->i915);
425:         page = stash_pop_page(&vm->free_pages);
429:         if (!vm->pt_kmap_wc)
433:         page = stash_pop_page(&vm->i915->mm.wc_stash);
460:         stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
[all …]
/kernel/linux/linux-5.10/sound/pci/ctxfi/
ctvmem.c
26:  * Find or create vm block based on requested @size.
30: get_vm_block(struct ct_vm *vm, unsigned int size, struct ct_atc *atc)
36:         if (size > vm->size) {
42:         mutex_lock(&vm->lock);
43:         list_for_each(pos, &vm->unused) {
48:         if (pos == &vm->unused)
52:         /* Move the vm node from unused list to used list directly */
53:         list_move(&entry->list, &vm->used);
54:         vm->size -= size;
65:         list_add(&block->list, &vm->used);
[all …]
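
get_vm_block() is a first-fit allocator: it walks the vm->unused list for the first entry large enough, takes the block (moving it wholesale on an exact fit, otherwise carving the front), and shrinks the pool. A self-contained userspace sketch of the same policy (simplified; the driver uses kernel list_head lists and also tracks the used list):

    #include <stdio.h>
    #include <stdlib.h>

    struct block {
        unsigned int addr, size;
        struct block *next;
    };

    /* First fit: return the first free block with room for @size, carving
     * the allocation from its front and leaving any remainder free. */
    static struct block *get_block(struct block **unused, unsigned int size)
    {
        for (struct block **pos = unused; *pos; pos = &(*pos)->next) {
            if ((*pos)->size < size)
                continue;
            if ((*pos)->size == size) {            /* exact fit: unlink it */
                struct block *b = *pos;
                *pos = b->next;
                return b;
            }
            struct block *b = malloc(sizeof(*b));  /* split: carve the front */
            if (!b)
                return NULL;
            b->addr = (*pos)->addr;
            b->size = size;
            b->next = NULL;
            (*pos)->addr += size;
            (*pos)->size -= size;
            return b;
        }
        return NULL;                               /* nothing fits */
    }

    int main(void)
    {
        struct block pool = { .addr = 0, .size = 64, .next = NULL };
        struct block *unused = &pool;

        struct block *b = get_block(&unused, 16);
        if (b) {
            printf("got [%u, +%u), %u units left\n", b->addr, b->size, pool.size);
            free(b);
        }
        return 0;
    }
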
/kernel/linux/linux-4.19/sound/pci/ctxfi/
ctvmem.c
29:  * Find or create vm block based on requested @size.
33: get_vm_block(struct ct_vm *vm, unsigned int size, struct ct_atc *atc)
39:         if (size > vm->size) {
45:         mutex_lock(&vm->lock);
46:         list_for_each(pos, &vm->unused) {
51:         if (pos == &vm->unused)
55:         /* Move the vm node from unused list to used list directly */
56:         list_move(&entry->list, &vm->used);
57:         vm->size -= size;
68:         list_add(&block->list, &vm->used);
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gt/
intel_ggtt.c
44:         struct drm_i915_private *i915 = ggtt->vm.i915;
46:         i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
48:         ggtt->vm.is_ggtt = true;
51:         ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
54:         ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
60:         ggtt->vm.cleanup(&ggtt->vm);
112:         mutex_lock(&ggtt->vm.mutex);
115:         open = atomic_xchg(&ggtt->vm.open, 0);
117:         list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
130:         ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
[all …]
gen8_ppgtt.c
55:         struct drm_i915_private *i915 = ppgtt->vm.i915;
56:         struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
67:         if (i915_vm_is_4lvl(&ppgtt->vm)) {
146: gen8_pd_top_count(const struct i915_address_space *vm)
148:         unsigned int shift = __gen8_pte_shift(vm->top);
149:         return (vm->total + (1ull << shift) - 1) >> shift;
153: gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
155:         struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
157:         if (vm->top == 2)
160:         return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
[all …]
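
gen8_pd_top_count() is a ceiling division: how many top-level directory entries are needed to span vm->total. A worked example under an assumed 4-level layout where each top-level entry covers 2^39 bytes (the shift value is illustrative, not taken from __gen8_pte_shift()):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        const unsigned int shift = 39;        /* assumed span per top entry */
        const uint64_t total = 1ULL << 48;    /* 48-bit GPU address space */

        /* gen8_pd_top_count(): ceil(total / 2^shift) */
        printf("%" PRIu64 "\n", (total + (1ULL << shift) - 1) >> shift);  /* 512 */
        return 0;
    }
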
intel_gtt.c
14: struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
16:         if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
17:                 i915_gem_shrink_all(vm->i915);
19:         return i915_gem_object_create_internal(vm->i915, sz);
22: int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
34: void __i915_vm_close(struct i915_address_space *vm)
38:         if (!atomic_dec_and_mutex_lock(&vm->open, &vm->mutex))
41:         list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
54:         GEM_BUG_ON(!list_empty(&vm->bound_list));
56:         mutex_unlock(&vm->mutex);
[all …]
/kernel/linux/linux-4.19/tools/testing/selftests/kvm/include/
kvm_util.h
28:  * structure kvm_util is using to maintain the state of a VM.
53: int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
59: void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log);
62:                         struct kvm_vm *vm, const vm_vaddr_t gva, size_t len);
64: void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename,
67: void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
68: void vcpu_dump(FILE *stream, struct kvm_vm *vm,
71: void vm_create_irqchip(struct kvm_vm *vm);
73: void vm_userspace_mem_region_add(struct kvm_vm *vm,
78: void vcpu_ioctl(struct kvm_vm *vm,
[all …]
/kernel/linux/linux-4.19/drivers/gpu/drm/amd/amdgpu/
amdgpu_vm.c
43:  * for the entire GPU, there are multiple VM page tables active
44:  * at any given time. The VM page tables can contain a mix
48:  * Each VM has an ID associated with it and there is a page table
71:  * Encapsulate some VM table update parameters to reduce
83:  * @vm: optional amdgpu_vm we do this update for
85:         struct amdgpu_vm *vm;
107:  * DMA addresses to use for mapping, used during VM update by CPU
115:  * used during VM update by CPU
137:  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
139:  * @base: base structure for tracking BO usage in a VM
[all …]
/kernel/linux/linux-5.10/tools/testing/selftests/kvm/lib/x86_64/
processor.c
212: void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
214:         TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
215:                     "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
218:         if (!vm->pgd_created) {
219:                 vm_paddr_t paddr = vm_phy_page_alloc(vm,
221:                 vm->pgd = paddr;
222:                 vm->pgd_created = true;
226: void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
232:         TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
233:                     "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
[all …]
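
A hedged usage sketch of the two helpers above, as they might appear in a test body. The addresses and memslot 0 are illustrative; a real test would take paddr from an allocator such as vm_phy_page_alloc(), seen in the excerpts above:

    /* virt_pgd_alloc() is idempotent thanks to the vm->pgd_created guard. */
    virt_pgd_alloc(vm, 0 /* pgd_memslot */);

    /* Illustrative addresses only. */
    vm_paddr_t paddr = 0x200000;
    virt_pg_map(vm, 0x400000 /* vaddr */, paddr, 0 /* pgd_memslot */);
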
vmx.c
46: int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
55:         vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_evmcs_cap);
69:  * vm - The VM to allocate guest-virtual addresses in.
78: vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
80:         vm_vaddr_t vmx_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
81:         struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);
84:         vmx->vmxon = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
85:         vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);
86:         vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);
89:         vmx->vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
[all …]
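
A typical call sequence for vcpu_alloc_vmx() in the nested tests (hedged; vcpu_args_set() and VCPU_ID are assumed from the wider selftest library, not this excerpt):

    vm_vaddr_t vmx_gva;
    struct vmx_pages *vmx = vcpu_alloc_vmx(vm, &vmx_gva);

    /* The host fills the structure through its HVA view; the guest later
     * receives vmx_gva and reads the same memory through its own mapping. */
    vcpu_args_set(vm, VCPU_ID, 1, vmx_gva);  /* assumed helper */
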
/kernel/linux/linux-4.19/drivers/gpu/drm/i915/selftests/
mock_gtt.c
27: static void mock_insert_page(struct i915_address_space *vm,
35: static void mock_insert_entries(struct i915_address_space *vm,
54: static void mock_cleanup(struct i915_address_space *vm)
69:         ppgtt->vm.i915 = i915;
70:         ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
71:         ppgtt->vm.file = ERR_PTR(-ENODEV);
73:         i915_address_space_init(&ppgtt->vm, i915);
75:         ppgtt->vm.clear_range = nop_clear_range;
76:         ppgtt->vm.insert_page = mock_insert_page;
77:         ppgtt->vm.insert_entries = mock_insert_entries;
[all …]
/kernel/linux/linux-5.10/tools/testing/selftests/kvm/x86_64/
vmx_set_nested_state_test.c
30: void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state)
32:         vcpu_nested_state_set(vm, VCPU_ID, state, false);
35: void test_nested_state_expect_errno(struct kvm_vm *vm,
41:         rv = vcpu_nested_state_set(vm, VCPU_ID, state, true);
48: void test_nested_state_expect_einval(struct kvm_vm *vm,
51:         test_nested_state_expect_errno(vm, state, EINVAL);
54: void test_nested_state_expect_efault(struct kvm_vm *vm,
57:         test_nested_state_expect_errno(vm, state, EFAULT);
89: void test_vmx_nested_state(struct kvm_vm *vm)
99:         test_nested_state_expect_einval(vm, state);
[all …]
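
The assertion body of test_nested_state_expect_errno() is elided; consistent with the rv and errno captured at line 41, the check presumably looks like this sketch (the exact message wording is an assumption):

    TEST_ASSERT(rv == -1 && errno == expected_errno,
                "Expected -1 with errno %d (%s) from KVM_SET_NESTED_STATE, "
                "got rv %i, errno %d (%s)",
                expected_errno, strerror(expected_errno),
                rv, errno, strerror(errno));
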
