
Searched refs:VCPU_ID (Results 1 – 25 of 30) sorted by relevance

/kernel/linux/linux-5.10/tools/testing/selftests/kvm/s390x/
resets.c
16 #define VCPU_ID 3 macro
19 struct kvm_s390_irq buf[VCPU_ID + LOCAL_IRQS];
68 vcpu_get_reg(vm, VCPU_ID, &reg); in test_one_reg()
79 irqs = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_GET_IRQ_STATE, &irq_state); in assert_noirq()
95 vcpu_regs_get(vm, VCPU_ID, &regs); in assert_clear()
98 vcpu_sregs_get(vm, VCPU_ID, &sregs); in assert_clear()
101 vcpu_fpu_get(vm, VCPU_ID, &fpu); in assert_clear()
136 vcpu_sregs_get(vm, VCPU_ID, &sregs); in assert_initial()
162 vcpu_fpu_get(vm, VCPU_ID, &fpu); in assert_initial()
207 vm = vm_create_default(VCPU_ID, 0, guest_code_initial); in test_normal()
[all …]
sync_regs_test.c
24 #define VCPU_ID 5 macro
94 vm = vm_create_default(VCPU_ID, 0, guest_code); in main()
96 run = vcpu_state(vm, VCPU_ID); in main()
100 rv = _vcpu_run(vm, VCPU_ID); in main()
104 vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0; in main()
107 rv = _vcpu_run(vm, VCPU_ID); in main()
111 vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0; in main()
115 rv = _vcpu_run(vm, VCPU_ID); in main()
119 vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0; in main()
122 rv = _vcpu_run(vm, VCPU_ID); in main()
[all …]
memop.c
16 #define VCPU_ID 1 macro
50 vm = vm_create_default(VCPU_ID, 0, guest_code); in main()
51 run = vcpu_state(vm, VCPU_ID); in main()
63 vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); in main()
66 vcpu_run(vm, VCPU_ID); in main()
81 vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); in main()
93 rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); in main()
103 rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); in main()
114 rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); in main()
124 rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo); in main()
[all …]
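
All of the s390x hits above follow the same shape: the test defines a private VCPU_ID macro, creates a VM containing that single vCPU, and then passes the same (vm, VCPU_ID) pair to every per-vCPU helper or raw ioctl. Below is a minimal sketch of that pattern, assuming the linux-5.10 selftest headers and using only calls that appear in the hits (vm_create_default, vcpu_run, vcpu_regs_get, kvm_vm_free); the guest_code() body is a placeholder and GUEST_DONE() is assumed from the selftest ucall helpers.

/*
 * Minimal sketch of the per-vCPU-ID pattern shared by the selftests
 * above (linux-5.10 selftest API, before the later vcpu-handle rework).
 */
#include "test_util.h"
#include "kvm_util.h"

#define VCPU_ID 3		/* arbitrary per-test constant; resets.c above uses 3 */

static void guest_code(void)
{
	GUEST_DONE();		/* signal completion; the host's vcpu_run() returns on this exit */
}

int main(int argc, char *argv[])
{
	struct kvm_vm *vm;
	struct kvm_regs regs;

	/* One VM with a single vCPU identified by VCPU_ID. */
	vm = vm_create_default(VCPU_ID, 0, guest_code);

	/* Every per-vCPU accessor takes the same (vm, VCPU_ID) pair. */
	vcpu_run(vm, VCPU_ID);
	vcpu_regs_get(vm, VCPU_ID, &regs);

	kvm_vm_free(vm);
	return 0;
}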
/kernel/linux/linux-5.10/tools/testing/selftests/kvm/x86_64/
evmcs_test.c
20 #define VCPU_ID 5 macro
93 vm = vm_create_default(VCPU_ID, 0, guest_code); in main()
95 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); in main()
104 vcpu_enable_evmcs(vm, VCPU_ID); in main()
106 run = vcpu_state(vm, VCPU_ID); in main()
108 vcpu_regs_get(vm, VCPU_ID, &regs1); in main()
111 vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva); in main()
114 _vcpu_run(vm, VCPU_ID); in main()
120 switch (get_ucall(vm, VCPU_ID, &uc)) { in main()
138 state = vcpu_save_state(vm, VCPU_ID); in main()
[all …]
sync_regs_test.c
23 #define VCPU_ID 5 macro
103 vm = vm_create_default(VCPU_ID, 0, guest_code); in main()
105 run = vcpu_state(vm, VCPU_ID); in main()
109 rv = _vcpu_run(vm, VCPU_ID); in main()
113 vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0; in main()
116 rv = _vcpu_run(vm, VCPU_ID); in main()
120 vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0; in main()
124 rv = _vcpu_run(vm, VCPU_ID); in main()
128 vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0; in main()
131 rv = _vcpu_run(vm, VCPU_ID); in main()
[all …]
smm_test.c
22 #define VCPU_ID 1 macro
103 vm = vm_create_default(VCPU_ID, 0, guest_code); in main()
105 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); in main()
107 run = vcpu_state(vm, VCPU_ID); in main()
118 vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA); in main()
130 vcpu_args_set(vm, VCPU_ID, 1, nested_gva); in main()
133 _vcpu_run(vm, VCPU_ID); in main()
140 vcpu_regs_get(vm, VCPU_ID, &regs); in main()
152 state = vcpu_save_state(vm, VCPU_ID); in main()
155 vm_vcpu_add(vm, VCPU_ID); in main()
[all …]
platform_info_test.c
24 #define VCPU_ID 0 macro
50 struct kvm_run *run = vcpu_state(vm, VCPU_ID); in test_msr_platform_info_enabled()
54 vcpu_run(vm, VCPU_ID); in test_msr_platform_info_enabled()
59 get_ucall(vm, VCPU_ID, &uc); in test_msr_platform_info_enabled()
70 struct kvm_run *run = vcpu_state(vm, VCPU_ID); in test_msr_platform_info_disabled()
73 vcpu_run(vm, VCPU_ID); in test_msr_platform_info_disabled()
95 vm = vm_create_default(VCPU_ID, 0, guest_code); in main()
97 msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO); in main()
98 vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, in main()
102 vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info); in main()
vmx_preemption_timer_test.c
25 #define VCPU_ID 5 macro
173 vm = vm_create_default(VCPU_ID, 0, guest_code); in main()
174 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); in main()
175 run = vcpu_state(vm, VCPU_ID); in main()
177 vcpu_regs_get(vm, VCPU_ID, &regs1); in main()
181 vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva); in main()
188 _vcpu_run(vm, VCPU_ID); in main()
194 switch (get_ucall(vm, VCPU_ID, &uc)) { in main()
236 state = vcpu_save_state(vm, VCPU_ID); in main()
238 vcpu_regs_get(vm, VCPU_ID, &regs1); in main()
[all …]
state_test.c
23 #define VCPU_ID 5 macro
167 vm = vm_create_default(VCPU_ID, 0, guest_code); in main()
168 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); in main()
169 run = vcpu_state(vm, VCPU_ID); in main()
171 vcpu_regs_get(vm, VCPU_ID, &regs1); in main()
183 vcpu_args_set(vm, VCPU_ID, 1, nested_gva); in main()
186 _vcpu_run(vm, VCPU_ID); in main()
192 switch (get_ucall(vm, VCPU_ID, &uc)) { in main()
210 state = vcpu_save_state(vm, VCPU_ID); in main()
212 vcpu_regs_get(vm, VCPU_ID, &regs1); in main()
[all …]
debug_regs.c
12 #define VCPU_ID 0 macro
55 #define APPLY_DEBUG() vcpu_set_guest_debug(vm, VCPU_ID, &debug)
58 vcpu_regs_get(vm, VCPU_ID, &regs); \
60 vcpu_regs_set(vm, VCPU_ID, &regs); \
87 vm = vm_create_default(VCPU_ID, 0, guest_code); in main()
88 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); in main()
89 run = vcpu_state(vm, VCPU_ID); in main()
95 vcpu_run(vm, VCPU_ID); in main()
111 vcpu_run(vm, VCPU_ID); in main()
134 vcpu_run(vm, VCPU_ID); in main()
[all …]
cr4_cpuid_sync_test.c
24 #define VCPU_ID 1 macro
83 vm = vm_create_default(VCPU_ID, 0, guest_code); in main()
84 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); in main()
85 run = vcpu_state(vm, VCPU_ID); in main()
88 rc = _vcpu_run(vm, VCPU_ID); in main()
96 switch (get_ucall(vm, VCPU_ID, &uc)) { in main()
99 vcpu_sregs_get(vm, VCPU_ID, &sregs); in main()
101 vcpu_sregs_set(vm, VCPU_ID, &sregs); in main()
vmx_close_while_nested_test.c
21 #define VCPU_ID 5 macro
59 vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code); in main()
60 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); in main()
64 vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva); in main()
67 volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID); in main()
70 vcpu_run(vm, VCPU_ID); in main()
79 switch (get_ucall(vm, VCPU_ID, &uc)) { in main()
svm_vmcall_test.c
15 #define VCPU_ID 5 macro
46 vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code); in main()
47 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); in main()
50 vcpu_args_set(vm, VCPU_ID, 1, svm_gva); in main()
53 volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID); in main()
56 vcpu_run(vm, VCPU_ID); in main()
62 switch (get_ucall(vm, VCPU_ID, &uc)) { in main()
set_sregs_test.c
25 #define VCPU_ID 5 macro
37 vm = vm_create_default(VCPU_ID, 0, NULL); in main()
39 vcpu_sregs_get(vm, VCPU_ID, &sregs); in main()
41 rc = _vcpu_sregs_set(vm, VCPU_ID, &sregs); in main()
45 rc = _vcpu_sregs_set(vm, VCPU_ID, &sregs); in main()
vmx_dirty_log_test.c
20 #define VCPU_ID 1 macro
84 vm = vm_create_default(VCPU_ID, 0, l1_guest_code); in main()
85 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); in main()
87 vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva); in main()
88 run = vcpu_state(vm, VCPU_ID); in main()
120 _vcpu_run(vm, VCPU_ID); in main()
126 switch (get_ucall(vm, VCPU_ID, &uc)) { in main()
vmx_apic_access_test.c
31 #define VCPU_ID 0 macro
89 vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code); in main()
90 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); in main()
101 vcpu_args_set(vm, VCPU_ID, 2, vmx_pages_gva, high_gpa); in main()
104 volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID); in main()
107 vcpu_run(vm, VCPU_ID); in main()
125 switch (get_ucall(vm, VCPU_ID, &uc)) { in main()
vmx_tsc_adjust_test.c
36 #define VCPU_ID 5 macro
134 vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code); in main()
135 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); in main()
139 vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva); in main()
142 volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID); in main()
145 vcpu_run(vm, VCPU_ID); in main()
151 switch (get_ucall(vm, VCPU_ID, &uc)) { in main()
xss_msr_test.c
15 #define VCPU_ID 1 macro
47 vm = vm_create_default(VCPU_ID, 0, 0); in main()
58 xss_val = vcpu_get_msr(vm, VCPU_ID, MSR_IA32_XSS); in main()
62 vcpu_set_msr(vm, VCPU_ID, MSR_IA32_XSS, xss_val); in main()
70 r = _vcpu_set_msr(vm, VCPU_ID, MSR_IA32_XSS, 1ull << i); in main()
kvm_pv_test.c
174 #define VCPU_ID 0 macro
182 run = vcpu_state(vm, VCPU_ID); in enter_guest()
185 r = _vcpu_run(vm, VCPU_ID); in enter_guest()
191 switch (get_ucall(vm, VCPU_ID, &uc)) { in enter_guest()
218 vm = vm_create_default(VCPU_ID, 0, guest_main); in main()
222 vcpu_enable_cap(vm, VCPU_ID, &cap); in main()
226 vcpu_set_cpuid(vm, VCPU_ID, best); in main()
229 vcpu_init_descriptor_tables(vm, VCPU_ID); in main()
tsc_msrs_test.c
12 #define VCPU_ID 0 macro
109 vm = vm_create_default(VCPU_ID, 0, guest_code); in main()
110 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); in main()
117 run_vcpu(vm, VCPU_ID, 1); in main()
123 run_vcpu(vm, VCPU_ID, 2); in main()
135 run_vcpu(vm, VCPU_ID, 3); in main()
151 run_vcpu(vm, VCPU_ID, 4); in main()
160 run_vcpu(vm, VCPU_ID, 5); in main()
hyperv_cpuid.c
23 #define VCPU_ID 0 macro
118 ret = _vcpu_ioctl(vm, VCPU_ID, KVM_GET_SUPPORTED_HV_CPUID, &cpuid); in test_hv_cpuid_e2big()
140 vcpu_ioctl(vm, VCPU_ID, KVM_GET_SUPPORTED_HV_CPUID, cpuid); in kvm_get_supported_hv_cpuid()
165 vm = vm_create_default(VCPU_ID, 0, guest_code); in main()
178 vcpu_enable_evmcs(vm, VCPU_ID); in main()
user_msr_test.c
22 #define VCPU_ID 5 macro
138 switch (get_ucall(vm, VCPU_ID, &uc)) { in handle_ucall()
207 vm = vm_create_default(VCPU_ID, 0, guest_code); in main()
208 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); in main()
209 run = vcpu_state(vm, VCPU_ID); in main()
222 rc = _vcpu_run(vm, VCPU_ID); in main()
vmx_set_nested_state_test.c
26 #define VCPU_ID 5 macro
32 vcpu_nested_state_set(vm, VCPU_ID, state, false); in test_nested_state()
41 rv = vcpu_nested_state_set(vm, VCPU_ID, state, true); in test_nested_state_expect_errno()
125 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); in test_vmx_nested_state()
141 vcpu_enable_evmcs(vm, VCPU_ID); in test_vmx_nested_state()
237 vcpu_nested_state_get(vm, VCPU_ID, state); in test_vmx_nested_state()
265 vm = vm_create_default(VCPU_ID, 0, 0); in main()
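
The x86_64 hits above additionally mirror host CPUID into the guest with vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()) and then drive the guest from a _vcpu_run()/get_ucall() loop. Here is a condensed sketch of that loop under the linux-5.10 selftest headers; the guest body, the checkpoint value, and the failure message are placeholders, not taken from any one test above.

/*
 * Condensed run loop shared by most of the x86_64 tests listed above:
 * enter the guest, insist on a KVM_EXIT_IO exit (how ucalls surface on
 * x86), then dispatch on the ucall the guest made.
 */
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

#define VCPU_ID 5

static void guest_code(void)
{
	GUEST_SYNC(1);		/* placeholder checkpoint */
	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct ucall uc;
	bool done = false;

	vm = vm_create_default(VCPU_ID, 0, guest_code);
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
	run = vcpu_state(vm, VCPU_ID);

	while (!done) {
		_vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "unexpected exit reason: %u (%s)",
			    run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_SYNC:
			/* guest checkpoint; a real test validates state here */
			break;
		case UCALL_ABORT:
			TEST_FAIL("guest assertion failed");
			/* not reached */
		case UCALL_DONE:
			done = true;
			break;
		}
	}

	kvm_vm_free(vm);
	return 0;
}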
/kernel/linux/linux-5.10/tools/testing/selftests/kvm/
set_memory_region_test.c
20 #define VCPU_ID 0 macro
67 run = vcpu_state(vm, VCPU_ID); in vcpu_worker()
70 vcpu_run(vm, VCPU_ID); in vcpu_worker()
73 cmd = get_ucall(vm, VCPU_ID, &uc); in vcpu_worker()
122 vm = vm_create_default(VCPU_ID, 0, guest_code); in spawn_vm()
124 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); in spawn_vm()
291 run = vcpu_state(vm, VCPU_ID); in test_delete_memory_region()
297 vcpu_regs_get(vm, VCPU_ID, &regs); in test_delete_memory_region()
320 vm_vcpu_add(vm, VCPU_ID); in test_zero_memory_regions()
324 vcpu_run(vm, VCPU_ID); in test_zero_memory_regions()
[all …]
dirty_log_test.c
22 #define VCPU_ID 1 macro
266 run = vcpu_state(vm, VCPU_ID); in vcpu_worker()
273 ret = _vcpu_run(vm, VCPU_ID); in vcpu_worker()
275 if (get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC) { in vcpu_worker()
399 vm = create_vm(mode, VCPU_ID, in run_test()
447 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); in run_test()
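
set_memory_region_test.c and dirty_log_test.c run the vCPU from a worker thread so the main thread can change memslots or harvest the dirty log concurrently; VCPU_ID is still the only handle the worker needs. A simplified sketch of that arrangement follows, with the guest body and the main-thread work left as placeholders rather than copied from either test.

/*
 * Worker-thread pattern: the vCPU loops in its own pthread while main()
 * is free to manipulate the VM (memslots, dirty log, ...).
 */
#include <pthread.h>
#include "test_util.h"
#include "kvm_util.h"

#define VCPU_ID 0

static void guest_code(void)
{
	GUEST_SYNC(0);		/* placeholder checkpoint */
	GUEST_DONE();
}

static void *vcpu_worker(void *data)
{
	struct kvm_vm *vm = data;
	struct ucall uc;

	/* Re-enter the guest until it signals completion via ucall. */
	do {
		vcpu_run(vm, VCPU_ID);
	} while (get_ucall(vm, VCPU_ID, &uc) != UCALL_DONE);

	return NULL;
}

int main(int argc, char *argv[])
{
	struct kvm_vm *vm;
	pthread_t vcpu_thread;

	vm = vm_create_default(VCPU_ID, 0, guest_code);

	pthread_create(&vcpu_thread, NULL, vcpu_worker, vm);
	/* ... main thread would update memslots / fetch the dirty log here ... */
	pthread_join(vcpu_thread, NULL);

	kvm_vm_free(vm);
	return 0;
}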
