1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/objtool.h>
4 #include <linux/percpu.h>
5
6 #include <asm/debugreg.h>
7 #include <asm/mmu_context.h>
8
9 #include "cpuid.h"
10 #include "hyperv.h"
11 #include "mmu.h"
12 #include "nested.h"
13 #include "pmu.h"
14 #include "sgx.h"
15 #include "trace.h"
16 #include "vmx.h"
17 #include "x86.h"
18
19 static bool __read_mostly enable_shadow_vmcs = 1;
20 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
21
22 static bool __read_mostly nested_early_check = 0;
23 module_param(nested_early_check, bool, S_IRUGO);
24
25 #define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
26
27 /*
28 * Hyper-V requires all of these, so mark them as supported even though
29 * they are just treated the same as all-context.
30 */
31 #define VMX_VPID_EXTENT_SUPPORTED_MASK \
32 (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \
33 VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \
34 VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \
35 VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
36
37 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
38
39 enum {
40 VMX_VMREAD_BITMAP,
41 VMX_VMWRITE_BITMAP,
42 VMX_BITMAP_NR
43 };
44 static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
45
46 #define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP])
47 #define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP])
48
49 struct shadow_vmcs_field {
50 u16 encoding;
51 u16 offset;
52 };
53 static struct shadow_vmcs_field shadow_read_only_fields[] = {
54 #define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
55 #include "vmcs_shadow_fields.h"
56 };
57 static int max_shadow_read_only_fields =
58 ARRAY_SIZE(shadow_read_only_fields);
59
60 static struct shadow_vmcs_field shadow_read_write_fields[] = {
61 #define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
62 #include "vmcs_shadow_fields.h"
63 };
64 static int max_shadow_read_write_fields =
65 ARRAY_SIZE(shadow_read_write_fields);
66
init_vmcs_shadow_fields(void)67 static void init_vmcs_shadow_fields(void)
68 {
69 int i, j;
70
71 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
72 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
73
74 for (i = j = 0; i < max_shadow_read_only_fields; i++) {
75 struct shadow_vmcs_field entry = shadow_read_only_fields[i];
76 u16 field = entry.encoding;
77
78 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
79 (i + 1 == max_shadow_read_only_fields ||
80 shadow_read_only_fields[i + 1].encoding != field + 1))
81 pr_err("Missing field from shadow_read_only_field %x\n",
82 field + 1);
83
84 clear_bit(field, vmx_vmread_bitmap);
85 if (field & 1)
86 #ifdef CONFIG_X86_64
87 continue;
88 #else
89 entry.offset += sizeof(u32);
90 #endif
91 shadow_read_only_fields[j++] = entry;
92 }
93 max_shadow_read_only_fields = j;
94
95 for (i = j = 0; i < max_shadow_read_write_fields; i++) {
96 struct shadow_vmcs_field entry = shadow_read_write_fields[i];
97 u16 field = entry.encoding;
98
99 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
100 (i + 1 == max_shadow_read_write_fields ||
101 shadow_read_write_fields[i + 1].encoding != field + 1))
102 pr_err("Missing field from shadow_read_write_field %x\n",
103 field + 1);
104
105 WARN_ONCE(field >= GUEST_ES_AR_BYTES &&
106 field <= GUEST_TR_AR_BYTES,
107 "Update vmcs12_write_any() to drop reserved bits from AR_BYTES");
108
109 /*
110 * PML and the preemption timer can be emulated, but the
111 * processor cannot vmwrite to fields that don't exist
112 * on bare metal.
113 */
114 switch (field) {
115 case GUEST_PML_INDEX:
116 if (!cpu_has_vmx_pml())
117 continue;
118 break;
119 case VMX_PREEMPTION_TIMER_VALUE:
120 if (!cpu_has_vmx_preemption_timer())
121 continue;
122 break;
123 case GUEST_INTR_STATUS:
124 if (!cpu_has_vmx_apicv())
125 continue;
126 break;
127 default:
128 break;
129 }
130
131 clear_bit(field, vmx_vmwrite_bitmap);
132 clear_bit(field, vmx_vmread_bitmap);
133 if (field & 1)
134 #ifdef CONFIG_X86_64
135 continue;
136 #else
137 entry.offset += sizeof(u32);
138 #endif
139 shadow_read_write_fields[j++] = entry;
140 }
141 max_shadow_read_write_fields = j;
142 }
143
144 /*
145 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
146 * set the success or error code of an emulated VMX instruction (as specified
147 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
148 * instruction.
149 */
nested_vmx_succeed(struct kvm_vcpu * vcpu)150 static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
151 {
152 vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
153 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
154 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
155 return kvm_skip_emulated_instruction(vcpu);
156 }
157
nested_vmx_failInvalid(struct kvm_vcpu * vcpu)158 static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
159 {
160 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
161 & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
162 X86_EFLAGS_SF | X86_EFLAGS_OF))
163 | X86_EFLAGS_CF);
164 return kvm_skip_emulated_instruction(vcpu);
165 }
166
nested_vmx_failValid(struct kvm_vcpu * vcpu,u32 vm_instruction_error)167 static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
168 u32 vm_instruction_error)
169 {
170 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
171 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
172 X86_EFLAGS_SF | X86_EFLAGS_OF))
173 | X86_EFLAGS_ZF);
174 get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
175 /*
176 * We don't need to force sync to shadow VMCS because
177 * VM_INSTRUCTION_ERROR is not shadowed. Enlightened VMCS 'shadows' all
178 * fields and thus must be synced.
179 */
180 if (to_vmx(vcpu)->nested.hv_evmcs_vmptr != EVMPTR_INVALID)
181 to_vmx(vcpu)->nested.need_vmcs12_to_shadow_sync = true;
182
183 return kvm_skip_emulated_instruction(vcpu);
184 }
185
nested_vmx_fail(struct kvm_vcpu * vcpu,u32 vm_instruction_error)186 static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
187 {
188 struct vcpu_vmx *vmx = to_vmx(vcpu);
189
190 /*
191 * failValid writes the error number to the current VMCS, which
192 * can't be done if there isn't a current VMCS.
193 */
194 if (vmx->nested.current_vmptr == -1ull &&
195 !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
196 return nested_vmx_failInvalid(vcpu);
197
198 return nested_vmx_failValid(vcpu, vm_instruction_error);
199 }
200
nested_vmx_abort(struct kvm_vcpu * vcpu,u32 indicator)201 static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
202 {
203 /* TODO: not to reset guest simply here. */
204 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
205 pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
206 }
207
vmx_control_verify(u32 control,u32 low,u32 high)208 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
209 {
210 return fixed_bits_valid(control, low, high);
211 }
212
vmx_control_msr(u32 low,u32 high)213 static inline u64 vmx_control_msr(u32 low, u32 high)
214 {
215 return low | ((u64)high << 32);
216 }
217
vmx_disable_shadow_vmcs(struct vcpu_vmx * vmx)218 static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
219 {
220 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
221 vmcs_write64(VMCS_LINK_POINTER, -1ull);
222 vmx->nested.need_vmcs12_to_shadow_sync = false;
223 }
224
nested_release_evmcs(struct kvm_vcpu * vcpu)225 static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
226 {
227 struct vcpu_vmx *vmx = to_vmx(vcpu);
228
229 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
230 kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
231 vmx->nested.hv_evmcs = NULL;
232 }
233
234 vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
235 }
236
vmx_sync_vmcs_host_state(struct vcpu_vmx * vmx,struct loaded_vmcs * prev)237 static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
238 struct loaded_vmcs *prev)
239 {
240 struct vmcs_host_state *dest, *src;
241
242 if (unlikely(!vmx->guest_state_loaded))
243 return;
244
245 src = &prev->host_state;
246 dest = &vmx->loaded_vmcs->host_state;
247
248 vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base);
249 dest->ldt_sel = src->ldt_sel;
250 #ifdef CONFIG_X86_64
251 dest->ds_sel = src->ds_sel;
252 dest->es_sel = src->es_sel;
253 #endif
254 }
255
vmx_switch_vmcs(struct kvm_vcpu * vcpu,struct loaded_vmcs * vmcs)256 static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
257 {
258 struct vcpu_vmx *vmx = to_vmx(vcpu);
259 struct loaded_vmcs *prev;
260 int cpu;
261
262 if (WARN_ON_ONCE(vmx->loaded_vmcs == vmcs))
263 return;
264
265 cpu = get_cpu();
266 prev = vmx->loaded_vmcs;
267 vmx->loaded_vmcs = vmcs;
268 vmx_vcpu_load_vmcs(vcpu, cpu, prev);
269 vmx_sync_vmcs_host_state(vmx, prev);
270 put_cpu();
271
272 vmx_register_cache_reset(vcpu);
273 }
274
275 /*
276 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
277 * just stops using VMX.
278 */
free_nested(struct kvm_vcpu * vcpu)279 static void free_nested(struct kvm_vcpu *vcpu)
280 {
281 struct vcpu_vmx *vmx = to_vmx(vcpu);
282
283 if (WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01))
284 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
285
286 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
287 return;
288
289 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
290
291 vmx->nested.vmxon = false;
292 vmx->nested.smm.vmxon = false;
293 free_vpid(vmx->nested.vpid02);
294 vmx->nested.posted_intr_nv = -1;
295 vmx->nested.current_vmptr = -1ull;
296 if (enable_shadow_vmcs) {
297 vmx_disable_shadow_vmcs(vmx);
298 vmcs_clear(vmx->vmcs01.shadow_vmcs);
299 free_vmcs(vmx->vmcs01.shadow_vmcs);
300 vmx->vmcs01.shadow_vmcs = NULL;
301 }
302 kfree(vmx->nested.cached_vmcs12);
303 vmx->nested.cached_vmcs12 = NULL;
304 kfree(vmx->nested.cached_shadow_vmcs12);
305 vmx->nested.cached_shadow_vmcs12 = NULL;
306 /* Unpin physical memory we referred to in the vmcs02 */
307 if (vmx->nested.apic_access_page) {
308 kvm_release_page_clean(vmx->nested.apic_access_page);
309 vmx->nested.apic_access_page = NULL;
310 }
311 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
312 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
313 vmx->nested.pi_desc = NULL;
314
315 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
316
317 nested_release_evmcs(vcpu);
318
319 free_loaded_vmcs(&vmx->nested.vmcs02);
320 }
321
322 /*
323 * Ensure that the current vmcs of the logical processor is the
324 * vmcs01 of the vcpu before calling free_nested().
325 */
nested_vmx_free_vcpu(struct kvm_vcpu * vcpu)326 void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
327 {
328 vcpu_load(vcpu);
329 vmx_leave_nested(vcpu);
330 vcpu_put(vcpu);
331 }
332
333 #define EPTP_PA_MASK GENMASK_ULL(51, 12)
334
nested_ept_root_matches(hpa_t root_hpa,u64 root_eptp,u64 eptp)335 static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
336 {
337 return VALID_PAGE(root_hpa) &&
338 ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
339 }
340
nested_ept_invalidate_addr(struct kvm_vcpu * vcpu,gpa_t eptp,gpa_t addr)341 static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
342 gpa_t addr)
343 {
344 uint i;
345 struct kvm_mmu_root_info *cached_root;
346
347 WARN_ON_ONCE(!mmu_is_nested(vcpu));
348
349 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
350 cached_root = &vcpu->arch.mmu->prev_roots[i];
351
352 if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd,
353 eptp))
354 vcpu->arch.mmu->invlpg(vcpu, addr, cached_root->hpa);
355 }
356 }
357
nested_ept_inject_page_fault(struct kvm_vcpu * vcpu,struct x86_exception * fault)358 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
359 struct x86_exception *fault)
360 {
361 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
362 struct vcpu_vmx *vmx = to_vmx(vcpu);
363 u32 vm_exit_reason;
364 unsigned long exit_qualification = vcpu->arch.exit_qualification;
365
366 if (vmx->nested.pml_full) {
367 vm_exit_reason = EXIT_REASON_PML_FULL;
368 vmx->nested.pml_full = false;
369 exit_qualification &= INTR_INFO_UNBLOCK_NMI;
370 } else {
371 if (fault->error_code & PFERR_RSVD_MASK)
372 vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
373 else
374 vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
375
376 /*
377 * Although the caller (kvm_inject_emulated_page_fault) would
378 * have already synced the faulting address in the shadow EPT
379 * tables for the current EPTP12, we also need to sync it for
380 * any other cached EPTP02s based on the same EP4TA, since the
381 * TLB associates mappings to the EP4TA rather than the full EPTP.
382 */
383 nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer,
384 fault->address);
385 }
386
387 nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification);
388 vmcs12->guest_physical_address = fault->address;
389 }
390
nested_ept_new_eptp(struct kvm_vcpu * vcpu)391 static void nested_ept_new_eptp(struct kvm_vcpu *vcpu)
392 {
393 kvm_init_shadow_ept_mmu(vcpu,
394 to_vmx(vcpu)->nested.msrs.ept_caps &
395 VMX_EPT_EXECUTE_ONLY_BIT,
396 nested_ept_ad_enabled(vcpu),
397 nested_ept_get_eptp(vcpu));
398 }
399
nested_ept_init_mmu_context(struct kvm_vcpu * vcpu)400 static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
401 {
402 WARN_ON(mmu_is_nested(vcpu));
403
404 vcpu->arch.mmu = &vcpu->arch.guest_mmu;
405 nested_ept_new_eptp(vcpu);
406 vcpu->arch.mmu->get_guest_pgd = nested_ept_get_eptp;
407 vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
408 vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;
409
410 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
411 }
412
nested_ept_uninit_mmu_context(struct kvm_vcpu * vcpu)413 static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
414 {
415 vcpu->arch.mmu = &vcpu->arch.root_mmu;
416 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
417 }
418
nested_vmx_is_page_fault_vmexit(struct vmcs12 * vmcs12,u16 error_code)419 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
420 u16 error_code)
421 {
422 bool inequality, bit;
423
424 bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
425 inequality =
426 (error_code & vmcs12->page_fault_error_code_mask) !=
427 vmcs12->page_fault_error_code_match;
428 return inequality ^ bit;
429 }
430
431
432 /*
433 * KVM wants to inject page-faults which it got to the guest. This function
434 * checks whether in a nested guest, we need to inject them to L1 or L2.
435 */
nested_vmx_check_exception(struct kvm_vcpu * vcpu,unsigned long * exit_qual)436 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
437 {
438 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
439 unsigned int nr = vcpu->arch.exception.nr;
440 bool has_payload = vcpu->arch.exception.has_payload;
441 unsigned long payload = vcpu->arch.exception.payload;
442
443 if (nr == PF_VECTOR) {
444 if (vcpu->arch.exception.nested_apf) {
445 *exit_qual = vcpu->arch.apf.nested_apf_token;
446 return 1;
447 }
448 if (nested_vmx_is_page_fault_vmexit(vmcs12,
449 vcpu->arch.exception.error_code)) {
450 *exit_qual = has_payload ? payload : vcpu->arch.cr2;
451 return 1;
452 }
453 } else if (vmcs12->exception_bitmap & (1u << nr)) {
454 if (nr == DB_VECTOR) {
455 if (!has_payload) {
456 payload = vcpu->arch.dr6;
457 payload &= ~DR6_BT;
458 payload ^= DR6_ACTIVE_LOW;
459 }
460 *exit_qual = payload;
461 } else
462 *exit_qual = 0;
463 return 1;
464 }
465
466 return 0;
467 }
468
469
vmx_inject_page_fault_nested(struct kvm_vcpu * vcpu,struct x86_exception * fault)470 static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
471 struct x86_exception *fault)
472 {
473 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
474
475 WARN_ON(!is_guest_mode(vcpu));
476
477 if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
478 !to_vmx(vcpu)->nested.nested_run_pending) {
479 vmcs12->vm_exit_intr_error_code = fault->error_code;
480 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
481 PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
482 INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
483 fault->address);
484 } else {
485 kvm_inject_page_fault(vcpu, fault);
486 }
487 }
488
nested_vmx_check_io_bitmap_controls(struct kvm_vcpu * vcpu,struct vmcs12 * vmcs12)489 static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
490 struct vmcs12 *vmcs12)
491 {
492 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
493 return 0;
494
495 if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) ||
496 CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b)))
497 return -EINVAL;
498
499 return 0;
500 }
501
nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu * vcpu,struct vmcs12 * vmcs12)502 static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
503 struct vmcs12 *vmcs12)
504 {
505 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
506 return 0;
507
508 if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
509 return -EINVAL;
510
511 return 0;
512 }
513
nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu * vcpu,struct vmcs12 * vmcs12)514 static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
515 struct vmcs12 *vmcs12)
516 {
517 if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
518 return 0;
519
520 if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)))
521 return -EINVAL;
522
523 return 0;
524 }
525
526 /*
527 * If a msr is allowed by L0, we should check whether it is allowed by L1.
528 * The corresponding bit will be cleared unless both of L0 and L1 allow it.
529 */
nested_vmx_disable_intercept_for_msr(unsigned long * msr_bitmap_l1,unsigned long * msr_bitmap_nested,u32 msr,int type)530 static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
531 unsigned long *msr_bitmap_nested,
532 u32 msr, int type)
533 {
534 int f = sizeof(unsigned long);
535
536 /*
537 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
538 * have the write-low and read-high bitmap offsets the wrong way round.
539 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
540 */
541 if (msr <= 0x1fff) {
542 if (type & MSR_TYPE_R &&
543 !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
544 /* read-low */
545 __clear_bit(msr, msr_bitmap_nested + 0x000 / f);
546
547 if (type & MSR_TYPE_W &&
548 !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
549 /* write-low */
550 __clear_bit(msr, msr_bitmap_nested + 0x800 / f);
551
552 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
553 msr &= 0x1fff;
554 if (type & MSR_TYPE_R &&
555 !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
556 /* read-high */
557 __clear_bit(msr, msr_bitmap_nested + 0x400 / f);
558
559 if (type & MSR_TYPE_W &&
560 !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
561 /* write-high */
562 __clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
563
564 }
565 }
566
enable_x2apic_msr_intercepts(unsigned long * msr_bitmap)567 static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
568 {
569 int msr;
570
571 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
572 unsigned word = msr / BITS_PER_LONG;
573
574 msr_bitmap[word] = ~0;
575 msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
576 }
577 }
578
579 #define BUILD_NVMX_MSR_INTERCEPT_HELPER(rw) \
580 static inline \
581 void nested_vmx_set_msr_##rw##_intercept(struct vcpu_vmx *vmx, \
582 unsigned long *msr_bitmap_l1, \
583 unsigned long *msr_bitmap_l0, u32 msr) \
584 { \
585 if (vmx_test_msr_bitmap_##rw(vmx->vmcs01.msr_bitmap, msr) || \
586 vmx_test_msr_bitmap_##rw(msr_bitmap_l1, msr)) \
587 vmx_set_msr_bitmap_##rw(msr_bitmap_l0, msr); \
588 else \
589 vmx_clear_msr_bitmap_##rw(msr_bitmap_l0, msr); \
590 }
591 BUILD_NVMX_MSR_INTERCEPT_HELPER(read)
BUILD_NVMX_MSR_INTERCEPT_HELPER(write)592 BUILD_NVMX_MSR_INTERCEPT_HELPER(write)
593
594 static inline void nested_vmx_set_intercept_for_msr(struct vcpu_vmx *vmx,
595 unsigned long *msr_bitmap_l1,
596 unsigned long *msr_bitmap_l0,
597 u32 msr, int types)
598 {
599 if (types & MSR_TYPE_R)
600 nested_vmx_set_msr_read_intercept(vmx, msr_bitmap_l1,
601 msr_bitmap_l0, msr);
602 if (types & MSR_TYPE_W)
603 nested_vmx_set_msr_write_intercept(vmx, msr_bitmap_l1,
604 msr_bitmap_l0, msr);
605 }
606
607 /*
608 * Merge L0's and L1's MSR bitmap, return false to indicate that
609 * we do not use the hardware.
610 */
nested_vmx_prepare_msr_bitmap(struct kvm_vcpu * vcpu,struct vmcs12 * vmcs12)611 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
612 struct vmcs12 *vmcs12)
613 {
614 struct vcpu_vmx *vmx = to_vmx(vcpu);
615 int msr;
616 unsigned long *msr_bitmap_l1;
617 unsigned long *msr_bitmap_l0 = vmx->nested.vmcs02.msr_bitmap;
618 struct kvm_host_map *map = &vmx->nested.msr_bitmap_map;
619
620 /* Nothing to do if the MSR bitmap is not in use. */
621 if (!cpu_has_vmx_msr_bitmap() ||
622 !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
623 return false;
624
625 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
626 return false;
627
628 msr_bitmap_l1 = (unsigned long *)map->hva;
629
630 /*
631 * To keep the control flow simple, pay eight 8-byte writes (sixteen
632 * 4-byte writes on 32-bit systems) up front to enable intercepts for
633 * the x2APIC MSR range and selectively disable them below.
634 */
635 enable_x2apic_msr_intercepts(msr_bitmap_l0);
636
637 if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
638 if (nested_cpu_has_apic_reg_virt(vmcs12)) {
639 /*
640 * L0 need not intercept reads for MSRs between 0x800
641 * and 0x8ff, it just lets the processor take the value
642 * from the virtual-APIC page; take those 256 bits
643 * directly from the L1 bitmap.
644 */
645 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
646 unsigned word = msr / BITS_PER_LONG;
647
648 msr_bitmap_l0[word] = msr_bitmap_l1[word];
649 }
650 }
651
652 nested_vmx_disable_intercept_for_msr(
653 msr_bitmap_l1, msr_bitmap_l0,
654 X2APIC_MSR(APIC_TASKPRI),
655 MSR_TYPE_R | MSR_TYPE_W);
656
657 if (nested_cpu_has_vid(vmcs12)) {
658 nested_vmx_disable_intercept_for_msr(
659 msr_bitmap_l1, msr_bitmap_l0,
660 X2APIC_MSR(APIC_EOI),
661 MSR_TYPE_W);
662 nested_vmx_disable_intercept_for_msr(
663 msr_bitmap_l1, msr_bitmap_l0,
664 X2APIC_MSR(APIC_SELF_IPI),
665 MSR_TYPE_W);
666 }
667 }
668
669 /*
670 * Always check vmcs01's bitmap to honor userspace MSR filters and any
671 * other runtime changes to vmcs01's bitmap, e.g. dynamic pass-through.
672 */
673 #ifdef CONFIG_X86_64
674 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
675 MSR_FS_BASE, MSR_TYPE_RW);
676
677 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
678 MSR_GS_BASE, MSR_TYPE_RW);
679
680 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
681 MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
682 #endif
683 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
684 MSR_IA32_SPEC_CTRL, MSR_TYPE_RW);
685
686 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
687 MSR_IA32_PRED_CMD, MSR_TYPE_W);
688
689 kvm_vcpu_unmap(vcpu, &vmx->nested.msr_bitmap_map, false);
690
691 return true;
692 }
693
nested_cache_shadow_vmcs12(struct kvm_vcpu * vcpu,struct vmcs12 * vmcs12)694 static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
695 struct vmcs12 *vmcs12)
696 {
697 struct kvm_host_map map;
698 struct vmcs12 *shadow;
699
700 if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
701 vmcs12->vmcs_link_pointer == -1ull)
702 return;
703
704 shadow = get_shadow_vmcs12(vcpu);
705
706 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
707 return;
708
709 memcpy(shadow, map.hva, VMCS12_SIZE);
710 kvm_vcpu_unmap(vcpu, &map, false);
711 }
712
nested_flush_cached_shadow_vmcs12(struct kvm_vcpu * vcpu,struct vmcs12 * vmcs12)713 static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
714 struct vmcs12 *vmcs12)
715 {
716 struct vcpu_vmx *vmx = to_vmx(vcpu);
717
718 if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
719 vmcs12->vmcs_link_pointer == -1ull)
720 return;
721
722 kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
723 get_shadow_vmcs12(vcpu), VMCS12_SIZE);
724 }
725
726 /*
727 * In nested virtualization, check if L1 has set
728 * VM_EXIT_ACK_INTR_ON_EXIT
729 */
nested_exit_intr_ack_set(struct kvm_vcpu * vcpu)730 static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
731 {
732 return get_vmcs12(vcpu)->vm_exit_controls &
733 VM_EXIT_ACK_INTR_ON_EXIT;
734 }
735
nested_vmx_check_apic_access_controls(struct kvm_vcpu * vcpu,struct vmcs12 * vmcs12)736 static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
737 struct vmcs12 *vmcs12)
738 {
739 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
740 CC(!page_address_valid(vcpu, vmcs12->apic_access_addr)))
741 return -EINVAL;
742 else
743 return 0;
744 }
745
nested_vmx_check_apicv_controls(struct kvm_vcpu * vcpu,struct vmcs12 * vmcs12)746 static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
747 struct vmcs12 *vmcs12)
748 {
749 if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
750 !nested_cpu_has_apic_reg_virt(vmcs12) &&
751 !nested_cpu_has_vid(vmcs12) &&
752 !nested_cpu_has_posted_intr(vmcs12))
753 return 0;
754
755 /*
756 * If virtualize x2apic mode is enabled,
757 * virtualize apic access must be disabled.
758 */
759 if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) &&
760 nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)))
761 return -EINVAL;
762
763 /*
764 * If virtual interrupt delivery is enabled,
765 * we must exit on external interrupts.
766 */
767 if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)))
768 return -EINVAL;
769
770 /*
771 * bits 15:8 should be zero in posted_intr_nv,
772 * the descriptor address has been already checked
773 * in nested_get_vmcs12_pages.
774 *
775 * bits 5:0 of posted_intr_desc_addr should be zero.
776 */
777 if (nested_cpu_has_posted_intr(vmcs12) &&
778 (CC(!nested_cpu_has_vid(vmcs12)) ||
779 CC(!nested_exit_intr_ack_set(vcpu)) ||
780 CC((vmcs12->posted_intr_nv & 0xff00)) ||
781 CC(!kvm_vcpu_is_legal_aligned_gpa(vcpu, vmcs12->posted_intr_desc_addr, 64))))
782 return -EINVAL;
783
784 /* tpr shadow is needed by all apicv features. */
785 if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)))
786 return -EINVAL;
787
788 return 0;
789 }
790
nested_vmx_check_msr_switch(struct kvm_vcpu * vcpu,u32 count,u64 addr)791 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
792 u32 count, u64 addr)
793 {
794 if (count == 0)
795 return 0;
796
797 if (!kvm_vcpu_is_legal_aligned_gpa(vcpu, addr, 16) ||
798 !kvm_vcpu_is_legal_gpa(vcpu, (addr + count * sizeof(struct vmx_msr_entry) - 1)))
799 return -EINVAL;
800
801 return 0;
802 }
803
nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu * vcpu,struct vmcs12 * vmcs12)804 static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
805 struct vmcs12 *vmcs12)
806 {
807 if (CC(nested_vmx_check_msr_switch(vcpu,
808 vmcs12->vm_exit_msr_load_count,
809 vmcs12->vm_exit_msr_load_addr)) ||
810 CC(nested_vmx_check_msr_switch(vcpu,
811 vmcs12->vm_exit_msr_store_count,
812 vmcs12->vm_exit_msr_store_addr)))
813 return -EINVAL;
814
815 return 0;
816 }
817
nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu * vcpu,struct vmcs12 * vmcs12)818 static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
819 struct vmcs12 *vmcs12)
820 {
821 if (CC(nested_vmx_check_msr_switch(vcpu,
822 vmcs12->vm_entry_msr_load_count,
823 vmcs12->vm_entry_msr_load_addr)))
824 return -EINVAL;
825
826 return 0;
827 }
828
nested_vmx_check_pml_controls(struct kvm_vcpu * vcpu,struct vmcs12 * vmcs12)829 static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
830 struct vmcs12 *vmcs12)
831 {
832 if (!nested_cpu_has_pml(vmcs12))
833 return 0;
834
835 if (CC(!nested_cpu_has_ept(vmcs12)) ||
836 CC(!page_address_valid(vcpu, vmcs12->pml_address)))
837 return -EINVAL;
838
839 return 0;
840 }
841
nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu * vcpu,struct vmcs12 * vmcs12)842 static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
843 struct vmcs12 *vmcs12)
844 {
845 if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
846 !nested_cpu_has_ept(vmcs12)))
847 return -EINVAL;
848 return 0;
849 }
850
nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu * vcpu,struct vmcs12 * vmcs12)851 static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
852 struct vmcs12 *vmcs12)
853 {
854 if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
855 !nested_cpu_has_ept(vmcs12)))
856 return -EINVAL;
857 return 0;
858 }
859
nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu * vcpu,struct vmcs12 * vmcs12)860 static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
861 struct vmcs12 *vmcs12)
862 {
863 if (!nested_cpu_has_shadow_vmcs(vmcs12))
864 return 0;
865
866 if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) ||
867 CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap)))
868 return -EINVAL;
869
870 return 0;
871 }
872
nested_vmx_msr_check_common(struct kvm_vcpu * vcpu,struct vmx_msr_entry * e)873 static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
874 struct vmx_msr_entry *e)
875 {
876 /* x2APIC MSR accesses are not allowed */
877 if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
878 return -EINVAL;
879 if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */
880 CC(e->index == MSR_IA32_UCODE_REV))
881 return -EINVAL;
882 if (CC(e->reserved != 0))
883 return -EINVAL;
884 return 0;
885 }
886
nested_vmx_load_msr_check(struct kvm_vcpu * vcpu,struct vmx_msr_entry * e)887 static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
888 struct vmx_msr_entry *e)
889 {
890 if (CC(e->index == MSR_FS_BASE) ||
891 CC(e->index == MSR_GS_BASE) ||
892 CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */
893 nested_vmx_msr_check_common(vcpu, e))
894 return -EINVAL;
895 return 0;
896 }
897
nested_vmx_store_msr_check(struct kvm_vcpu * vcpu,struct vmx_msr_entry * e)898 static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
899 struct vmx_msr_entry *e)
900 {
901 if (CC(e->index == MSR_IA32_SMBASE) || /* SMM is not supported */
902 nested_vmx_msr_check_common(vcpu, e))
903 return -EINVAL;
904 return 0;
905 }
906
nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu * vcpu)907 static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
908 {
909 struct vcpu_vmx *vmx = to_vmx(vcpu);
910 u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
911 vmx->nested.msrs.misc_high);
912
913 return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
914 }
915
916 /*
917 * Load guest's/host's msr at nested entry/exit.
918 * return 0 for success, entry index for failure.
919 *
920 * One of the failure modes for MSR load/store is when a list exceeds the
921 * virtual hardware's capacity. To maintain compatibility with hardware inasmuch
922 * as possible, process all valid entries before failing rather than precheck
923 * for a capacity violation.
924 */
nested_vmx_load_msr(struct kvm_vcpu * vcpu,u64 gpa,u32 count)925 static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
926 {
927 u32 i;
928 struct vmx_msr_entry e;
929 u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);
930
931 for (i = 0; i < count; i++) {
932 if (unlikely(i >= max_msr_list_size))
933 goto fail;
934
935 if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
936 &e, sizeof(e))) {
937 pr_debug_ratelimited(
938 "%s cannot read MSR entry (%u, 0x%08llx)\n",
939 __func__, i, gpa + i * sizeof(e));
940 goto fail;
941 }
942 if (nested_vmx_load_msr_check(vcpu, &e)) {
943 pr_debug_ratelimited(
944 "%s check failed (%u, 0x%x, 0x%x)\n",
945 __func__, i, e.index, e.reserved);
946 goto fail;
947 }
948 if (kvm_set_msr(vcpu, e.index, e.value)) {
949 pr_debug_ratelimited(
950 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
951 __func__, i, e.index, e.value);
952 goto fail;
953 }
954 }
955 return 0;
956 fail:
957 /* Note, max_msr_list_size is at most 4096, i.e. this can't wrap. */
958 return i + 1;
959 }
960
nested_vmx_get_vmexit_msr_value(struct kvm_vcpu * vcpu,u32 msr_index,u64 * data)961 static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
962 u32 msr_index,
963 u64 *data)
964 {
965 struct vcpu_vmx *vmx = to_vmx(vcpu);
966
967 /*
968 * If the L0 hypervisor stored a more accurate value for the TSC that
969 * does not include the time taken for emulation of the L2->L1
970 * VM-exit in L0, use the more accurate value.
971 */
972 if (msr_index == MSR_IA32_TSC) {
973 int i = vmx_find_loadstore_msr_slot(&vmx->msr_autostore.guest,
974 MSR_IA32_TSC);
975
976 if (i >= 0) {
977 u64 val = vmx->msr_autostore.guest.val[i].value;
978
979 *data = kvm_read_l1_tsc(vcpu, val);
980 return true;
981 }
982 }
983
984 if (kvm_get_msr(vcpu, msr_index, data)) {
985 pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
986 msr_index);
987 return false;
988 }
989 return true;
990 }
991
read_and_check_msr_entry(struct kvm_vcpu * vcpu,u64 gpa,int i,struct vmx_msr_entry * e)992 static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i,
993 struct vmx_msr_entry *e)
994 {
995 if (kvm_vcpu_read_guest(vcpu,
996 gpa + i * sizeof(*e),
997 e, 2 * sizeof(u32))) {
998 pr_debug_ratelimited(
999 "%s cannot read MSR entry (%u, 0x%08llx)\n",
1000 __func__, i, gpa + i * sizeof(*e));
1001 return false;
1002 }
1003 if (nested_vmx_store_msr_check(vcpu, e)) {
1004 pr_debug_ratelimited(
1005 "%s check failed (%u, 0x%x, 0x%x)\n",
1006 __func__, i, e->index, e->reserved);
1007 return false;
1008 }
1009 return true;
1010 }
1011
nested_vmx_store_msr(struct kvm_vcpu * vcpu,u64 gpa,u32 count)1012 static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
1013 {
1014 u64 data;
1015 u32 i;
1016 struct vmx_msr_entry e;
1017 u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);
1018
1019 for (i = 0; i < count; i++) {
1020 if (unlikely(i >= max_msr_list_size))
1021 return -EINVAL;
1022
1023 if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
1024 return -EINVAL;
1025
1026 if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data))
1027 return -EINVAL;
1028
1029 if (kvm_vcpu_write_guest(vcpu,
1030 gpa + i * sizeof(e) +
1031 offsetof(struct vmx_msr_entry, value),
1032 &data, sizeof(data))) {
1033 pr_debug_ratelimited(
1034 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
1035 __func__, i, e.index, data);
1036 return -EINVAL;
1037 }
1038 }
1039 return 0;
1040 }
1041
nested_msr_store_list_has_msr(struct kvm_vcpu * vcpu,u32 msr_index)1042 static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index)
1043 {
1044 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1045 u32 count = vmcs12->vm_exit_msr_store_count;
1046 u64 gpa = vmcs12->vm_exit_msr_store_addr;
1047 struct vmx_msr_entry e;
1048 u32 i;
1049
1050 for (i = 0; i < count; i++) {
1051 if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
1052 return false;
1053
1054 if (e.index == msr_index)
1055 return true;
1056 }
1057 return false;
1058 }
1059
prepare_vmx_msr_autostore_list(struct kvm_vcpu * vcpu,u32 msr_index)1060 static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
1061 u32 msr_index)
1062 {
1063 struct vcpu_vmx *vmx = to_vmx(vcpu);
1064 struct vmx_msrs *autostore = &vmx->msr_autostore.guest;
1065 bool in_vmcs12_store_list;
1066 int msr_autostore_slot;
1067 bool in_autostore_list;
1068 int last;
1069
1070 msr_autostore_slot = vmx_find_loadstore_msr_slot(autostore, msr_index);
1071 in_autostore_list = msr_autostore_slot >= 0;
1072 in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index);
1073
1074 if (in_vmcs12_store_list && !in_autostore_list) {
1075 if (autostore->nr == MAX_NR_LOADSTORE_MSRS) {
1076 /*
1077 * Emulated VMEntry does not fail here. Instead a less
1078 * accurate value will be returned by
1079 * nested_vmx_get_vmexit_msr_value() using kvm_get_msr()
1080 * instead of reading the value from the vmcs02 VMExit
1081 * MSR-store area.
1082 */
1083 pr_warn_ratelimited(
1084 "Not enough msr entries in msr_autostore. Can't add msr %x\n",
1085 msr_index);
1086 return;
1087 }
1088 last = autostore->nr++;
1089 autostore->val[last].index = msr_index;
1090 } else if (!in_vmcs12_store_list && in_autostore_list) {
1091 last = --autostore->nr;
1092 autostore->val[msr_autostore_slot] = autostore->val[last];
1093 }
1094 }
1095
1096 /*
1097 * Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are
1098 * emulating VM-Entry into a guest with EPT enabled. On failure, the expected
1099 * Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to
1100 * @entry_failure_code.
1101 */
nested_vmx_load_cr3(struct kvm_vcpu * vcpu,unsigned long cr3,bool nested_ept,bool reload_pdptrs,enum vm_entry_failure_code * entry_failure_code)1102 static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
1103 bool nested_ept, bool reload_pdptrs,
1104 enum vm_entry_failure_code *entry_failure_code)
1105 {
1106 if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) {
1107 *entry_failure_code = ENTRY_FAIL_DEFAULT;
1108 return -EINVAL;
1109 }
1110
1111 /*
1112 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
1113 * must not be dereferenced.
1114 */
1115 if (reload_pdptrs && !nested_ept && is_pae_paging(vcpu) &&
1116 CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) {
1117 *entry_failure_code = ENTRY_FAIL_PDPTE;
1118 return -EINVAL;
1119 }
1120
1121 if (!nested_ept)
1122 kvm_mmu_new_pgd(vcpu, cr3);
1123
1124 vcpu->arch.cr3 = cr3;
1125 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
1126
1127 /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
1128 kvm_init_mmu(vcpu);
1129
1130 return 0;
1131 }
1132
1133 /*
1134 * Returns if KVM is able to config CPU to tag TLB entries
1135 * populated by L2 differently than TLB entries populated
1136 * by L1.
1137 *
1138 * If L0 uses EPT, L1 and L2 run with different EPTP because
1139 * guest_mode is part of kvm_mmu_page_role. Thus, TLB entries
1140 * are tagged with different EPTP.
1141 *
1142 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
1143 * with different VPID (L1 entries are tagged with vmx->vpid
1144 * while L2 entries are tagged with vmx->nested.vpid02).
1145 */
nested_has_guest_tlb_tag(struct kvm_vcpu * vcpu)1146 static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
1147 {
1148 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1149
1150 return enable_ept ||
1151 (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
1152 }
1153
nested_vmx_transition_tlb_flush(struct kvm_vcpu * vcpu,struct vmcs12 * vmcs12,bool is_vmenter)1154 static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
1155 struct vmcs12 *vmcs12,
1156 bool is_vmenter)
1157 {
1158 struct vcpu_vmx *vmx = to_vmx(vcpu);
1159
1160 /*
1161 * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
1162 * for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. it's a
1163 * full TLB flush from the guest's perspective. This is required even
1164 * if VPID is disabled in the host as KVM may need to synchronize the
1165 * MMU in response to the guest TLB flush.
1166 *
1167 * Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use.
1168 * EPT is a special snowflake, as guest-physical mappings aren't
1169 * flushed on VPID invalidations, including VM-Enter or VM-Exit with
1170 * VPID disabled. As a result, KVM _never_ needs to sync nEPT
1171 * entries on VM-Enter because L1 can't rely on VM-Enter to flush
1172 * those mappings.
1173 */
1174 if (!nested_cpu_has_vpid(vmcs12)) {
1175 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
1176 return;
1177 }
1178
1179 /* L2 should never have a VPID if VPID is disabled. */
1180 WARN_ON(!enable_vpid);
1181
1182 /*
1183 * VPID is enabled and in use by vmcs12. If vpid12 is changing, then
1184 * emulate a guest TLB flush as KVM does not track vpid12 history nor
1185 * is the VPID incorporated into the MMU context. I.e. KVM must assume
1186 * that the new vpid12 has never been used and thus represents a new
1187 * guest ASID that cannot have entries in the TLB.
1188 */
1189 if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
1190 vmx->nested.last_vpid = vmcs12->virtual_processor_id;
1191 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
1192 return;
1193 }
1194
1195 /*
1196 * If VPID is enabled, used by vmc12, and vpid12 is not changing but
1197 * does not have a unique TLB tag (ASID), i.e. EPT is disabled and
1198 * KVM was unable to allocate a VPID for L2, flush the current context
1199 * as the effective ASID is common to both L1 and L2.
1200 */
1201 if (!nested_has_guest_tlb_tag(vcpu))
1202 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
1203 }
1204
is_bitwise_subset(u64 superset,u64 subset,u64 mask)1205 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
1206 {
1207 superset &= mask;
1208 subset &= mask;
1209
1210 return (superset | subset) == superset;
1211 }
1212
vmx_restore_vmx_basic(struct vcpu_vmx * vmx,u64 data)1213 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
1214 {
1215 const u64 feature_and_reserved =
1216 /* feature (except bit 48; see below) */
1217 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
1218 /* reserved */
1219 BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
1220 u64 vmx_basic = vmcs_config.nested.basic;
1221
1222 if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
1223 return -EINVAL;
1224
1225 /*
1226 * KVM does not emulate a version of VMX that constrains physical
1227 * addresses of VMX structures (e.g. VMCS) to 32-bits.
1228 */
1229 if (data & BIT_ULL(48))
1230 return -EINVAL;
1231
1232 if (vmx_basic_vmcs_revision_id(vmx_basic) !=
1233 vmx_basic_vmcs_revision_id(data))
1234 return -EINVAL;
1235
1236 if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
1237 return -EINVAL;
1238
1239 vmx->nested.msrs.basic = data;
1240 return 0;
1241 }
1242
vmx_get_control_msr(struct nested_vmx_msrs * msrs,u32 msr_index,u32 ** low,u32 ** high)1243 static void vmx_get_control_msr(struct nested_vmx_msrs *msrs, u32 msr_index,
1244 u32 **low, u32 **high)
1245 {
1246 switch (msr_index) {
1247 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1248 *low = &msrs->pinbased_ctls_low;
1249 *high = &msrs->pinbased_ctls_high;
1250 break;
1251 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1252 *low = &msrs->procbased_ctls_low;
1253 *high = &msrs->procbased_ctls_high;
1254 break;
1255 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1256 *low = &msrs->exit_ctls_low;
1257 *high = &msrs->exit_ctls_high;
1258 break;
1259 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1260 *low = &msrs->entry_ctls_low;
1261 *high = &msrs->entry_ctls_high;
1262 break;
1263 case MSR_IA32_VMX_PROCBASED_CTLS2:
1264 *low = &msrs->secondary_ctls_low;
1265 *high = &msrs->secondary_ctls_high;
1266 break;
1267 default:
1268 BUG();
1269 }
1270 }
1271
1272 static int
vmx_restore_control_msr(struct vcpu_vmx * vmx,u32 msr_index,u64 data)1273 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
1274 {
1275 u32 *lowp, *highp;
1276 u64 supported;
1277
1278 vmx_get_control_msr(&vmcs_config.nested, msr_index, &lowp, &highp);
1279
1280 supported = vmx_control_msr(*lowp, *highp);
1281
1282 /* Check must-be-1 bits are still 1. */
1283 if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
1284 return -EINVAL;
1285
1286 /* Check must-be-0 bits are still 0. */
1287 if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
1288 return -EINVAL;
1289
1290 vmx_get_control_msr(&vmx->nested.msrs, msr_index, &lowp, &highp);
1291 *lowp = data;
1292 *highp = data >> 32;
1293 return 0;
1294 }
1295
vmx_restore_vmx_misc(struct vcpu_vmx * vmx,u64 data)1296 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
1297 {
1298 const u64 feature_and_reserved_bits =
1299 /* feature */
1300 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
1301 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
1302 /* reserved */
1303 GENMASK_ULL(13, 9) | BIT_ULL(31);
1304 u64 vmx_misc = vmx_control_msr(vmcs_config.nested.misc_low,
1305 vmcs_config.nested.misc_high);
1306
1307 if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
1308 return -EINVAL;
1309
1310 if ((vmx->nested.msrs.pinbased_ctls_high &
1311 PIN_BASED_VMX_PREEMPTION_TIMER) &&
1312 vmx_misc_preemption_timer_rate(data) !=
1313 vmx_misc_preemption_timer_rate(vmx_misc))
1314 return -EINVAL;
1315
1316 if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
1317 return -EINVAL;
1318
1319 if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
1320 return -EINVAL;
1321
1322 if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
1323 return -EINVAL;
1324
1325 vmx->nested.msrs.misc_low = data;
1326 vmx->nested.msrs.misc_high = data >> 32;
1327
1328 return 0;
1329 }
1330
vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx * vmx,u64 data)1331 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
1332 {
1333 u64 vmx_ept_vpid_cap = vmx_control_msr(vmcs_config.nested.ept_caps,
1334 vmcs_config.nested.vpid_caps);
1335
1336 /* Every bit is either reserved or a feature bit. */
1337 if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
1338 return -EINVAL;
1339
1340 vmx->nested.msrs.ept_caps = data;
1341 vmx->nested.msrs.vpid_caps = data >> 32;
1342 return 0;
1343 }
1344
vmx_get_fixed0_msr(struct nested_vmx_msrs * msrs,u32 msr_index)1345 static u64 *vmx_get_fixed0_msr(struct nested_vmx_msrs *msrs, u32 msr_index)
1346 {
1347 switch (msr_index) {
1348 case MSR_IA32_VMX_CR0_FIXED0:
1349 return &msrs->cr0_fixed0;
1350 case MSR_IA32_VMX_CR4_FIXED0:
1351 return &msrs->cr4_fixed0;
1352 default:
1353 BUG();
1354 }
1355 }
1356
vmx_restore_fixed0_msr(struct vcpu_vmx * vmx,u32 msr_index,u64 data)1357 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
1358 {
1359 const u64 *msr = vmx_get_fixed0_msr(&vmcs_config.nested, msr_index);
1360
1361 /*
1362 * 1 bits (which indicates bits which "must-be-1" during VMX operation)
1363 * must be 1 in the restored value.
1364 */
1365 if (!is_bitwise_subset(data, *msr, -1ULL))
1366 return -EINVAL;
1367
1368 *vmx_get_fixed0_msr(&vmx->nested.msrs, msr_index) = data;
1369 return 0;
1370 }
1371
1372 /*
1373 * Called when userspace is restoring VMX MSRs.
1374 *
1375 * Returns 0 on success, non-0 otherwise.
1376 */
vmx_set_vmx_msr(struct kvm_vcpu * vcpu,u32 msr_index,u64 data)1377 int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
1378 {
1379 struct vcpu_vmx *vmx = to_vmx(vcpu);
1380
1381 /*
1382 * Don't allow changes to the VMX capability MSRs while the vCPU
1383 * is in VMX operation.
1384 */
1385 if (vmx->nested.vmxon)
1386 return -EBUSY;
1387
1388 switch (msr_index) {
1389 case MSR_IA32_VMX_BASIC:
1390 return vmx_restore_vmx_basic(vmx, data);
1391 case MSR_IA32_VMX_PINBASED_CTLS:
1392 case MSR_IA32_VMX_PROCBASED_CTLS:
1393 case MSR_IA32_VMX_EXIT_CTLS:
1394 case MSR_IA32_VMX_ENTRY_CTLS:
1395 /*
1396 * The "non-true" VMX capability MSRs are generated from the
1397 * "true" MSRs, so we do not support restoring them directly.
1398 *
1399 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
1400 * should restore the "true" MSRs with the must-be-1 bits
1401 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
1402 * DEFAULT SETTINGS".
1403 */
1404 return -EINVAL;
1405 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1406 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1407 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1408 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1409 case MSR_IA32_VMX_PROCBASED_CTLS2:
1410 return vmx_restore_control_msr(vmx, msr_index, data);
1411 case MSR_IA32_VMX_MISC:
1412 return vmx_restore_vmx_misc(vmx, data);
1413 case MSR_IA32_VMX_CR0_FIXED0:
1414 case MSR_IA32_VMX_CR4_FIXED0:
1415 return vmx_restore_fixed0_msr(vmx, msr_index, data);
1416 case MSR_IA32_VMX_CR0_FIXED1:
1417 case MSR_IA32_VMX_CR4_FIXED1:
1418 /*
1419 * These MSRs are generated based on the vCPU's CPUID, so we
1420 * do not support restoring them directly.
1421 */
1422 return -EINVAL;
1423 case MSR_IA32_VMX_EPT_VPID_CAP:
1424 return vmx_restore_vmx_ept_vpid_cap(vmx, data);
1425 case MSR_IA32_VMX_VMCS_ENUM:
1426 vmx->nested.msrs.vmcs_enum = data;
1427 return 0;
1428 case MSR_IA32_VMX_VMFUNC:
1429 if (data & ~vmcs_config.nested.vmfunc_controls)
1430 return -EINVAL;
1431 vmx->nested.msrs.vmfunc_controls = data;
1432 return 0;
1433 default:
1434 /*
1435 * The rest of the VMX capability MSRs do not support restore.
1436 */
1437 return -EINVAL;
1438 }
1439 }
1440
1441 /* Returns 0 on success, non-0 otherwise. */
vmx_get_vmx_msr(struct nested_vmx_msrs * msrs,u32 msr_index,u64 * pdata)1442 int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
1443 {
1444 switch (msr_index) {
1445 case MSR_IA32_VMX_BASIC:
1446 *pdata = msrs->basic;
1447 break;
1448 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1449 case MSR_IA32_VMX_PINBASED_CTLS:
1450 *pdata = vmx_control_msr(
1451 msrs->pinbased_ctls_low,
1452 msrs->pinbased_ctls_high);
1453 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
1454 *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1455 break;
1456 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1457 case MSR_IA32_VMX_PROCBASED_CTLS:
1458 *pdata = vmx_control_msr(
1459 msrs->procbased_ctls_low,
1460 msrs->procbased_ctls_high);
1461 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
1462 *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1463 break;
1464 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1465 case MSR_IA32_VMX_EXIT_CTLS:
1466 *pdata = vmx_control_msr(
1467 msrs->exit_ctls_low,
1468 msrs->exit_ctls_high);
1469 if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
1470 *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
1471 break;
1472 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1473 case MSR_IA32_VMX_ENTRY_CTLS:
1474 *pdata = vmx_control_msr(
1475 msrs->entry_ctls_low,
1476 msrs->entry_ctls_high);
1477 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
1478 *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
1479 break;
1480 case MSR_IA32_VMX_MISC:
1481 *pdata = vmx_control_msr(
1482 msrs->misc_low,
1483 msrs->misc_high);
1484 break;
1485 case MSR_IA32_VMX_CR0_FIXED0:
1486 *pdata = msrs->cr0_fixed0;
1487 break;
1488 case MSR_IA32_VMX_CR0_FIXED1:
1489 *pdata = msrs->cr0_fixed1;
1490 break;
1491 case MSR_IA32_VMX_CR4_FIXED0:
1492 *pdata = msrs->cr4_fixed0;
1493 break;
1494 case MSR_IA32_VMX_CR4_FIXED1:
1495 *pdata = msrs->cr4_fixed1;
1496 break;
1497 case MSR_IA32_VMX_VMCS_ENUM:
1498 *pdata = msrs->vmcs_enum;
1499 break;
1500 case MSR_IA32_VMX_PROCBASED_CTLS2:
1501 *pdata = vmx_control_msr(
1502 msrs->secondary_ctls_low,
1503 msrs->secondary_ctls_high);
1504 break;
1505 case MSR_IA32_VMX_EPT_VPID_CAP:
1506 *pdata = msrs->ept_caps |
1507 ((u64)msrs->vpid_caps << 32);
1508 break;
1509 case MSR_IA32_VMX_VMFUNC:
1510 *pdata = msrs->vmfunc_controls;
1511 break;
1512 default:
1513 return 1;
1514 }
1515
1516 return 0;
1517 }
1518
1519 /*
1520 * Copy the writable VMCS shadow fields back to the VMCS12, in case they have
1521 * been modified by the L1 guest. Note, "writable" in this context means
1522 * "writable by the guest", i.e. tagged SHADOW_FIELD_RW; the set of
1523 * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only"
1524 * VM-exit information fields (which are actually writable if the vCPU is
1525 * configured to support "VMWRITE to any supported field in the VMCS").
1526 */
copy_shadow_to_vmcs12(struct vcpu_vmx * vmx)1527 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
1528 {
1529 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
1530 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
1531 struct shadow_vmcs_field field;
1532 unsigned long val;
1533 int i;
1534
1535 if (WARN_ON(!shadow_vmcs))
1536 return;
1537
1538 preempt_disable();
1539
1540 vmcs_load(shadow_vmcs);
1541
1542 for (i = 0; i < max_shadow_read_write_fields; i++) {
1543 field = shadow_read_write_fields[i];
1544 val = __vmcs_readl(field.encoding);
1545 vmcs12_write_any(vmcs12, field.encoding, field.offset, val);
1546 }
1547
1548 vmcs_clear(shadow_vmcs);
1549 vmcs_load(vmx->loaded_vmcs->vmcs);
1550
1551 preempt_enable();
1552 }
1553
copy_vmcs12_to_shadow(struct vcpu_vmx * vmx)1554 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
1555 {
1556 const struct shadow_vmcs_field *fields[] = {
1557 shadow_read_write_fields,
1558 shadow_read_only_fields
1559 };
1560 const int max_fields[] = {
1561 max_shadow_read_write_fields,
1562 max_shadow_read_only_fields
1563 };
1564 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
1565 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
1566 struct shadow_vmcs_field field;
1567 unsigned long val;
1568 int i, q;
1569
1570 if (WARN_ON(!shadow_vmcs))
1571 return;
1572
1573 vmcs_load(shadow_vmcs);
1574
1575 for (q = 0; q < ARRAY_SIZE(fields); q++) {
1576 for (i = 0; i < max_fields[q]; i++) {
1577 field = fields[q][i];
1578 val = vmcs12_read_any(vmcs12, field.encoding,
1579 field.offset);
1580 __vmcs_writel(field.encoding, val);
1581 }
1582 }
1583
1584 vmcs_clear(shadow_vmcs);
1585 vmcs_load(vmx->loaded_vmcs->vmcs);
1586 }
1587
copy_enlightened_to_vmcs12(struct vcpu_vmx * vmx,u32 hv_clean_fields)1588 static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields)
1589 {
1590 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1591 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
1592
1593 /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
1594 vmcs12->tpr_threshold = evmcs->tpr_threshold;
1595 vmcs12->guest_rip = evmcs->guest_rip;
1596
1597 if (unlikely(!(hv_clean_fields &
1598 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
1599 vmcs12->guest_rsp = evmcs->guest_rsp;
1600 vmcs12->guest_rflags = evmcs->guest_rflags;
1601 vmcs12->guest_interruptibility_info =
1602 evmcs->guest_interruptibility_info;
1603 }
1604
1605 if (unlikely(!(hv_clean_fields &
1606 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
1607 vmcs12->cpu_based_vm_exec_control =
1608 evmcs->cpu_based_vm_exec_control;
1609 }
1610
1611 if (unlikely(!(hv_clean_fields &
1612 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
1613 vmcs12->exception_bitmap = evmcs->exception_bitmap;
1614 }
1615
1616 if (unlikely(!(hv_clean_fields &
1617 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
1618 vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
1619 }
1620
1621 if (unlikely(!(hv_clean_fields &
1622 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
1623 vmcs12->vm_entry_intr_info_field =
1624 evmcs->vm_entry_intr_info_field;
1625 vmcs12->vm_entry_exception_error_code =
1626 evmcs->vm_entry_exception_error_code;
1627 vmcs12->vm_entry_instruction_len =
1628 evmcs->vm_entry_instruction_len;
1629 }
1630
1631 if (unlikely(!(hv_clean_fields &
1632 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
1633 vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
1634 vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
1635 vmcs12->host_cr0 = evmcs->host_cr0;
1636 vmcs12->host_cr3 = evmcs->host_cr3;
1637 vmcs12->host_cr4 = evmcs->host_cr4;
1638 vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
1639 vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
1640 vmcs12->host_rip = evmcs->host_rip;
1641 vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
1642 vmcs12->host_es_selector = evmcs->host_es_selector;
1643 vmcs12->host_cs_selector = evmcs->host_cs_selector;
1644 vmcs12->host_ss_selector = evmcs->host_ss_selector;
1645 vmcs12->host_ds_selector = evmcs->host_ds_selector;
1646 vmcs12->host_fs_selector = evmcs->host_fs_selector;
1647 vmcs12->host_gs_selector = evmcs->host_gs_selector;
1648 vmcs12->host_tr_selector = evmcs->host_tr_selector;
1649 }
1650
1651 if (unlikely(!(hv_clean_fields &
1652 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
1653 vmcs12->pin_based_vm_exec_control =
1654 evmcs->pin_based_vm_exec_control;
1655 vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
1656 vmcs12->secondary_vm_exec_control =
1657 evmcs->secondary_vm_exec_control;
1658 }
1659
1660 if (unlikely(!(hv_clean_fields &
1661 HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
1662 vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
1663 vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
1664 }
1665
1666 if (unlikely(!(hv_clean_fields &
1667 HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
1668 vmcs12->msr_bitmap = evmcs->msr_bitmap;
1669 }
1670
1671 if (unlikely(!(hv_clean_fields &
1672 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
1673 vmcs12->guest_es_base = evmcs->guest_es_base;
1674 vmcs12->guest_cs_base = evmcs->guest_cs_base;
1675 vmcs12->guest_ss_base = evmcs->guest_ss_base;
1676 vmcs12->guest_ds_base = evmcs->guest_ds_base;
1677 vmcs12->guest_fs_base = evmcs->guest_fs_base;
1678 vmcs12->guest_gs_base = evmcs->guest_gs_base;
1679 vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
1680 vmcs12->guest_tr_base = evmcs->guest_tr_base;
1681 vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
1682 vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
1683 vmcs12->guest_es_limit = evmcs->guest_es_limit;
1684 vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
1685 vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
1686 vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
1687 vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
1688 vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
1689 vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
1690 vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
1691 vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
1692 vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
1693 vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
1694 vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
1695 vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
1696 vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
1697 vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
1698 vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
1699 vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
1700 vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
1701 vmcs12->guest_es_selector = evmcs->guest_es_selector;
1702 vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
1703 vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
1704 vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
1705 vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
1706 vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
1707 vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
1708 vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
1709 }
1710
1711 if (unlikely(!(hv_clean_fields &
1712 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
1713 vmcs12->tsc_offset = evmcs->tsc_offset;
1714 vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
1715 vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
1716 }
1717
1718 if (unlikely(!(hv_clean_fields &
1719 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
1720 vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
1721 vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
1722 vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
1723 vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
1724 vmcs12->guest_cr0 = evmcs->guest_cr0;
1725 vmcs12->guest_cr3 = evmcs->guest_cr3;
1726 vmcs12->guest_cr4 = evmcs->guest_cr4;
1727 vmcs12->guest_dr7 = evmcs->guest_dr7;
1728 }
1729
1730 if (unlikely(!(hv_clean_fields &
1731 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
1732 vmcs12->host_fs_base = evmcs->host_fs_base;
1733 vmcs12->host_gs_base = evmcs->host_gs_base;
1734 vmcs12->host_tr_base = evmcs->host_tr_base;
1735 vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
1736 vmcs12->host_idtr_base = evmcs->host_idtr_base;
1737 vmcs12->host_rsp = evmcs->host_rsp;
1738 }
1739
1740 if (unlikely(!(hv_clean_fields &
1741 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
1742 vmcs12->ept_pointer = evmcs->ept_pointer;
1743 vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
1744 }
1745
1746 if (unlikely(!(hv_clean_fields &
1747 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
1748 vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
1749 vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
1750 vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
1751 vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
1752 vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
1753 vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
1754 vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
1755 vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
1756 vmcs12->guest_pending_dbg_exceptions =
1757 evmcs->guest_pending_dbg_exceptions;
1758 vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
1759 vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
1760 vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
1761 vmcs12->guest_activity_state = evmcs->guest_activity_state;
1762 vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
1763 }
1764
1765 /*
1766 * Not used?
1767 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
1768 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
1769 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
1770 * vmcs12->page_fault_error_code_mask =
1771 * evmcs->page_fault_error_code_mask;
1772 * vmcs12->page_fault_error_code_match =
1773 * evmcs->page_fault_error_code_match;
1774 * vmcs12->cr3_target_count = evmcs->cr3_target_count;
1775 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
1776 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
1777 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
1778 */
1779
1780 /*
1781 * Read only fields:
1782 * vmcs12->guest_physical_address = evmcs->guest_physical_address;
1783 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
1784 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
1785 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
1786 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
1787 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
1788 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
1789 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
1790 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
1791 * vmcs12->exit_qualification = evmcs->exit_qualification;
1792 * vmcs12->guest_linear_address = evmcs->guest_linear_address;
1793 *
1794 * Not present in struct vmcs12:
1795 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
1796 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
1797 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
1798 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
1799 */
1800
1801 return;
1802 }
1803
1804 static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
1805 {
1806 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1807 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
1808
1809 /*
1810 * Should not be changed by KVM:
1811 *
1812 * evmcs->host_es_selector = vmcs12->host_es_selector;
1813 * evmcs->host_cs_selector = vmcs12->host_cs_selector;
1814 * evmcs->host_ss_selector = vmcs12->host_ss_selector;
1815 * evmcs->host_ds_selector = vmcs12->host_ds_selector;
1816 * evmcs->host_fs_selector = vmcs12->host_fs_selector;
1817 * evmcs->host_gs_selector = vmcs12->host_gs_selector;
1818 * evmcs->host_tr_selector = vmcs12->host_tr_selector;
1819 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
1820 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
1821 * evmcs->host_cr0 = vmcs12->host_cr0;
1822 * evmcs->host_cr3 = vmcs12->host_cr3;
1823 * evmcs->host_cr4 = vmcs12->host_cr4;
1824 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
1825 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
1826 * evmcs->host_rip = vmcs12->host_rip;
1827 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
1828 * evmcs->host_fs_base = vmcs12->host_fs_base;
1829 * evmcs->host_gs_base = vmcs12->host_gs_base;
1830 * evmcs->host_tr_base = vmcs12->host_tr_base;
1831 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
1832 * evmcs->host_idtr_base = vmcs12->host_idtr_base;
1833 * evmcs->host_rsp = vmcs12->host_rsp;
1834 * sync_vmcs02_to_vmcs12() doesn't read these:
1835 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
1836 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
1837 * evmcs->msr_bitmap = vmcs12->msr_bitmap;
1838 * evmcs->ept_pointer = vmcs12->ept_pointer;
1839 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
1840 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
1841 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
1842 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
1843 * evmcs->tpr_threshold = vmcs12->tpr_threshold;
1844 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
1845 * evmcs->exception_bitmap = vmcs12->exception_bitmap;
1846 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
1847 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
1848 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
1849 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
1850 * evmcs->page_fault_error_code_mask =
1851 * vmcs12->page_fault_error_code_mask;
1852 * evmcs->page_fault_error_code_match =
1853 * vmcs12->page_fault_error_code_match;
1854 * evmcs->cr3_target_count = vmcs12->cr3_target_count;
1855 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
1856 * evmcs->tsc_offset = vmcs12->tsc_offset;
1857 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
1858 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
1859 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
1860 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
1861 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
1862 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
1863 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
1864 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
1865 *
1866 * Not present in struct vmcs12:
1867 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
1868 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
1869 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
1870 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
1871 */
1872
1873 evmcs->guest_es_selector = vmcs12->guest_es_selector;
1874 evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
1875 evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
1876 evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
1877 evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
1878 evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
1879 evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
1880 evmcs->guest_tr_selector = vmcs12->guest_tr_selector;
1881
1882 evmcs->guest_es_limit = vmcs12->guest_es_limit;
1883 evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
1884 evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
1885 evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
1886 evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
1887 evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
1888 evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
1889 evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
1890 evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
1891 evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;
1892
1893 evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
1894 evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
1895 evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
1896 evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
1897 evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
1898 evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
1899 evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
1900 evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;
1901
1902 evmcs->guest_es_base = vmcs12->guest_es_base;
1903 evmcs->guest_cs_base = vmcs12->guest_cs_base;
1904 evmcs->guest_ss_base = vmcs12->guest_ss_base;
1905 evmcs->guest_ds_base = vmcs12->guest_ds_base;
1906 evmcs->guest_fs_base = vmcs12->guest_fs_base;
1907 evmcs->guest_gs_base = vmcs12->guest_gs_base;
1908 evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
1909 evmcs->guest_tr_base = vmcs12->guest_tr_base;
1910 evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
1911 evmcs->guest_idtr_base = vmcs12->guest_idtr_base;
1912
1913 evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
1914 evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;
1915
1916 evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
1917 evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
1918 evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
1919 evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;
1920
1921 evmcs->guest_pending_dbg_exceptions =
1922 vmcs12->guest_pending_dbg_exceptions;
1923 evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
1924 evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;
1925
1926 evmcs->guest_activity_state = vmcs12->guest_activity_state;
1927 evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;
1928
1929 evmcs->guest_cr0 = vmcs12->guest_cr0;
1930 evmcs->guest_cr3 = vmcs12->guest_cr3;
1931 evmcs->guest_cr4 = vmcs12->guest_cr4;
1932 evmcs->guest_dr7 = vmcs12->guest_dr7;
1933
1934 evmcs->guest_physical_address = vmcs12->guest_physical_address;
1935
1936 evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
1937 evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
1938 evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
1939 evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
1940 evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
1941 evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
1942 evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
1943 evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;
1944
1945 evmcs->exit_qualification = vmcs12->exit_qualification;
1946
1947 evmcs->guest_linear_address = vmcs12->guest_linear_address;
1948 evmcs->guest_rsp = vmcs12->guest_rsp;
1949 evmcs->guest_rflags = vmcs12->guest_rflags;
1950
1951 evmcs->guest_interruptibility_info =
1952 vmcs12->guest_interruptibility_info;
1953 evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
1954 evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
1955 evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
1956 evmcs->vm_entry_exception_error_code =
1957 vmcs12->vm_entry_exception_error_code;
1958 evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;
1959
1960 evmcs->guest_rip = vmcs12->guest_rip;
1961
1962 evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
1963
1964 return;
1965 }
1966
1967 /*
1968 * This is an equivalent of the nested hypervisor executing the vmptrld
1969 * instruction.
1970 */
1971 static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
1972 struct kvm_vcpu *vcpu, bool from_launch)
1973 {
1974 struct vcpu_vmx *vmx = to_vmx(vcpu);
1975 bool evmcs_gpa_changed = false;
1976 u64 evmcs_gpa;
1977
1978 if (likely(!vmx->nested.enlightened_vmcs_enabled))
1979 return EVMPTRLD_DISABLED;
1980
1981 if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa)) {
1982 nested_release_evmcs(vcpu);
1983 return EVMPTRLD_DISABLED;
1984 }
1985
1986 if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
1987 vmx->nested.current_vmptr = -1ull;
1988
1989 nested_release_evmcs(vcpu);
1990
1991 if (kvm_vcpu_map(vcpu, gpa_to_gfn(evmcs_gpa),
1992 &vmx->nested.hv_evmcs_map))
1993 return EVMPTRLD_ERROR;
1994
1995 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;
1996
1997 /*
1998 		 * Currently, KVM only supports eVMCS version 1
1999 		 * (== KVM_EVMCS_VERSION) and thus we expect the guest to set
2000 		 * this value in the first u32 field of the eVMCS, which
2001 		 * specifies the eVMCS VersionNumber.
2002 		 *
2003 		 * The guest should learn the eVMCS versions supported by the
2004 		 * host by examining CPUID.0x4000000A.EAX[0:15]. The host
2005 		 * userspace VMM is expected to set this CPUID leaf according
2006 		 * to the value returned in vmcs_version from nested_enable_evmcs().
2007 		 *
2008 		 * However, it turns out that Microsoft Hyper-V fails to comply
2009 		 * with its own invented interface: when Hyper-V uses eVMCS, it
2010 		 * just sets the first u32 field of the eVMCS to the revision_id
2011 		 * specified in MSR_IA32_VMX_BASIC, instead of an eVMCS version
2012 		 * number, which should be one of the supported versions from
2013 		 * CPUID.0x4000000A.EAX[0:15].
2014 		 *
2015 		 * To work around this Hyper-V bug, accept either a supported
2016 		 * eVMCS version or the VMCS12 revision_id as a valid value for
2017 		 * the first u32 field of the eVMCS.
2018 */
2019 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
2020 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
2021 nested_release_evmcs(vcpu);
2022 return EVMPTRLD_VMFAIL;
2023 }
2024
2025 vmx->nested.hv_evmcs_vmptr = evmcs_gpa;
2026
2027 evmcs_gpa_changed = true;
2028 /*
2029 		 * Unlike a normal vmcs12, an enlightened vmcs12 is not fully
2030 		 * reloaded from the guest's memory (read-only fields, fields not
2031 		 * present in struct hv_enlightened_vmcs, ...). Make sure there
2032 * are no leftovers.
2033 */
2034 if (from_launch) {
2035 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2036 memset(vmcs12, 0, sizeof(*vmcs12));
2037 vmcs12->hdr.revision_id = VMCS12_REVISION;
2038 }
2039
2040 }
2041
2042 /*
2043 	 * Clean-fields data can't be used on VMLAUNCH, nor when we switch
2044 	 * between different L2 guests, as KVM keeps a single vmcs12 per L1.
2045 */
2046 if (from_launch || evmcs_gpa_changed)
2047 vmx->nested.hv_evmcs->hv_clean_fields &=
2048 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
2049
2050 return EVMPTRLD_SUCCEEDED;
2051 }
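
/*
 * Illustrative sketch, not part of the build: how the Hyper-V "clean
 * fields" bitmap gates the eVMCS -> vmcs12 copy performed above.  A field
 * group is copied only when its clean bit is *not* set, i.e. when L1 has
 * marked the group dirty.  The struct and bit names are the real ones used
 * above; the helper itself is hypothetical.
 */
#if 0	/* example only, not compiled */
static void example_sync_excpn_group(struct vmcs12 *vmcs12,
				     const struct hv_enlightened_vmcs *evmcs,
				     u32 hv_clean_fields)
{
	/* Group is clean: L1 did not touch it, so skip the copy. */
	if (hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN)
		return;

	vmcs12->exception_bitmap = evmcs->exception_bitmap;
}
#endif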
2052
2053 void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
2054 {
2055 struct vcpu_vmx *vmx = to_vmx(vcpu);
2056
2057 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
2058 copy_vmcs12_to_enlightened(vmx);
2059 else
2060 copy_vmcs12_to_shadow(vmx);
2061
2062 vmx->nested.need_vmcs12_to_shadow_sync = false;
2063 }
2064
2065 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
2066 {
2067 struct vcpu_vmx *vmx =
2068 container_of(timer, struct vcpu_vmx, nested.preemption_timer);
2069
2070 vmx->nested.preemption_timer_expired = true;
2071 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
2072 kvm_vcpu_kick(&vmx->vcpu);
2073
2074 return HRTIMER_NORESTART;
2075 }
2076
2077 static u64 vmx_calc_preemption_timer_value(struct kvm_vcpu *vcpu)
2078 {
2079 struct vcpu_vmx *vmx = to_vmx(vcpu);
2080 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2081
2082 u64 l1_scaled_tsc = kvm_read_l1_tsc(vcpu, rdtsc()) >>
2083 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
2084
2085 if (!vmx->nested.has_preemption_timer_deadline) {
2086 vmx->nested.preemption_timer_deadline =
2087 vmcs12->vmx_preemption_timer_value + l1_scaled_tsc;
2088 vmx->nested.has_preemption_timer_deadline = true;
2089 }
2090 return vmx->nested.preemption_timer_deadline - l1_scaled_tsc;
2091 }
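
/*
 * Worked example with hypothetical numbers: if vmcs12 programs a timer
 * value of 1000 and the current L1 TSC reads 0x1000000, then
 * l1_scaled_tsc = 0x1000000 >> 5 = 0x80000 ticks and the latched deadline
 * becomes 0x80000 + 1000.  A later call (with the deadline still latched
 * via has_preemption_timer_deadline) only recomputes the remaining ticks,
 * so the timer does not restart from the full 1000.
 */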
2092
2093 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu,
2094 u64 preemption_timeout)
2095 {
2096 struct vcpu_vmx *vmx = to_vmx(vcpu);
2097
2098 /*
2099 * A timer value of zero is architecturally guaranteed to cause
2100 * a VMExit prior to executing any instructions in the guest.
2101 */
2102 if (preemption_timeout == 0) {
2103 vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
2104 return;
2105 }
2106
2107 if (vcpu->arch.virtual_tsc_khz == 0)
2108 return;
2109
2110 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
2111 preemption_timeout *= 1000000;
2112 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
2113 hrtimer_start(&vmx->nested.preemption_timer,
2114 ktime_add_ns(ktime_get(), preemption_timeout),
2115 HRTIMER_MODE_ABS_PINNED);
2116 }
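
/*
 * Illustrative sketch, not part of the build: the conversion above turns a
 * preemption timer value (in units of TSC >> 5) into nanoseconds for the
 * hrtimer.  With a hypothetical virtual_tsc_khz of 2000000 (2 GHz) and a
 * timer value of 1000: 1000 << 5 = 32000 TSC cycles, and
 * 32000 * 1000000 / 2000000 = 16000 ns.
 */
#if 0	/* example only, not compiled */
static u64 example_preemption_ticks_to_ns(u64 ticks, u32 tsc_khz)
{
	u64 ns = ticks << VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;

	ns *= 1000000;
	do_div(ns, tsc_khz);
	return ns;
}
#endif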
2117
2118 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2119 {
2120 if (vmx->nested.nested_run_pending &&
2121 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
2122 return vmcs12->guest_ia32_efer;
2123 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
2124 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
2125 else
2126 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
2127 }
2128
2129 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
2130 {
2131 /*
2132 * If vmcs02 hasn't been initialized, set the constant vmcs02 state
2133 * according to L0's settings (vmcs12 is irrelevant here). Host
2134 * fields that come from L0 and are not constant, e.g. HOST_CR3,
2135 * will be set as needed prior to VMLAUNCH/VMRESUME.
2136 */
2137 if (vmx->nested.vmcs02_initialized)
2138 return;
2139 vmx->nested.vmcs02_initialized = true;
2140
2141 /*
2142 	 * We don't care what the EPTP value is; we just need to guarantee
2143 	 * it's valid so that we don't get a false positive when doing early
2144 	 * consistency checks.
2145 */
2146 if (enable_ept && nested_early_check)
2147 vmcs_write64(EPT_POINTER,
2148 construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL));
2149
2150 /* All VMFUNCs are currently emulated through L0 vmexits. */
2151 if (cpu_has_vmx_vmfunc())
2152 vmcs_write64(VM_FUNCTION_CONTROL, 0);
2153
2154 if (cpu_has_vmx_posted_intr())
2155 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
2156
2157 if (cpu_has_vmx_msr_bitmap())
2158 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
2159
2160 /*
2161 * PML is emulated for L2, but never enabled in hardware as the MMU
2162 * handles A/D emulation. Disabling PML for L2 also avoids having to
2163 * deal with filtering out L2 GPAs from the buffer.
2164 */
2165 if (enable_pml) {
2166 vmcs_write64(PML_ADDRESS, 0);
2167 vmcs_write16(GUEST_PML_INDEX, -1);
2168 }
2169
2170 if (cpu_has_vmx_encls_vmexit())
2171 vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
2172
2173 /*
2174 * Set the MSR load/store lists to match L0's settings. Only the
2175 * addresses are constant (for vmcs02), the counts can change based
2176 * on L2's behavior, e.g. switching to/from long mode.
2177 */
2178 vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val));
2179 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
2180 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
2181
2182 vmx_set_constant_host_state(vmx);
2183 }
2184
2185 static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
2186 struct vmcs12 *vmcs12)
2187 {
2188 prepare_vmcs02_constant_state(vmx);
2189
2190 vmcs_write64(VMCS_LINK_POINTER, -1ull);
2191
2192 if (enable_vpid) {
2193 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
2194 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
2195 else
2196 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
2197 }
2198 }
2199
2200 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs01,
2201 struct vmcs12 *vmcs12)
2202 {
2203 u32 exec_control;
2204 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
2205
2206 if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
2207 prepare_vmcs02_early_rare(vmx, vmcs12);
2208
2209 /*
2210 * PIN CONTROLS
2211 */
2212 exec_control = __pin_controls_get(vmcs01);
2213 exec_control |= (vmcs12->pin_based_vm_exec_control &
2214 ~PIN_BASED_VMX_PREEMPTION_TIMER);
2215
2216 /* Posted interrupts setting is only taken from vmcs12. */
2217 vmx->nested.pi_pending = false;
2218 if (nested_cpu_has_posted_intr(vmcs12))
2219 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
2220 else
2221 exec_control &= ~PIN_BASED_POSTED_INTR;
2222 pin_controls_set(vmx, exec_control);
2223
2224 /*
2225 * EXEC CONTROLS
2226 */
2227 exec_control = __exec_controls_get(vmcs01); /* L0's desires */
2228 exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING;
2229 exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING;
2230 exec_control &= ~CPU_BASED_TPR_SHADOW;
2231 exec_control |= vmcs12->cpu_based_vm_exec_control;
2232
2233 vmx->nested.l1_tpr_threshold = -1;
2234 if (exec_control & CPU_BASED_TPR_SHADOW)
2235 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
2236 #ifdef CONFIG_X86_64
2237 else
2238 exec_control |= CPU_BASED_CR8_LOAD_EXITING |
2239 CPU_BASED_CR8_STORE_EXITING;
2240 #endif
2241
2242 /*
2243 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
2244 * for I/O port accesses.
2245 */
2246 exec_control |= CPU_BASED_UNCOND_IO_EXITING;
2247 exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
2248
2249 /*
2250 * This bit will be computed in nested_get_vmcs12_pages, because
2251 * we do not have access to L1's MSR bitmap yet. For now, keep
2252 * the same bit as before, hoping to avoid multiple VMWRITEs that
2253 * only set/clear this bit.
2254 */
2255 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
2256 exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS;
2257
2258 exec_controls_set(vmx, exec_control);
2259
2260 /*
2261 * SECONDARY EXEC CONTROLS
2262 */
2263 if (cpu_has_secondary_exec_ctrls()) {
2264 exec_control = __secondary_exec_controls_get(vmcs01);
2265
2266 /* Take the following fields only from vmcs12 */
2267 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2268 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2269 SECONDARY_EXEC_ENABLE_INVPCID |
2270 SECONDARY_EXEC_ENABLE_RDTSCP |
2271 SECONDARY_EXEC_XSAVES |
2272 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
2273 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2274 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2275 SECONDARY_EXEC_ENABLE_VMFUNC |
2276 SECONDARY_EXEC_DESC);
2277
2278 if (nested_cpu_has(vmcs12,
2279 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
2280 exec_control |= vmcs12->secondary_vm_exec_control;
2281
2282 /* PML is emulated and never enabled in hardware for L2. */
2283 exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
2284
2285 /* VMCS shadowing for L2 is emulated for now */
2286 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
2287
2288 /*
2289 * Preset *DT exiting when emulating UMIP, so that vmx_set_cr4()
2290 * will not have to rewrite the controls just for this bit.
2291 */
2292 if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated() &&
2293 (vmcs12->guest_cr4 & X86_CR4_UMIP))
2294 exec_control |= SECONDARY_EXEC_DESC;
2295
2296 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
2297 vmcs_write16(GUEST_INTR_STATUS,
2298 vmcs12->guest_intr_status);
2299
2300 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
2301 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
2302
2303 if (exec_control & SECONDARY_EXEC_ENCLS_EXITING)
2304 vmx_write_encls_bitmap(&vmx->vcpu, vmcs12);
2305
2306 secondary_exec_controls_set(vmx, exec_control);
2307 }
2308
2309 /*
2310 * ENTRY CONTROLS
2311 *
2312 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
2313 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
2314 * on the related bits (if supported by the CPU) in the hope that
2315 * we can avoid VMWrites during vmx_set_efer().
2316 *
2317 * Similarly, take vmcs01's PERF_GLOBAL_CTRL in the hope that if KVM is
2318 * loading PERF_GLOBAL_CTRL via the VMCS for L1, then KVM will want to
2319 * do the same for L2.
2320 */
2321 exec_control = __vm_entry_controls_get(vmcs01);
2322 exec_control |= (vmcs12->vm_entry_controls &
2323 ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL);
2324 exec_control &= ~(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER);
2325 if (cpu_has_load_ia32_efer()) {
2326 if (guest_efer & EFER_LMA)
2327 exec_control |= VM_ENTRY_IA32E_MODE;
2328 if (guest_efer != host_efer)
2329 exec_control |= VM_ENTRY_LOAD_IA32_EFER;
2330 }
2331 vm_entry_controls_set(vmx, exec_control);
2332
2333 /*
2334 * EXIT CONTROLS
2335 *
2336 * L2->L1 exit controls are emulated - the hardware exit is to L0 so
2337 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
2338 * bits may be modified by vmx_set_efer() in prepare_vmcs02().
2339 */
2340 exec_control = __vm_exit_controls_get(vmcs01);
2341 if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
2342 exec_control |= VM_EXIT_LOAD_IA32_EFER;
2343 else
2344 exec_control &= ~VM_EXIT_LOAD_IA32_EFER;
2345 vm_exit_controls_set(vmx, exec_control);
2346
2347 /*
2348 * Interrupt/Exception Fields
2349 */
2350 if (vmx->nested.nested_run_pending) {
2351 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2352 vmcs12->vm_entry_intr_info_field);
2353 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
2354 vmcs12->vm_entry_exception_error_code);
2355 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2356 vmcs12->vm_entry_instruction_len);
2357 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
2358 vmcs12->guest_interruptibility_info);
2359 vmx->loaded_vmcs->nmi_known_unmasked =
2360 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
2361 } else {
2362 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
2363 }
2364 }
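
/*
 * Illustrative sketch, not part of the build: the merging pattern applied
 * above to each control field.  Start from vmcs01 (L0's desires), clear
 * the bits that are owned by L0 or recomputed separately, then OR in L1's
 * value from vmcs12.  The accessors and bit names are the real ones; the
 * wrapper itself is hypothetical.
 */
#if 0	/* example only, not compiled */
static u32 example_merge_exec_controls(struct loaded_vmcs *vmcs01,
				       struct vmcs12 *vmcs12)
{
	u32 exec_control = __exec_controls_get(vmcs01);	/* L0's desires */

	/* Bits handled separately by KVM, never inherited from vmcs01 here. */
	exec_control &= ~(CPU_BASED_INTR_WINDOW_EXITING |
			  CPU_BASED_NMI_WINDOW_EXITING |
			  CPU_BASED_TPR_SHADOW);

	/* Layer L1's requests on top. */
	exec_control |= vmcs12->cpu_based_vm_exec_control;

	return exec_control;
}
#endif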
2365
2366 static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2367 {
2368 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
2369
2370 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2371 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
2372 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
2373 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
2374 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
2375 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
2376 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
2377 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
2378 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
2379 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
2380 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
2381 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
2382 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
2383 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
2384 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
2385 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
2386 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
2387 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
2388 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
2389 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
2390 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
2391 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
2392 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
2393 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
2394 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
2395 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
2396 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
2397 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
2398 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
2399 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
2400 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
2401 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
2402 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
2403 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
2404 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
2405 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
2406 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
2407 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
2408
2409 vmx->segment_cache.bitmask = 0;
2410 }
2411
2412 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2413 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
2414 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
2415 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
2416 vmcs12->guest_pending_dbg_exceptions);
2417 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
2418 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
2419
2420 /*
2421 		 * L1 may access L2's PDPTRs, so save them in order to construct
2422 		 * vmcs12.
2423 */
2424 if (enable_ept) {
2425 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2426 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2427 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2428 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2429 }
2430
2431 if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
2432 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
2433 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
2434 }
2435
2436 if (nested_cpu_has_xsaves(vmcs12))
2437 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
2438
2439 /*
2440 * Whether page-faults are trapped is determined by a combination of
2441 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. If L0
2442 * doesn't care about page faults then we should set all of these to
2443 * L1's desires. However, if L0 does care about (some) page faults, it
2444 	 * is not easy (if at all possible?) to merge L0's and L1's desires, so
2445 	 * we simply ask to exit on each and every L2 page fault. This is done by
2446 * setting MASK=MATCH=0 and (see below) EB.PF=1.
2447 * Note that below we don't need special code to set EB.PF beyond the
2448 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
2449 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
2450 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
2451 */
2452 if (vmx_need_pf_intercept(&vmx->vcpu)) {
2453 /*
2454 * TODO: if both L0 and L1 need the same MASK and MATCH,
2455 * go ahead and use it?
2456 */
2457 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
2458 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
2459 } else {
2460 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, vmcs12->page_fault_error_code_mask);
2461 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, vmcs12->page_fault_error_code_match);
2462 }
2463
2464 if (cpu_has_vmx_apicv()) {
2465 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
2466 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
2467 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
2468 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
2469 }
2470
2471 /*
2472 * Make sure the msr_autostore list is up to date before we set the
2473 * count in the vmcs02.
2474 */
2475 prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC);
2476
2477 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr);
2478 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
2479 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
2480
2481 set_cr4_guest_host_mask(vmx);
2482 }
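
/*
 * Illustrative sketch, not part of the build: the architectural rule the
 * MASK=MATCH=0 trick above relies on.  A page fault causes a VM-exit iff
 * "(PFEC & PFEC_MASK) == PFEC_MATCH" has the same value as the PF bit in
 * the exception bitmap, so with MASK=MATCH=0 the comparison is always true
 * and EB.PF alone decides whether L0 intercepts the fault.
 */
#if 0	/* example only, not compiled */
static bool example_pf_causes_vmexit(u32 error_code, u32 pfec_mask,
				     u32 pfec_match, bool eb_pf)
{
	return ((error_code & pfec_mask) == pfec_match) == eb_pf;
}
#endif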
2483
2484 /*
2485 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
2486 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
2487 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
2488  * guest in a way that is appropriate both to L1's requests and to our own
2489  * needs. In addition to modifying the active vmcs (which is vmcs02), this
2490  * function also has necessary side effects, such as setting various
2491  * vcpu->arch fields.
2492  * Returns 0 on success, -EINVAL on failure. On failure, the invalid-state
2493  * exit qualification code is assigned to *entry_failure_code.
2494 */
2495 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
2496 bool from_vmentry,
2497 enum vm_entry_failure_code *entry_failure_code)
2498 {
2499 struct vcpu_vmx *vmx = to_vmx(vcpu);
2500 bool load_guest_pdptrs_vmcs12 = false;
2501
2502 if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
2503 prepare_vmcs02_rare(vmx, vmcs12);
2504 vmx->nested.dirty_vmcs12 = false;
2505
2506 load_guest_pdptrs_vmcs12 = !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) ||
2507 !(vmx->nested.hv_evmcs->hv_clean_fields &
2508 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1);
2509 }
2510
2511 if (vmx->nested.nested_run_pending &&
2512 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
2513 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
2514 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
2515 } else {
2516 kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
2517 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
2518 }
2519 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
2520 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
2521 vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
2522 vmx_set_rflags(vcpu, vmcs12->guest_rflags);
2523
2524 /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
2525 * bitwise-or of what L1 wants to trap for L2, and what we want to
2526 * trap. Note that CR0.TS also needs updating - we do this later.
2527 */
2528 vmx_update_exception_bitmap(vcpu);
2529 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
2530 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
2531
2532 if (vmx->nested.nested_run_pending &&
2533 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
2534 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
2535 vcpu->arch.pat = vmcs12->guest_ia32_pat;
2536 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2537 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
2538 }
2539
2540 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
2541 vcpu->arch.l1_tsc_offset,
2542 vmx_get_l2_tsc_offset(vcpu),
2543 vmx_get_l2_tsc_multiplier(vcpu));
2544
2545 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
2546 vcpu->arch.l1_tsc_scaling_ratio,
2547 vmx_get_l2_tsc_multiplier(vcpu));
2548
2549 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
2550 if (kvm_has_tsc_control)
2551 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
2552
2553 nested_vmx_transition_tlb_flush(vcpu, vmcs12, true);
2554
2555 if (nested_cpu_has_ept(vmcs12))
2556 nested_ept_init_mmu_context(vcpu);
2557
2558 /*
2559 	 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying the
2560 	 * bits that we consider mandatory to be enabled.
2561 	 * The CR0_READ_SHADOW is what L2 should have expected to read given
2562 	 * the specifications by L1; it's not enough to take
2563 	 * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may
2564 	 * have more bits set than L1 expected.
2565 */
2566 vmx_set_cr0(vcpu, vmcs12->guest_cr0);
2567 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
2568
2569 vmx_set_cr4(vcpu, vmcs12->guest_cr4);
2570 vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
2571
2572 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
2573 /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
2574 vmx_set_efer(vcpu, vcpu->arch.efer);
2575
2576 /*
2577 	 * If guest state is invalid and unrestricted guest is disabled,
2578 	 * then L1 attempted VMEntry to L2 with invalid state.
2579 	 * Fail the VMEntry.
2580 	 *
2581 	 * However, when force loading the guest state (on SMM exit or when
2582 	 * loading nested state after migration), it is possible to have
2583 	 * invalid guest state at this point; it will be fixed later by
2584 	 * restoring the L2 register state.
2585 */
2586 if (CC(from_vmentry && !vmx_guest_state_valid(vcpu))) {
2587 *entry_failure_code = ENTRY_FAIL_DEFAULT;
2588 return -EINVAL;
2589 }
2590
2591 	/* Load guest CR3: the MMU shadows either L1's EPT tables or L2's page tables. */
2592 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
2593 from_vmentry, entry_failure_code))
2594 return -EINVAL;
2595
2596 /*
2597 * Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12
2598 * on nested VM-Exit, which can occur without actually running L2 and
2599 * thus without hitting vmx_load_mmu_pgd(), e.g. if L1 is entering L2 with
2600 	 * vmcs12.GUEST_ACTIVITY_STATE=HLT, in which case KVM will intercept the
2601 * transition to HLT instead of running L2.
2602 */
2603 if (enable_ept)
2604 vmcs_writel(GUEST_CR3, vmcs12->guest_cr3);
2605
2606 /* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */
2607 if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) &&
2608 is_pae_paging(vcpu)) {
2609 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2610 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2611 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2612 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2613 }
2614
2615 if (!enable_ept)
2616 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
2617
2618 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
2619 intel_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)) &&
2620 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
2621 vmcs12->guest_ia32_perf_global_ctrl))) {
2622 *entry_failure_code = ENTRY_FAIL_DEFAULT;
2623 return -EINVAL;
2624 }
2625
2626 kvm_rsp_write(vcpu, vmcs12->guest_rsp);
2627 kvm_rip_write(vcpu, vmcs12->guest_rip);
2628
2629 /*
2630 	 * It was observed that genuine Hyper-V running in L1 doesn't reset
2631 	 * 'hv_clean_fields' by itself; it only sets the corresponding dirty
2632 	 * bits when it changes a field in the eVMCS. Mark all fields as clean
2633 * here.
2634 */
2635 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
2636 vmx->nested.hv_evmcs->hv_clean_fields |=
2637 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
2638
2639 return 0;
2640 }
2641
2642 static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
2643 {
2644 if (CC(!nested_cpu_has_nmi_exiting(vmcs12) &&
2645 nested_cpu_has_virtual_nmis(vmcs12)))
2646 return -EINVAL;
2647
2648 if (CC(!nested_cpu_has_virtual_nmis(vmcs12) &&
2649 nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING)))
2650 return -EINVAL;
2651
2652 return 0;
2653 }
2654
2655 static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp)
2656 {
2657 struct vcpu_vmx *vmx = to_vmx(vcpu);
2658
2659 /* Check for memory type validity */
2660 switch (new_eptp & VMX_EPTP_MT_MASK) {
2661 case VMX_EPTP_MT_UC:
2662 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)))
2663 return false;
2664 break;
2665 case VMX_EPTP_MT_WB:
2666 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT)))
2667 return false;
2668 break;
2669 default:
2670 return false;
2671 }
2672
2673 /* Page-walk levels validity. */
2674 switch (new_eptp & VMX_EPTP_PWL_MASK) {
2675 case VMX_EPTP_PWL_5:
2676 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT)))
2677 return false;
2678 break;
2679 case VMX_EPTP_PWL_4:
2680 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT)))
2681 return false;
2682 break;
2683 default:
2684 return false;
2685 }
2686
2687 /* Reserved bits should not be set */
2688 if (CC(kvm_vcpu_is_illegal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f)))
2689 return false;
2690
2691 /* AD, if set, should be supported */
2692 if (new_eptp & VMX_EPTP_AD_ENABLE_BIT) {
2693 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)))
2694 return false;
2695 }
2696
2697 return true;
2698 }
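
/*
 * Illustrative sketch, not part of the build: the EPTP layout validated
 * above.  Bits 2:0 hold the memory type (WB = 6), bits 5:3 the page-walk
 * length minus one (3 for a 4-level walk), bit 6 enables accessed/dirty
 * flags, bits 11:7 are reserved (hence the "(new_eptp >> 7) & 0x1f"
 * check), and the upper bits hold the root EPT table address.
 */
#if 0	/* example only, not compiled */
static u64 example_build_eptp(u64 root_hpa)
{
	/* WB memory type, 4-level walk, A/D enabled: low bits 0x5e. */
	return root_hpa | VMX_EPTP_MT_WB | VMX_EPTP_PWL_4 |
	       VMX_EPTP_AD_ENABLE_BIT;
}
#endif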
2699
2700 /*
2701 * Checks related to VM-Execution Control Fields
2702 */
2703 static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
2704 struct vmcs12 *vmcs12)
2705 {
2706 struct vcpu_vmx *vmx = to_vmx(vcpu);
2707
2708 if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
2709 vmx->nested.msrs.pinbased_ctls_low,
2710 vmx->nested.msrs.pinbased_ctls_high)) ||
2711 CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
2712 vmx->nested.msrs.procbased_ctls_low,
2713 vmx->nested.msrs.procbased_ctls_high)))
2714 return -EINVAL;
2715
2716 if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
2717 CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control,
2718 vmx->nested.msrs.secondary_ctls_low,
2719 vmx->nested.msrs.secondary_ctls_high)))
2720 return -EINVAL;
2721
2722 if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) ||
2723 nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
2724 nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
2725 nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
2726 nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
2727 nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
2728 nested_vmx_check_nmi_controls(vmcs12) ||
2729 nested_vmx_check_pml_controls(vcpu, vmcs12) ||
2730 nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
2731 nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
2732 nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
2733 CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
2734 return -EINVAL;
2735
2736 if (!nested_cpu_has_preemption_timer(vmcs12) &&
2737 nested_cpu_has_save_preemption_timer(vmcs12))
2738 return -EINVAL;
2739
2740 if (nested_cpu_has_ept(vmcs12) &&
2741 CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer)))
2742 return -EINVAL;
2743
2744 if (nested_cpu_has_vmfunc(vmcs12)) {
2745 if (CC(vmcs12->vm_function_control &
2746 ~vmx->nested.msrs.vmfunc_controls))
2747 return -EINVAL;
2748
2749 if (nested_cpu_has_eptp_switching(vmcs12)) {
2750 if (CC(!nested_cpu_has_ept(vmcs12)) ||
2751 CC(!page_address_valid(vcpu, vmcs12->eptp_list_address)))
2752 return -EINVAL;
2753 }
2754 }
2755
2756 return 0;
2757 }
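
/*
 * Illustrative sketch, not part of the build: the property enforced by the
 * vmx_control_verify() checks above.  Each VMX capability MSR reports an
 * allowed-0 mask ("low", bits that must be 1) and an allowed-1 mask
 * ("high", bits that may be 1); a control value is legal only if it
 * satisfies both.
 */
#if 0	/* example only, not compiled */
static bool example_control_is_valid(u32 control, u32 low, u32 high)
{
	/* Every must-be-one bit is set, and no disallowed bit is set. */
	return (control & low) == low && (control & ~high) == 0;
}
#endif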
2758
2759 /*
2760 * Checks related to VM-Exit Control Fields
2761 */
2762 static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
2763 struct vmcs12 *vmcs12)
2764 {
2765 struct vcpu_vmx *vmx = to_vmx(vcpu);
2766
2767 if (CC(!vmx_control_verify(vmcs12->vm_exit_controls,
2768 vmx->nested.msrs.exit_ctls_low,
2769 vmx->nested.msrs.exit_ctls_high)) ||
2770 CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12)))
2771 return -EINVAL;
2772
2773 return 0;
2774 }
2775
2776 /*
2777 * Checks related to VM-Entry Control Fields
2778 */
2779 static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
2780 struct vmcs12 *vmcs12)
2781 {
2782 struct vcpu_vmx *vmx = to_vmx(vcpu);
2783
2784 if (CC(!vmx_control_verify(vmcs12->vm_entry_controls,
2785 vmx->nested.msrs.entry_ctls_low,
2786 vmx->nested.msrs.entry_ctls_high)))
2787 return -EINVAL;
2788
2789 /*
2790 * From the Intel SDM, volume 3:
2791 * Fields relevant to VM-entry event injection must be set properly.
2792 * These fields are the VM-entry interruption-information field, the
2793 * VM-entry exception error code, and the VM-entry instruction length.
2794 */
2795 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
2796 u32 intr_info = vmcs12->vm_entry_intr_info_field;
2797 u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
2798 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
2799 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
2800 bool should_have_error_code;
2801 bool urg = nested_cpu_has2(vmcs12,
2802 SECONDARY_EXEC_UNRESTRICTED_GUEST);
2803 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
2804
2805 /* VM-entry interruption-info field: interruption type */
2806 if (CC(intr_type == INTR_TYPE_RESERVED) ||
2807 CC(intr_type == INTR_TYPE_OTHER_EVENT &&
2808 !nested_cpu_supports_monitor_trap_flag(vcpu)))
2809 return -EINVAL;
2810
2811 /* VM-entry interruption-info field: vector */
2812 if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
2813 CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
2814 CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
2815 return -EINVAL;
2816
2817 /* VM-entry interruption-info field: deliver error code */
2818 should_have_error_code =
2819 intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
2820 x86_exception_has_error_code(vector);
2821 if (CC(has_error_code != should_have_error_code))
2822 return -EINVAL;
2823
2824 /* VM-entry exception error code */
2825 if (CC(has_error_code &&
2826 vmcs12->vm_entry_exception_error_code & GENMASK(31, 16)))
2827 return -EINVAL;
2828
2829 /* VM-entry interruption-info field: reserved bits */
2830 if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK))
2831 return -EINVAL;
2832
2833 /* VM-entry instruction length */
2834 switch (intr_type) {
2835 case INTR_TYPE_SOFT_EXCEPTION:
2836 case INTR_TYPE_SOFT_INTR:
2837 case INTR_TYPE_PRIV_SW_EXCEPTION:
2838 if (CC(vmcs12->vm_entry_instruction_len > 15) ||
2839 CC(vmcs12->vm_entry_instruction_len == 0 &&
2840 CC(!nested_cpu_has_zero_length_injection(vcpu))))
2841 return -EINVAL;
2842 }
2843 }
2844
2845 if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
2846 return -EINVAL;
2847
2848 return 0;
2849 }
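
/*
 * Illustrative sketch, not part of the build: the VM-entry interruption-
 * information layout validated above.  Bits 7:0 hold the vector, bits 10:8
 * the type, bit 11 "deliver error code" and bit 31 "valid".  For example,
 * injecting a #GP (vector 13, hardware exception, with error code) encodes
 * as 0x80000b0d.  The helper itself is hypothetical.
 */
#if 0	/* example only, not compiled */
static u32 example_intr_info(u8 vector, u32 type, bool has_error_code)
{
	u32 intr_info = vector | type | INTR_INFO_VALID_MASK;

	if (has_error_code)
		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
	return intr_info;
}
#endif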
2850
2851 static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
2852 struct vmcs12 *vmcs12)
2853 {
2854 if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
2855 nested_check_vm_exit_controls(vcpu, vmcs12) ||
2856 nested_check_vm_entry_controls(vcpu, vmcs12))
2857 return -EINVAL;
2858
2859 if (to_vmx(vcpu)->nested.enlightened_vmcs_enabled)
2860 return nested_evmcs_check_controls(vmcs12);
2861
2862 return 0;
2863 }
2864
2865 static int nested_vmx_check_address_space_size(struct kvm_vcpu *vcpu,
2866 struct vmcs12 *vmcs12)
2867 {
2868 #ifdef CONFIG_X86_64
2869 if (CC(!!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) !=
2870 !!(vcpu->arch.efer & EFER_LMA)))
2871 return -EINVAL;
2872 #endif
2873 return 0;
2874 }
2875
2876 static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
2877 struct vmcs12 *vmcs12)
2878 {
2879 bool ia32e;
2880
2881 if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) ||
2882 CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) ||
2883 CC(kvm_vcpu_is_illegal_gpa(vcpu, vmcs12->host_cr3)))
2884 return -EINVAL;
2885
2886 if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
2887 CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu)))
2888 return -EINVAL;
2889
2890 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) &&
2891 CC(!kvm_pat_valid(vmcs12->host_ia32_pat)))
2892 return -EINVAL;
2893
2894 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
2895 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu),
2896 vmcs12->host_ia32_perf_global_ctrl)))
2897 return -EINVAL;
2898
2899 #ifdef CONFIG_X86_64
2900 ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE);
2901 #else
2902 ia32e = false;
2903 #endif
2904
2905 if (ia32e) {
2906 if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
2907 return -EINVAL;
2908 } else {
2909 if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
2910 CC(vmcs12->host_cr4 & X86_CR4_PCIDE) ||
2911 CC((vmcs12->host_rip) >> 32))
2912 return -EINVAL;
2913 }
2914
2915 if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2916 CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2917 CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2918 CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2919 CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2920 CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2921 CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2922 CC(vmcs12->host_cs_selector == 0) ||
2923 CC(vmcs12->host_tr_selector == 0) ||
2924 CC(vmcs12->host_ss_selector == 0 && !ia32e))
2925 return -EINVAL;
2926
2927 if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) ||
2928 CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) ||
2929 CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) ||
2930 CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) ||
2931 CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) ||
2932 CC(is_noncanonical_address(vmcs12->host_rip, vcpu)))
2933 return -EINVAL;
2934
2935 /*
2936 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
2937 * IA32_EFER MSR must be 0 in the field for that register. In addition,
2938 * the values of the LMA and LME bits in the field must each be that of
2939 * the host address-space size VM-exit control.
2940 */
2941 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
2942 if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) ||
2943 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) ||
2944 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)))
2945 return -EINVAL;
2946 }
2947
2948 return 0;
2949 }
2950
2951 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
2952 struct vmcs12 *vmcs12)
2953 {
2954 int r = 0;
2955 struct vmcs12 *shadow;
2956 struct kvm_host_map map;
2957
2958 if (vmcs12->vmcs_link_pointer == -1ull)
2959 return 0;
2960
2961 if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)))
2962 return -EINVAL;
2963
2964 if (CC(kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map)))
2965 return -EINVAL;
2966
2967 shadow = map.hva;
2968
2969 if (CC(shadow->hdr.revision_id != VMCS12_REVISION) ||
2970 CC(shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)))
2971 r = -EINVAL;
2972
2973 kvm_vcpu_unmap(vcpu, &map, false);
2974 return r;
2975 }
2976
2977 /*
2978 * Checks related to Guest Non-register State
2979 */
2980 static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
2981 {
2982 if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
2983 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT &&
2984 vmcs12->guest_activity_state != GUEST_ACTIVITY_WAIT_SIPI))
2985 return -EINVAL;
2986
2987 return 0;
2988 }
2989
2990 static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
2991 struct vmcs12 *vmcs12,
2992 enum vm_entry_failure_code *entry_failure_code)
2993 {
2994 bool ia32e = !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE);
2995
2996 *entry_failure_code = ENTRY_FAIL_DEFAULT;
2997
2998 if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) ||
2999 CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)))
3000 return -EINVAL;
3001
3002 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) &&
3003 CC(!kvm_dr7_valid(vmcs12->guest_dr7)))
3004 return -EINVAL;
3005
3006 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
3007 CC(!kvm_pat_valid(vmcs12->guest_ia32_pat)))
3008 return -EINVAL;
3009
3010 if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
3011 *entry_failure_code = ENTRY_FAIL_VMCS_LINK_PTR;
3012 return -EINVAL;
3013 }
3014
3015 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
3016 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu),
3017 vmcs12->guest_ia32_perf_global_ctrl)))
3018 return -EINVAL;
3019
3020 if (CC((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG))
3021 return -EINVAL;
3022
3023 if (CC(ia32e && !(vmcs12->guest_cr4 & X86_CR4_PAE)) ||
3024 CC(ia32e && !(vmcs12->guest_cr0 & X86_CR0_PG)))
3025 return -EINVAL;
3026
3027 /*
3028 * If the load IA32_EFER VM-entry control is 1, the following checks
3029 * are performed on the field for the IA32_EFER MSR:
3030 * - Bits reserved in the IA32_EFER MSR must be 0.
3031 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
3032 * the IA-32e mode guest VM-exit control. It must also be identical
3033 * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
3034 * CR0.PG) is 1.
3035 */
3036 if (to_vmx(vcpu)->nested.nested_run_pending &&
3037 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
3038 if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
3039 CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
3040 CC(((vmcs12->guest_cr0 & X86_CR0_PG) &&
3041 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))))
3042 return -EINVAL;
3043 }
3044
3045 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
3046 (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) ||
3047 CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))))
3048 return -EINVAL;
3049
3050 if (nested_check_guest_non_reg_state(vmcs12))
3051 return -EINVAL;
3052
3053 return 0;
3054 }
3055
3056 static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
3057 {
3058 struct vcpu_vmx *vmx = to_vmx(vcpu);
3059 unsigned long cr3, cr4;
3060 bool vm_fail;
3061
3062 if (!nested_early_check)
3063 return 0;
3064
3065 if (vmx->msr_autoload.host.nr)
3066 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
3067 if (vmx->msr_autoload.guest.nr)
3068 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
3069
3070 preempt_disable();
3071
3072 vmx_prepare_switch_to_guest(vcpu);
3073
3074 /*
3075 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
3076 * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to
3077 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
3078 * there is no need to preserve other bits or save/restore the field.
3079 */
3080 vmcs_writel(GUEST_RFLAGS, 0);
3081
3082 cr3 = __get_current_cr3_fast();
3083 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
3084 vmcs_writel(HOST_CR3, cr3);
3085 vmx->loaded_vmcs->host_state.cr3 = cr3;
3086 }
3087
3088 cr4 = cr4_read_shadow();
3089 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
3090 vmcs_writel(HOST_CR4, cr4);
3091 vmx->loaded_vmcs->host_state.cr4 = cr4;
3092 }
3093
3094 vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
3095 __vmx_vcpu_run_flags(vmx));
3096
3097 if (vmx->msr_autoload.host.nr)
3098 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
3099 if (vmx->msr_autoload.guest.nr)
3100 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
3101
3102 if (vm_fail) {
3103 u32 error = vmcs_read32(VM_INSTRUCTION_ERROR);
3104
3105 preempt_enable();
3106
3107 trace_kvm_nested_vmenter_failed(
3108 "early hardware check VM-instruction error: ", error);
3109 WARN_ON_ONCE(error != VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3110 return 1;
3111 }
3112
3113 /*
3114 * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
3115 */
3116 if (hw_breakpoint_active())
3117 set_debugreg(__this_cpu_read(cpu_dr7), 7);
3118 local_irq_enable();
3119 preempt_enable();
3120
3121 /*
3122 * A non-failing VMEntry means we somehow entered guest mode with
3123 * an illegal RIP, and that's just the tip of the iceberg. There
3124 * is no telling what memory has been modified or what state has
3125 * been exposed to unknown code. Hitting this all but guarantees
3126 * a (very critical) hardware issue.
3127 */
3128 WARN_ON(!(vmcs_read32(VM_EXIT_REASON) &
3129 VMX_EXIT_REASONS_FAILED_VMENTRY));
3130
3131 return 0;
3132 }
3133
3134 static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
3135 {
3136 struct vcpu_vmx *vmx = to_vmx(vcpu);
3137
3138 /*
3139 * hv_evmcs may end up being not mapped after migration (when
3140 * L2 was running), map it here to make sure vmcs12 changes are
3141 * properly reflected.
3142 */
3143 if (vmx->nested.enlightened_vmcs_enabled &&
3144 vmx->nested.hv_evmcs_vmptr == EVMPTR_MAP_PENDING) {
3145 enum nested_evmptrld_status evmptrld_status =
3146 nested_vmx_handle_enlightened_vmptrld(vcpu, false);
3147
3148 if (evmptrld_status == EVMPTRLD_VMFAIL ||
3149 evmptrld_status == EVMPTRLD_ERROR)
3150 return false;
3151
3152 /*
3153 * The post-migration vmcs12 always holds the most up-to-date
3154 * state; copy it back to the eVMCS on entry.
3155 */
3156 vmx->nested.need_vmcs12_to_shadow_sync = true;
3157 }
3158
3159 return true;
3160 }
3161
3162 static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
3163 {
3164 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3165 struct vcpu_vmx *vmx = to_vmx(vcpu);
3166 struct kvm_host_map *map;
3167 struct page *page;
3168 u64 hpa;
3169
3170 if (!vcpu->arch.pdptrs_from_userspace &&
3171 !nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
3172 /*
3173 * Reload the guest's PDPTRs: after a migration, the guest CR3
3174 * may be restored before the nested state is set, which can
3175 * lead to the wrong PDPTRs being loaded.
3176 */
3177 if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)))
3178 return false;
3179 }
3180
3181
3182 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3183 /*
3184 * Translate L1 physical address to host physical
3185 * address for vmcs02. Keep the page pinned, so this
3186 * physical address remains valid. We keep a reference
3187 * to it so we can release it later.
3188 */
3189 if (vmx->nested.apic_access_page) { /* shouldn't happen */
3190 kvm_release_page_clean(vmx->nested.apic_access_page);
3191 vmx->nested.apic_access_page = NULL;
3192 }
3193 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
3194 if (!is_error_page(page)) {
3195 vmx->nested.apic_access_page = page;
3196 hpa = page_to_phys(vmx->nested.apic_access_page);
3197 vmcs_write64(APIC_ACCESS_ADDR, hpa);
3198 } else {
3199 pr_debug_ratelimited("%s: no backing 'struct page' for APIC-access address in vmcs12\n",
3200 __func__);
3201 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3202 vcpu->run->internal.suberror =
3203 KVM_INTERNAL_ERROR_EMULATION;
3204 vcpu->run->internal.ndata = 0;
3205 return false;
3206 }
3207 }
3208
3209 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3210 map = &vmx->nested.virtual_apic_map;
3211
3212 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) {
3213 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn));
3214 } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
3215 nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
3216 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3217 /*
3218 * The processor will never use the TPR shadow in this
3219 * configuration, so simply clear the bit from the execution
3220 * controls. Such a configuration is useless, but it happens
3221 * in tests. For any other configuration, failing the VM entry
3222 * is _not_ what the processor does, but it's basically the
3223 * only option we have.
3224 */
3225 exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW);
3226 } else {
3227 /*
3228 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to
3229 * force VM-Entry to fail.
3230 */
3231 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
3232 }
3233 }
3234
3235 if (nested_cpu_has_posted_intr(vmcs12)) {
3236 map = &vmx->nested.pi_desc_map;
3237
3238 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) {
3239 vmx->nested.pi_desc =
3240 (struct pi_desc *)(((void *)map->hva) +
3241 offset_in_page(vmcs12->posted_intr_desc_addr));
3242 vmcs_write64(POSTED_INTR_DESC_ADDR,
3243 pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
3244 } else {
3245 /*
3246 * Defer the KVM_INTERNAL_EXIT until KVM tries to
3247 * access the contents of the VMCS12 posted interrupt
3248 * descriptor. (Note that KVM may do this when it
3249 * should not, per the architectural specification.)
3250 */
3251 vmx->nested.pi_desc = NULL;
3252 pin_controls_clearbit(vmx, PIN_BASED_POSTED_INTR);
3253 }
3254 }
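	/*
	 * Use the MSR bitmap in vmcs02 only if the vmcs01 and vmcs12
	 * bitmaps were successfully merged; otherwise intercept all MSR
	 * accesses for L2.
	 */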
3255 if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
3256 exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
3257 else
3258 exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
3259
3260 return true;
3261 }
3262
3263 static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
3264 {
3265 if (!nested_get_evmcs_page(vcpu)) {
3266 pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
3267 __func__);
3268 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3269 vcpu->run->internal.suberror =
3270 KVM_INTERNAL_ERROR_EMULATION;
3271 vcpu->run->internal.ndata = 0;
3272
3273 return false;
3274 }
3275
3276 if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
3277 return false;
3278
3279 return true;
3280 }
3281
3282 static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
3283 {
3284 struct vmcs12 *vmcs12;
3285 struct vcpu_vmx *vmx = to_vmx(vcpu);
3286 gpa_t dst;
3287
3288 if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
3289 return 0;
3290
3291 if (WARN_ON_ONCE(vmx->nested.pml_full))
3292 return 1;
3293
3294 /*
3295 * Check if PML is enabled for the nested guest. Whether eptp bit 6 is
3296 * set is already checked as part of A/D emulation.
3297 */
3298 vmcs12 = get_vmcs12(vcpu);
3299 if (!nested_cpu_has_pml(vmcs12))
3300 return 0;
3301
3302 if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
3303 vmx->nested.pml_full = true;
3304 return 1;
3305 }
3306
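	/*
	 * Mirror hardware behavior: log the 4KiB-aligned GPA in the slot
	 * indicated by L1's PML index, then decrement the index.
	 */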
3307 gpa &= ~0xFFFull;
3308 dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
3309
3310 if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
3311 offset_in_page(dst), sizeof(gpa)))
3312 return 0;
3313
3314 vmcs12->guest_pml_index--;
3315
3316 return 0;
3317 }
3318
3319 /*
3320 * Intel's VMX Instruction Reference specifies a common set of prerequisites
3321 * for running VMX instructions (except VMXON, whose prerequisites are
3322 * slightly different). It also specifies what exception to inject otherwise.
3323 * Note that many of these exceptions have priority over VM exits, so they
3324 * don't have to be checked again here.
3325 */
3326 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
3327 {
3328 if (!to_vmx(vcpu)->nested.vmxon) {
3329 kvm_queue_exception(vcpu, UD_VECTOR);
3330 return 0;
3331 }
3332
3333 if (vmx_get_cpl(vcpu)) {
3334 kvm_inject_gp(vcpu, 0);
3335 return 0;
3336 }
3337
3338 return 1;
3339 }
3340
3341 static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
3342 {
3343 u8 rvi = vmx_get_rvi();
3344 u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
3345
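	/* An interrupt is pending if RVI's priority class exceeds the PPR's. */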
3346 return ((rvi & 0xf0) > (vppr & 0xf0));
3347 }
3348
3349 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
3350 struct vmcs12 *vmcs12);
3351
3352 /*
3353 * If from_vmentry is false, this is being called from state restore (either RSM
3354 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
3355 *
3356 * Returns:
3357 * NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode
3358 * NVMX_VMENTRY_VMFAIL: Consistency check VMFail
3359 * NVMX_VMENTRY_VMEXIT: Consistency check VMExit
3360 * NVMX_VMENTRY_KVM_INTERNAL_ERROR: KVM internal error
3361 */
3362 enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
3363 bool from_vmentry)
3364 {
3365 struct vcpu_vmx *vmx = to_vmx(vcpu);
3366 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3367 enum vm_entry_failure_code entry_failure_code;
3368 bool evaluate_pending_interrupts;
3369 union vmx_exit_reason exit_reason = {
3370 .basic = EXIT_REASON_INVALID_STATE,
3371 .failed_vmentry = 1,
3372 };
3373 u32 failed_index;
3374
3375 kvm_service_local_tlb_flush_requests(vcpu);
3376
3377 evaluate_pending_interrupts = exec_controls_get(vmx) &
3378 (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
3379 if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
3380 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
3381
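	/*
	 * Cache vmcs01's DEBUGCTL and BNDCFGS so that prepare_vmcs02() can
	 * carry them into vmcs02 when L1's VM-entry controls don't load
	 * fresh values.
	 */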
3382 if (!vmx->nested.nested_run_pending ||
3383 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
3384 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
3385 if (kvm_mpx_supported() &&
3386 (!vmx->nested.nested_run_pending ||
3387 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
3388 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
3389
3390 /*
3391 * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and*
3392 * nested early checks are disabled. In the event of a "late" VM-Fail,
3393 * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its
3394 * software model to the pre-VMEntry host state. When EPT is disabled,
3395 * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes
3396 * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing
3397 * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to
3398 * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested
3399 * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is
3400 * guaranteed to be overwritten with a shadow CR3 prior to re-entering
3401 * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as
3402 * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks
3403 * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail
3404 * path would need to manually save/restore vmcs01.GUEST_CR3.
3405 */
3406 if (!enable_ept && !nested_early_check)
3407 vmcs_writel(GUEST_CR3, vcpu->arch.cr3);
3408
3409 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
3410
3411 prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12);
3412
3413 if (from_vmentry) {
3414 if (unlikely(!nested_get_vmcs12_pages(vcpu))) {
3415 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3416 return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
3417 }
3418
3419 if (nested_vmx_check_vmentry_hw(vcpu)) {
3420 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3421 return NVMX_VMENTRY_VMFAIL;
3422 }
3423
3424 if (nested_vmx_check_guest_state(vcpu, vmcs12,
3425 &entry_failure_code)) {
3426 exit_reason.basic = EXIT_REASON_INVALID_STATE;
3427 vmcs12->exit_qualification = entry_failure_code;
3428 goto vmentry_fail_vmexit;
3429 }
3430 }
3431
3432 enter_guest_mode(vcpu);
3433
3434 if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &entry_failure_code)) {
3435 exit_reason.basic = EXIT_REASON_INVALID_STATE;
3436 vmcs12->exit_qualification = entry_failure_code;
3437 goto vmentry_fail_vmexit_guest_mode;
3438 }
3439
3440 if (from_vmentry) {
3441 failed_index = nested_vmx_load_msr(vcpu,
3442 vmcs12->vm_entry_msr_load_addr,
3443 vmcs12->vm_entry_msr_load_count);
3444 if (failed_index) {
3445 exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL;
3446 vmcs12->exit_qualification = failed_index;
3447 goto vmentry_fail_vmexit_guest_mode;
3448 }
3449 } else {
3450 /*
3451 * The MMU is not initialized to point at the right entities yet and
3452 * "get pages" would need to read data from the guest (i.e. we will
3453 * need to perform gpa to hpa translation). Request a call
3454 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs
3455 * have already been set at vmentry time and should not be reset.
3456 */
3457 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
3458 }
3459
3460 /*
3461 * If L1 had a pending IRQ/NMI when it executed
3462 * VMLAUNCH/VMRESUME that wasn't delivered because delivery was
3463 * disallowed (e.g. interrupts disabled), L0 needs to evaluate
3464 * whether this pending event should cause an exit from L2 to
3465 * L1 or be delivered directly to L2 (e.g. if L1 doesn't
3466 * intercept EXTERNAL_INTERRUPT).
3467 *
3468 * Usually this would be handled by the processor noticing an
3469 * IRQ/NMI window request, or checking RVI during evaluation of
3470 * pending virtual interrupts. However, this setting was done
3471 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
3472 * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
3473 */
3474 if (unlikely(evaluate_pending_interrupts))
3475 kvm_make_request(KVM_REQ_EVENT, vcpu);
3476
3477 /*
3478 * Do not start the preemption timer hrtimer until after we know
3479 * we are successful, so that only nested_vmx_vmexit needs to cancel
3480 * the timer.
3481 */
3482 vmx->nested.preemption_timer_expired = false;
3483 if (nested_cpu_has_preemption_timer(vmcs12)) {
3484 u64 timer_value = vmx_calc_preemption_timer_value(vcpu);
3485 vmx_start_preemption_timer(vcpu, timer_value);
3486 }
3487
3488 /*
3489 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
3490 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
3491 * returned as far as L1 is concerned. It will only return (and set
3492 * the success flag) when L2 exits (see nested_vmx_vmexit()).
3493 */
3494 return NVMX_VMENTRY_SUCCESS;
3495
3496 /*
3497 * A failed consistency check that leads to a VMExit during L1's
3498 * VMEnter to L2 is a variation of a normal VMexit, as explained in
3499 * 26.7 "VM-entry failures during or after loading guest state".
3500 */
3501 vmentry_fail_vmexit_guest_mode:
3502 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
3503 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
3504 leave_guest_mode(vcpu);
3505
3506 vmentry_fail_vmexit:
3507 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3508
3509 if (!from_vmentry)
3510 return NVMX_VMENTRY_VMEXIT;
3511
3512 load_vmcs12_host_state(vcpu, vmcs12);
3513 vmcs12->vm_exit_reason = exit_reason.full;
3514 if (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
3515 vmx->nested.need_vmcs12_to_shadow_sync = true;
3516 return NVMX_VMENTRY_VMEXIT;
3517 }
3518
3519 /*
3520 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
3521 * for running an L2 nested guest.
3522 */
3523 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
3524 {
3525 struct vmcs12 *vmcs12;
3526 enum nvmx_vmentry_status status;
3527 struct vcpu_vmx *vmx = to_vmx(vcpu);
3528 u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
3529 enum nested_evmptrld_status evmptrld_status;
3530
3531 if (!nested_vmx_check_permission(vcpu))
3532 return 1;
3533
3534 evmptrld_status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch);
3535 if (evmptrld_status == EVMPTRLD_ERROR) {
3536 kvm_queue_exception(vcpu, UD_VECTOR);
3537 return 1;
3538 } else if (CC(evmptrld_status == EVMPTRLD_VMFAIL)) {
3539 return nested_vmx_failInvalid(vcpu);
3540 }
3541
3542 if (CC(!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) &&
3543 vmx->nested.current_vmptr == -1ull))
3544 return nested_vmx_failInvalid(vcpu);
3545
3546 vmcs12 = get_vmcs12(vcpu);
3547
3548 /*
3549 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
3550 * that there *is* a valid VMCS pointer, RFLAGS.CF is set
3551 * rather than RFLAGS.ZF, and no error number is stored to the
3552 * VM-instruction error field.
3553 */
3554 if (CC(vmcs12->hdr.shadow_vmcs))
3555 return nested_vmx_failInvalid(vcpu);
3556
3557 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
3558 copy_enlightened_to_vmcs12(vmx, vmx->nested.hv_evmcs->hv_clean_fields);
3559 /* Enlightened VMCS doesn't have launch state */
3560 vmcs12->launch_state = !launch;
3561 } else if (enable_shadow_vmcs) {
3562 copy_shadow_to_vmcs12(vmx);
3563 }
3564
3565 /*
3566 * The nested entry process starts with enforcing various prerequisites
3567 * on vmcs12 as required by the Intel SDM, and acting appropriately when
3568 * they fail: as the SDM explains, some conditions should cause the
3569 * instruction to fail, while others will cause the instruction to seem
3570 * to succeed but deliver an EXIT_REASON_INVALID_STATE exit.
3571 * To speed up the normal (success) code path, we avoid checking for
3572 * misconfigurations that will be caught by the processor anyway when
3573 * it uses the merged vmcs02.
3574 */
3575 if (CC(interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS))
3576 return nested_vmx_fail(vcpu, VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
3577
3578 if (CC(vmcs12->launch_state == launch))
3579 return nested_vmx_fail(vcpu,
3580 launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
3581 : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
3582
3583 if (nested_vmx_check_controls(vcpu, vmcs12))
3584 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3585
3586 if (nested_vmx_check_address_space_size(vcpu, vmcs12))
3587 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
3588
3589 if (nested_vmx_check_host_state(vcpu, vmcs12))
3590 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
3591
3592 /*
3593 * We're finally done with prerequisite checking, and can start with
3594 * the nested entry.
3595 */
3596 vmx->nested.nested_run_pending = 1;
3597 vmx->nested.has_preemption_timer_deadline = false;
3598 status = nested_vmx_enter_non_root_mode(vcpu, true);
3599 if (unlikely(status != NVMX_VMENTRY_SUCCESS))
3600 goto vmentry_failed;
3601
3602 /* Emulate processing of posted interrupts on VM-Enter. */
3603 if (nested_cpu_has_posted_intr(vmcs12) &&
3604 kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) {
3605 vmx->nested.pi_pending = true;
3606 kvm_make_request(KVM_REQ_EVENT, vcpu);
3607 kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv);
3608 }
3609
3610 /* Hide L1D cache contents from the nested guest. */
3611 vmx->vcpu.arch.l1tf_flush_l1d = true;
3612
3613 /*
3614 * Must happen outside of nested_vmx_enter_non_root_mode() as it will
3615 * also be used as part of restoring nVMX state for
3616 * snapshot restore (migration).
3617 *
3618 * In this flow, it is assumed that the vmcs12 cache was
3619 * transferred as part of the captured nVMX state and should
3620 * therefore not be read from guest memory (which may not
3621 * exist on the destination host yet).
3622 */
3623 nested_cache_shadow_vmcs12(vcpu, vmcs12);
3624
3625 switch (vmcs12->guest_activity_state) {
3626 case GUEST_ACTIVITY_HLT:
3627 /*
3628 * If we're entering a halted L2 vcpu and the L2 vcpu won't be
3629 * awakened by event injection or by an NMI-window VM-exit or
3630 * by an interrupt-window VM-exit, halt the vcpu.
3631 */
3632 if (!(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
3633 !nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING) &&
3634 !(nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING) &&
3635 (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
3636 vmx->nested.nested_run_pending = 0;
3637 return kvm_vcpu_halt(vcpu);
3638 }
3639 break;
3640 case GUEST_ACTIVITY_WAIT_SIPI:
3641 vmx->nested.nested_run_pending = 0;
3642 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
3643 break;
3644 default:
3645 break;
3646 }
3647
3648 return 1;
3649
3650 vmentry_failed:
3651 vmx->nested.nested_run_pending = 0;
3652 if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR)
3653 return 0;
3654 if (status == NVMX_VMENTRY_VMEXIT)
3655 return 1;
3656 WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL);
3657 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3658 }
3659
3660 /*
3661 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
3662 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
3663 * This function returns the new value we should put in vmcs12.guest_cr0.
3664 * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
3665 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
3666 * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
3667 * didn't trap the bit, because if L1 did, so would L0).
3668 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
3669 * been modified by L2, and L1 knows it. So just leave the old value of
3670 * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
3671 * isn't relevant, because if L0 traps this bit it can set it to anything.
3672 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
3673 * changed these bits, and therefore they need to be updated, but L0
3674 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather
3675 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
3676 */
3677 static inline unsigned long
3678 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3679 {
3680 return
3681 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
3682 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
3683 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
3684 vcpu->arch.cr0_guest_owned_bits));
3685 }
3686
3687 static inline unsigned long
3688 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3689 {
3690 return
3691 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
3692 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
3693 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
3694 vcpu->arch.cr4_guest_owned_bits));
3695 }
3696
3697 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
3698 struct vmcs12 *vmcs12,
3699 u32 vm_exit_reason, u32 exit_intr_info)
3700 {
3701 u32 idt_vectoring;
3702 unsigned int nr;
3703
3704 /*
3705 * Per the SDM, VM-Exits due to double and triple faults are never
3706 * considered to occur during event delivery, even if the double/triple
3707 * fault is the result of an escalating vectoring issue.
3708 *
3709 * Note, the SDM qualifies the double fault behavior with "The original
3710 * event results in a double-fault exception". It's unclear why the
3711 * qualification exists since exits due to double fault can occur only
3712 * while vectoring a different exception (injected events are never
3713 * subject to interception), i.e. there's _always_ an original event.
3714 *
3715 * The SDM also uses NMI as a confusing example for the "original event
3716 * causes the VM exit directly" clause. NMI isn't special in any way,
3717 * the same rule applies to all events that cause an exit directly.
3718 * NMI is an odd choice for the example because NMIs can only occur on
3719 * instruction boundaries, i.e. they _can't_ occur during vectoring.
3720 */
3721 if ((u16)vm_exit_reason == EXIT_REASON_TRIPLE_FAULT ||
3722 ((u16)vm_exit_reason == EXIT_REASON_EXCEPTION_NMI &&
3723 is_double_fault(exit_intr_info))) {
3724 vmcs12->idt_vectoring_info_field = 0;
3725 } else if (vcpu->arch.exception.injected) {
3726 nr = vcpu->arch.exception.nr;
3727 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3728
3729 if (kvm_exception_is_soft(nr)) {
3730 vmcs12->vm_exit_instruction_len =
3731 vcpu->arch.event_exit_inst_len;
3732 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
3733 } else
3734 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
3735
3736 if (vcpu->arch.exception.has_error_code) {
3737 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
3738 vmcs12->idt_vectoring_error_code =
3739 vcpu->arch.exception.error_code;
3740 }
3741
3742 vmcs12->idt_vectoring_info_field = idt_vectoring;
3743 } else if (vcpu->arch.nmi_injected) {
3744 vmcs12->idt_vectoring_info_field =
3745 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
3746 } else if (vcpu->arch.interrupt.injected) {
3747 nr = vcpu->arch.interrupt.nr;
3748 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3749
3750 if (vcpu->arch.interrupt.soft) {
3751 idt_vectoring |= INTR_TYPE_SOFT_INTR;
3752 vmcs12->vm_entry_instruction_len =
3753 vcpu->arch.event_exit_inst_len;
3754 } else
3755 idt_vectoring |= INTR_TYPE_EXT_INTR;
3756
3757 vmcs12->idt_vectoring_info_field = idt_vectoring;
3758 } else {
3759 vmcs12->idt_vectoring_info_field = 0;
3760 }
3761 }
3762
3763
3764 void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
3765 {
3766 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3767 gfn_t gfn;
3768
3769 /*
3770 * Don't need to mark the APIC access page dirty; it is never
3771 * written to by the CPU during APIC virtualization.
3772 */
3773
3774 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3775 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
3776 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3777 }
3778
3779 if (nested_cpu_has_posted_intr(vmcs12)) {
3780 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
3781 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3782 }
3783 }
3784
3785 static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
3786 {
3787 struct vcpu_vmx *vmx = to_vmx(vcpu);
3788 int max_irr;
3789 void *vapic_page;
3790 u16 status;
3791
3792 if (!vmx->nested.pi_pending)
3793 return 0;
3794
3795 if (!vmx->nested.pi_desc)
3796 goto mmio_needed;
3797
3798 vmx->nested.pi_pending = false;
3799
3800 if (!pi_test_and_clear_on(vmx->nested.pi_desc))
3801 return 0;
3802
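	/*
	 * Find the highest pending vector in the 256-bit posted-interrupt
	 * request bitmap; find_last_bit() returns 256 if the PIR is empty.
	 */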
3803 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
3804 if (max_irr != 256) {
3805 vapic_page = vmx->nested.virtual_apic_map.hva;
3806 if (!vapic_page)
3807 goto mmio_needed;
3808
3809 __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
3810 vapic_page, &max_irr);
3811 status = vmcs_read16(GUEST_INTR_STATUS);
3812 if ((u8)max_irr > ((u8)status & 0xff)) {
3813 status &= ~0xff;
3814 status |= (u8)max_irr;
3815 vmcs_write16(GUEST_INTR_STATUS, status);
3816 }
3817 }
3818
3819 nested_mark_vmcs12_pages_dirty(vcpu);
3820 return 0;
3821
3822 mmio_needed:
3823 kvm_handle_memory_failure(vcpu, X86EMUL_IO_NEEDED, NULL);
3824 return -ENXIO;
3825 }
3826
3827 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
3828 unsigned long exit_qual)
3829 {
3830 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3831 unsigned int nr = vcpu->arch.exception.nr;
3832 u32 intr_info = nr | INTR_INFO_VALID_MASK;
3833
3834 if (vcpu->arch.exception.has_error_code) {
3835 /*
3836 * Intel CPUs do not generate error codes with bits 31:16 set,
3837 * and more importantly VMX disallows setting bits 31:16 in the
3838 * injected error code for VM-Entry. Drop the bits to mimic
3839 * hardware and avoid inducing failure on nested VM-Entry if L1
3840 * chooses to inject the exception back to L2. AMD CPUs _do_
3841 * generate "full" 32-bit error codes, so KVM allows userspace
3842 * to inject exception error codes with bits 31:16 set.
3843 */
3844 vmcs12->vm_exit_intr_error_code = (u16)vcpu->arch.exception.error_code;
3845 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
3846 }
3847
3848 if (kvm_exception_is_soft(nr))
3849 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3850 else
3851 intr_info |= INTR_TYPE_HARD_EXCEPTION;
3852
3853 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
3854 vmx_get_nmi_mask(vcpu))
3855 intr_info |= INTR_INFO_UNBLOCK_NMI;
3856
3857 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
3858 }
3859
3860 /*
3861 * Returns true if a debug trap is pending delivery.
3862 *
3863 * In KVM, debug traps bear an exception payload. As such, the class of a #DB
3864 * exception may be inferred from the presence of an exception payload.
3865 */
3866 static inline bool vmx_pending_dbg_trap(struct kvm_vcpu *vcpu)
3867 {
3868 return vcpu->arch.exception.pending &&
3869 vcpu->arch.exception.nr == DB_VECTOR &&
3870 vcpu->arch.exception.payload;
3871 }
3872
3873 /*
3874 * Certain VM-exits set the 'pending debug exceptions' field to indicate a
3875 * recognized #DB (data or single-step) that has yet to be delivered. Since KVM
3876 * represents these debug traps with a payload that is said to be compatible
3877 * with the 'pending debug exceptions' field, write the payload to the VMCS
3878 * field if a VM-exit is delivered before the debug trap.
3879 */
3880 static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu)
3881 {
3882 if (vmx_pending_dbg_trap(vcpu))
3883 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
3884 vcpu->arch.exception.payload);
3885 }
3886
3887 static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
3888 {
3889 return nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
3890 to_vmx(vcpu)->nested.preemption_timer_expired;
3891 }
3892
3893 static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
3894 {
3895 struct vcpu_vmx *vmx = to_vmx(vcpu);
3896 unsigned long exit_qual;
3897 bool block_nested_events =
3898 vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
3899 bool mtf_pending = vmx->nested.mtf_pending;
3900 struct kvm_lapic *apic = vcpu->arch.apic;
3901
3902 /*
3903 * Clear the MTF state. If a higher priority VM-exit is delivered first,
3904 * this state is discarded.
3905 */
3906 if (!block_nested_events)
3907 vmx->nested.mtf_pending = false;
3908
3909 if (lapic_in_kernel(vcpu) &&
3910 test_bit(KVM_APIC_INIT, &apic->pending_events)) {
3911 if (block_nested_events)
3912 return -EBUSY;
3913 nested_vmx_update_pending_dbg(vcpu);
3914 clear_bit(KVM_APIC_INIT, &apic->pending_events);
3915 if (vcpu->arch.mp_state != KVM_MP_STATE_INIT_RECEIVED)
3916 nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0);
3917 return 0;
3918 }
3919
3920 if (lapic_in_kernel(vcpu) &&
3921 test_bit(KVM_APIC_SIPI, &apic->pending_events)) {
3922 if (block_nested_events)
3923 return -EBUSY;
3924
3925 clear_bit(KVM_APIC_SIPI, &apic->pending_events);
3926 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
3927 nested_vmx_vmexit(vcpu, EXIT_REASON_SIPI_SIGNAL, 0,
3928 apic->sipi_vector & 0xFFUL);
3929 return 0;
3930 }
3931
3932 /*
3933 * Process any exceptions that are not debug traps before MTF.
3934 *
3935 * Note that only a pending nested run can block a pending exception.
3936 * Otherwise an injected NMI/interrupt should either be
3937 * lost or be delivered to the nested hypervisor via IDT_VECTORING_INFO
3938 * while the pending exception is being delivered.
3939 */
3940
3941 if (vcpu->arch.exception.pending && !vmx_pending_dbg_trap(vcpu)) {
3942 if (vmx->nested.nested_run_pending)
3943 return -EBUSY;
3944 if (!nested_vmx_check_exception(vcpu, &exit_qual))
3945 goto no_vmexit;
3946 nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
3947 return 0;
3948 }
3949
3950 if (mtf_pending) {
3951 if (block_nested_events)
3952 return -EBUSY;
3953 nested_vmx_update_pending_dbg(vcpu);
3954 nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0);
3955 return 0;
3956 }
3957
3958 if (vcpu->arch.exception.pending) {
3959 if (vmx->nested.nested_run_pending)
3960 return -EBUSY;
3961 if (!nested_vmx_check_exception(vcpu, &exit_qual))
3962 goto no_vmexit;
3963 nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
3964 return 0;
3965 }
3966
3967 if (nested_vmx_preemption_timer_pending(vcpu)) {
3968 if (block_nested_events)
3969 return -EBUSY;
3970 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
3971 return 0;
3972 }
3973
3974 if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
3975 if (block_nested_events)
3976 return -EBUSY;
3977 goto no_vmexit;
3978 }
3979
3980 if (vcpu->arch.nmi_pending && !vmx_nmi_blocked(vcpu)) {
3981 if (block_nested_events)
3982 return -EBUSY;
3983 if (!nested_exit_on_nmi(vcpu))
3984 goto no_vmexit;
3985
3986 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
3987 NMI_VECTOR | INTR_TYPE_NMI_INTR |
3988 INTR_INFO_VALID_MASK, 0);
3989 /*
3990 * The NMI-triggered VM exit counts as injection:
3991 * clear this one and block further NMIs.
3992 */
3993 vcpu->arch.nmi_pending = 0;
3994 vmx_set_nmi_mask(vcpu, true);
3995 return 0;
3996 }
3997
3998 if (kvm_cpu_has_interrupt(vcpu) && !vmx_interrupt_blocked(vcpu)) {
3999 if (block_nested_events)
4000 return -EBUSY;
4001 if (!nested_exit_on_intr(vcpu))
4002 goto no_vmexit;
4003 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
4004 return 0;
4005 }
4006
4007 no_vmexit:
4008 return vmx_complete_nested_posted_interrupt(vcpu);
4009 }
4010
4011 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
4012 {
4013 ktime_t remaining =
4014 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
4015 u64 value;
4016
4017 if (ktime_to_ns(remaining) <= 0)
4018 return 0;
4019
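	/*
	 * Convert the remaining time to guest TSC ticks, then scale down by
	 * the emulated preemption timer rate (TSC >> 5).
	 */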
4020 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
4021 do_div(value, 1000000);
4022 return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
4023 }
4024
4025 static bool is_vmcs12_ext_field(unsigned long field)
4026 {
4027 switch (field) {
4028 case GUEST_ES_SELECTOR:
4029 case GUEST_CS_SELECTOR:
4030 case GUEST_SS_SELECTOR:
4031 case GUEST_DS_SELECTOR:
4032 case GUEST_FS_SELECTOR:
4033 case GUEST_GS_SELECTOR:
4034 case GUEST_LDTR_SELECTOR:
4035 case GUEST_TR_SELECTOR:
4036 case GUEST_ES_LIMIT:
4037 case GUEST_CS_LIMIT:
4038 case GUEST_SS_LIMIT:
4039 case GUEST_DS_LIMIT:
4040 case GUEST_FS_LIMIT:
4041 case GUEST_GS_LIMIT:
4042 case GUEST_LDTR_LIMIT:
4043 case GUEST_TR_LIMIT:
4044 case GUEST_GDTR_LIMIT:
4045 case GUEST_IDTR_LIMIT:
4046 case GUEST_ES_AR_BYTES:
4047 case GUEST_DS_AR_BYTES:
4048 case GUEST_FS_AR_BYTES:
4049 case GUEST_GS_AR_BYTES:
4050 case GUEST_LDTR_AR_BYTES:
4051 case GUEST_TR_AR_BYTES:
4052 case GUEST_ES_BASE:
4053 case GUEST_CS_BASE:
4054 case GUEST_SS_BASE:
4055 case GUEST_DS_BASE:
4056 case GUEST_FS_BASE:
4057 case GUEST_GS_BASE:
4058 case GUEST_LDTR_BASE:
4059 case GUEST_TR_BASE:
4060 case GUEST_GDTR_BASE:
4061 case GUEST_IDTR_BASE:
4062 case GUEST_PENDING_DBG_EXCEPTIONS:
4063 case GUEST_BNDCFGS:
4064 return true;
4065 default:
4066 break;
4067 }
4068
4069 return false;
4070 }
4071
4072 static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
4073 struct vmcs12 *vmcs12)
4074 {
4075 struct vcpu_vmx *vmx = to_vmx(vcpu);
4076
4077 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
4078 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
4079 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
4080 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
4081 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
4082 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
4083 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
4084 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
4085 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
4086 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
4087 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
4088 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
4089 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
4090 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
4091 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
4092 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
4093 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
4094 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
4095 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
4096 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
4097 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
4098 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
4099 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
4100 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
4101 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
4102 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
4103 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
4104 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
4105 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
4106 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
4107 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
4108 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
4109 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
4110 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
4111 vmcs12->guest_pending_dbg_exceptions =
4112 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
4113 if (kvm_mpx_supported())
4114 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
4115
4116 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false;
4117 }
4118
4119 static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
4120 struct vmcs12 *vmcs12)
4121 {
4122 struct vcpu_vmx *vmx = to_vmx(vcpu);
4123 int cpu;
4124
4125 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare)
4126 return;
4127
4128
4129 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01);
4130
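	/*
	 * Temporarily load vmcs02 so its rarely-used guest fields can be
	 * read via VMREAD, then switch back to vmcs01.
	 */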
4131 cpu = get_cpu();
4132 vmx->loaded_vmcs = &vmx->nested.vmcs02;
4133 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->vmcs01);
4134
4135 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4136
4137 vmx->loaded_vmcs = &vmx->vmcs01;
4138 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->nested.vmcs02);
4139 put_cpu();
4140 }
4141
4142 /*
4143 * Update the guest state fields of vmcs12 to reflect changes that
4144 * occurred while L2 was running. (The "IA-32e mode guest" bit of the
4145 * VM-entry controls is also updated, since this is really a guest
4146 * state bit.)
4147 */
4148 static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
4149 {
4150 struct vcpu_vmx *vmx = to_vmx(vcpu);
4151
4152 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
4153 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4154
4155 vmx->nested.need_sync_vmcs02_to_vmcs12_rare =
4156 !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr);
4157
4158 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
4159 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
4160
4161 vmcs12->guest_rsp = kvm_rsp_read(vcpu);
4162 vmcs12->guest_rip = kvm_rip_read(vcpu);
4163 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
4164
4165 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
4166 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
4167
4168 vmcs12->guest_interruptibility_info =
4169 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
4170
4171 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
4172 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
4173 else if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
4174 vmcs12->guest_activity_state = GUEST_ACTIVITY_WAIT_SIPI;
4175 else
4176 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
4177
4178 if (nested_cpu_has_preemption_timer(vmcs12) &&
4179 vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER &&
4180 !vmx->nested.nested_run_pending)
4181 vmcs12->vmx_preemption_timer_value =
4182 vmx_get_preemption_timer_value(vcpu);
4183
4184 /*
4185 * In some cases (usually, nested EPT), L2 is allowed to change its
4186 * own CR3 without exiting. If it has changed it, we must keep it.
4187 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
4188 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
4189 *
4190 * Additionally, restore L2's PDPTR to vmcs12.
4191 */
4192 if (enable_ept) {
4193 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
4194 if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
4195 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
4196 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
4197 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
4198 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
4199 }
4200 }
4201
4202 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
4203
4204 if (nested_cpu_has_vid(vmcs12))
4205 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
4206
4207 vmcs12->vm_entry_controls =
4208 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
4209 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
4210
4211 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS)
4212 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
4213
4214 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
4215 vmcs12->guest_ia32_efer = vcpu->arch.efer;
4216 }
4217
4218 /*
4219 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
4220 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
4221 * and this function updates it to reflect the changes to the guest state while
4222 * L2 was running (and perhaps made some exits which were handled directly by L0
4223 * without going back to L1), and to reflect the exit reason.
4224 * Note that we do not have to copy all VMCS fields here, just those that
4225 * could have been changed by the L2 guest or the exit - i.e., the guest-state and
4226 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
4227 * which already writes to vmcs12 directly.
4228 */
4229 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
4230 u32 vm_exit_reason, u32 exit_intr_info,
4231 unsigned long exit_qualification)
4232 {
4233 /* update exit information fields: */
4234 vmcs12->vm_exit_reason = vm_exit_reason;
4235 if (to_vmx(vcpu)->exit_reason.enclave_mode)
4236 vmcs12->vm_exit_reason |= VMX_EXIT_REASONS_SGX_ENCLAVE_MODE;
4237 vmcs12->exit_qualification = exit_qualification;
4238
4239 /*
4240 * On VM-Exit due to a failed VM-Entry, the VMCS isn't marked launched
4241 * and only EXIT_REASON and EXIT_QUALIFICATION are updated, all other
4242 * exit info fields are unmodified.
4243 */
4244 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
4245 vmcs12->launch_state = 1;
4246
4247 /* vm_entry_intr_info_field is cleared on exit. Emulate this
4248 * instead of reading the real value. */
4249 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
4250
4251 /*
4252 * Transfer the event that L0 or L1 may have wanted to inject into
4253 * L2 to IDT_VECTORING_INFO_FIELD.
4254 */
4255 vmcs12_save_pending_event(vcpu, vmcs12,
4256 vm_exit_reason, exit_intr_info);
4257
4258 vmcs12->vm_exit_intr_info = exit_intr_info;
4259 vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
4260 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4261
4262 /*
4263 * According to spec, there's no need to store the guest's
4264 * MSRs if the exit is due to a VM-entry failure that occurs
4265 * during or after loading the guest state. Since this exit
4266 * does not fall in that category, we need to save the MSRs.
4267 */
4268 if (nested_vmx_store_msr(vcpu,
4269 vmcs12->vm_exit_msr_store_addr,
4270 vmcs12->vm_exit_msr_store_count))
4271 nested_vmx_abort(vcpu,
4272 VMX_ABORT_SAVE_GUEST_MSR_FAIL);
4273 }
4274 }
4275
4276 /*
4277 * Part of what we need to do when the nested L2 guest exits and we want to
4278 * run its L1 parent is to reset L1's guest state to the host state specified
4279 * in vmcs12.
4280 * This function is to be called not only on normal nested exit, but also on
4281 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
4282 * Failures During or After Loading Guest State").
4283 * This function should be called when the active VMCS is L1's (vmcs01).
4284 */
4285 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
4286 struct vmcs12 *vmcs12)
4287 {
4288 enum vm_entry_failure_code ignored;
4289 struct kvm_segment seg;
4290
4291 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
4292 vcpu->arch.efer = vmcs12->host_ia32_efer;
4293 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
4294 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
4295 else
4296 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
4297 vmx_set_efer(vcpu, vcpu->arch.efer);
4298
4299 kvm_rsp_write(vcpu, vmcs12->host_rsp);
4300 kvm_rip_write(vcpu, vmcs12->host_rip);
4301 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
4302 vmx_set_interrupt_shadow(vcpu, 0);
4303
4304 /*
4305 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
4306 * actually changed, because vmx_set_cr0 refers to the EFER value set above.
4307 *
4308 * CR0_GUEST_HOST_MASK is already set in the original vmcs01
4309 * (KVM doesn't change it).
4310 */
4311 vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
4312 vmx_set_cr0(vcpu, vmcs12->host_cr0);
4313
4314 /* Same as above - no reason to call set_cr4_guest_host_mask(). */
4315 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
4316 vmx_set_cr4(vcpu, vmcs12->host_cr4);
4317
4318 nested_ept_uninit_mmu_context(vcpu);
4319
4320 /*
4321 * Only PDPTE load can fail as the value of cr3 was checked on entry and
4322 * couldn't have changed.
4323 */
4324 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, true, &ignored))
4325 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
4326
4327 nested_vmx_transition_tlb_flush(vcpu, vmcs12, false);
4328
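	/*
	 * Load host SYSENTER and descriptor-table state from vmcs12; the
	 * IDTR/GDTR limits are forced to 0xFFFF, as architected for VM-exit.
	 */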
4329 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
4330 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
4331 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
4332 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
4333 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
4334 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
4335 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
4336
4337 /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
4338 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
4339 vmcs_write64(GUEST_BNDCFGS, 0);
4340
4341 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
4342 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
4343 vcpu->arch.pat = vmcs12->host_ia32_pat;
4344 }
4345 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
4346 intel_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)))
4347 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
4348 vmcs12->host_ia32_perf_global_ctrl));
4349
4350 /* Set L1 segment info according to Intel SDM
4351 27.5.2 Loading Host Segment and Descriptor-Table Registers */
4352 seg = (struct kvm_segment) {
4353 .base = 0,
4354 .limit = 0xFFFFFFFF,
4355 .selector = vmcs12->host_cs_selector,
4356 .type = 11,
4357 .present = 1,
4358 .s = 1,
4359 .g = 1
4360 };
4361 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
4362 seg.l = 1;
4363 else
4364 seg.db = 1;
4365 __vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
4366 seg = (struct kvm_segment) {
4367 .base = 0,
4368 .limit = 0xFFFFFFFF,
4369 .type = 3,
4370 .present = 1,
4371 .s = 1,
4372 .db = 1,
4373 .g = 1
4374 };
4375 seg.selector = vmcs12->host_ds_selector;
4376 __vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
4377 seg.selector = vmcs12->host_es_selector;
4378 __vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
4379 seg.selector = vmcs12->host_ss_selector;
4380 __vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
4381 seg.selector = vmcs12->host_fs_selector;
4382 seg.base = vmcs12->host_fs_base;
4383 __vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
4384 seg.selector = vmcs12->host_gs_selector;
4385 seg.base = vmcs12->host_gs_base;
4386 __vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
4387 seg = (struct kvm_segment) {
4388 .base = vmcs12->host_tr_base,
4389 .limit = 0x67,
4390 .selector = vmcs12->host_tr_selector,
4391 .type = 11,
4392 .present = 1
4393 };
4394 __vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
4395
4396 memset(&seg, 0, sizeof(seg));
4397 seg.unusable = 1;
4398 __vmx_set_segment(vcpu, &seg, VCPU_SREG_LDTR);
4399
4400 kvm_set_dr(vcpu, 7, 0x400);
4401 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
4402
4403 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
4404 vmcs12->vm_exit_msr_load_count))
4405 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
4406
4407 to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
4408 }
4409
4410 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
4411 {
4412 struct vmx_uret_msr *efer_msr;
4413 unsigned int i;
4414
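	/*
	 * vmcs01's guest EFER may live in one of several places: the VMCS
	 * itself (if loaded at VM-entry), the VM-entry MSR autoload list,
	 * or the user-return MSR slots; otherwise the guest runs with the
	 * host's EFER.
	 */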
4415 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
4416 return vmcs_read64(GUEST_IA32_EFER);
4417
4418 if (cpu_has_load_ia32_efer())
4419 return host_efer;
4420
4421 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
4422 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
4423 return vmx->msr_autoload.guest.val[i].value;
4424 }
4425
4426 efer_msr = vmx_find_uret_msr(vmx, MSR_EFER);
4427 if (efer_msr)
4428 return efer_msr->data;
4429
4430 return host_efer;
4431 }
4432
4433 static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
4434 {
4435 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4436 struct vcpu_vmx *vmx = to_vmx(vcpu);
4437 struct vmx_msr_entry g, h;
4438 gpa_t gpa;
4439 u32 i, j;
4440
4441 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
4442
4443 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
4444 /*
4445 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set,
4446 * as vmcs01.GUEST_DR7 contains a userspace-defined value
4447 * and vcpu->arch.dr7 is not squirreled away before the
4448 * nested VMENTER (not worth adding a variable in nested_vmx).
4449 */
4450 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
4451 kvm_set_dr(vcpu, 7, DR7_FIXED_1);
4452 else
4453 WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
4454 }
4455
4456 /*
4457 * Note that calling vmx_set_{efer,cr0,cr4} is important as they
4458 * handle a variety of side effects to KVM's software model.
4459 */
4460 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
4461
4462 vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
4463 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
4464
4465 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
4466 vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
4467
4468 nested_ept_uninit_mmu_context(vcpu);
4469 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
4470 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
4471
4472 /*
4473 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
4474 * from vmcs01 (if necessary). The PDPTRs are not loaded on
4475 * VMFail, like everything else we just need to ensure our
4476 * software model is up-to-date.
4477 */
4478 if (enable_ept && is_pae_paging(vcpu))
4479 ept_save_pdptrs(vcpu);
4480
4481 kvm_mmu_reset_context(vcpu);
4482
4483 /*
4484 * This nasty bit of open coding is a compromise between blindly
4485 * loading L1's MSRs using the exit load lists (incorrect emulation
4486 * of VMFail), leaving the nested VM's MSRs in the software model
4487 * (incorrect behavior) and snapshotting the modified MSRs (too
4488 * expensive since the lists are unbounded by hardware). For each
4489 * MSR that was (prematurely) loaded from the nested VMEntry load
4490 * list, reload it from the exit load list if it exists and differs
4491 * from the guest value. The intent is to stuff host state as
4492 * silently as possible, not to fully process the exit load list.
4493 */
4494 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
4495 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
4496 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
4497 pr_debug_ratelimited(
4498 "%s read MSR index failed (%u, 0x%08llx)\n",
4499 __func__, i, gpa);
4500 goto vmabort;
4501 }
4502
4503 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
4504 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
4505 if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
4506 pr_debug_ratelimited(
4507 "%s read MSR failed (%u, 0x%08llx)\n",
4508 __func__, j, gpa);
4509 goto vmabort;
4510 }
4511 if (h.index != g.index)
4512 continue;
4513 if (h.value == g.value)
4514 break;
4515
4516 if (nested_vmx_load_msr_check(vcpu, &h)) {
4517 pr_debug_ratelimited(
4518 "%s check failed (%u, 0x%x, 0x%x)\n",
4519 __func__, j, h.index, h.reserved);
4520 goto vmabort;
4521 }
4522
4523 if (kvm_set_msr(vcpu, h.index, h.value)) {
4524 pr_debug_ratelimited(
4525 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
4526 __func__, j, h.index, h.value);
4527 goto vmabort;
4528 }
4529 }
4530 }
4531
4532 return;
4533
4534 vmabort:
4535 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
4536 }
4537
4538 /*
4539 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
4540 * and modify vmcs12 to make it see what it would expect to see there if
4541 * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
4542 */
4543 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
4544 u32 exit_intr_info, unsigned long exit_qualification)
4545 {
4546 struct vcpu_vmx *vmx = to_vmx(vcpu);
4547 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4548
4549 /* trying to cancel vmlaunch/vmresume is a bug */
4550 WARN_ON_ONCE(vmx->nested.nested_run_pending);
4551
4552 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
4553 /*
4554 * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map the
4555 * Enlightened VMCS after migration, and we still need to
4556 * do that when something forces an L2->L1 exit prior to
4557 * the first L2 run.
4558 */
4559 (void)nested_get_evmcs_page(vcpu);
4560 }
4561
4562 /* Service pending TLB flush requests for L2 before switching to L1. */
4563 kvm_service_local_tlb_flush_requests(vcpu);
4564
4565 /*
4566 * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
4567 * now and the new vmentry. Ensure that the VMCS02 PDPTR fields are
4568 * up-to-date before switching to L1.
4569 */
4570 if (enable_ept && is_pae_paging(vcpu))
4571 vmx_ept_load_pdptrs(vcpu);
4572
4573 leave_guest_mode(vcpu);
4574
4575 if (nested_cpu_has_preemption_timer(vmcs12))
4576 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
4577
4578 if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING)) {
4579 vcpu->arch.tsc_offset = vcpu->arch.l1_tsc_offset;
4580 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
4581 vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
4582 }
4583
4584 if (likely(!vmx->fail)) {
4585 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
4586
4587 if (vm_exit_reason != -1)
4588 prepare_vmcs12(vcpu, vmcs12, vm_exit_reason,
4589 exit_intr_info, exit_qualification);
4590
4591 /*
4592 * Must happen outside of sync_vmcs02_to_vmcs12() as it will
4593 * also be used to capture vmcs12 cache as part of
4594 * capturing nVMX state for snapshot (migration).
4595 *
4596 * Otherwise, this flush will dirty guest memory at a
4597 * point it is already assumed by user-space to be
4598 * immutable.
4599 */
4600 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
4601 } else {
4602 /*
4603 * The only expected VM-instruction error is "VM entry with
4604 * invalid control field(s)." Anything else indicates a
4605 * problem with L0. And we should never get here with a
4606 * VMFail of any type if early consistency checks are enabled.
4607 */
4608 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
4609 VMXERR_ENTRY_INVALID_CONTROL_FIELD);
4610 WARN_ON_ONCE(nested_early_check);
4611 }
4612
4613 /*
4614 * Drop events/exceptions that were queued for re-injection to L2
4615 * (picked up via vmx_complete_interrupts()), as well as exceptions
4616 * that were pending for L2. Note, this must NOT be hoisted above
4617 * prepare_vmcs12(), events/exceptions queued for re-injection need to
4618 * be captured in vmcs12 (see vmcs12_save_pending_event()).
4619 */
4620 vcpu->arch.nmi_injected = false;
4621 kvm_clear_exception_queue(vcpu);
4622 kvm_clear_interrupt_queue(vcpu);
4623
4624 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
4625
4626 /*
4627 * If IBRS is advertised to the vCPU, KVM must flush the indirect
4628 * branch predictors when transitioning from L2 to L1, as L1 expects
4629 * hardware (KVM in this case) to provide separate predictor modes.
4630 * Bare metal isolates VMX root (host) from VMX non-root (guest), but
4631 * doesn't isolate different VMCSs, i.e. in this case, doesn't provide
4632 * separate modes for L2 vs L1.
4633 */
4634 if (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
4635 indirect_branch_prediction_barrier();
4636
4637 /* Update any VMCS fields that might have changed while L2 ran */
4638 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
4639 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
4640 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
4641 if (kvm_has_tsc_control)
4642 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
4643
4644 if (vmx->nested.l1_tpr_threshold != -1)
4645 vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold);
4646
4647 if (vmx->nested.change_vmcs01_virtual_apic_mode) {
4648 vmx->nested.change_vmcs01_virtual_apic_mode = false;
4649 vmx_set_virtual_apic_mode(vcpu);
4650 }
4651
4652 if (vmx->nested.update_vmcs01_cpu_dirty_logging) {
4653 vmx->nested.update_vmcs01_cpu_dirty_logging = false;
4654 vmx_update_cpu_dirty_logging(vcpu);
4655 }
4656
4657 /* Unpin physical memory we referred to in vmcs02 */
4658 if (vmx->nested.apic_access_page) {
4659 kvm_release_page_clean(vmx->nested.apic_access_page);
4660 vmx->nested.apic_access_page = NULL;
4661 }
4662 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
4663 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
4664 vmx->nested.pi_desc = NULL;
4665
4666 if (vmx->nested.reload_vmcs01_apic_access_page) {
4667 vmx->nested.reload_vmcs01_apic_access_page = false;
4668 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
4669 }
4670
4671 if (vmx->nested.update_vmcs01_apicv_status) {
4672 vmx->nested.update_vmcs01_apicv_status = false;
4673 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
4674 }
4675
4676 if ((vm_exit_reason != -1) &&
4677 (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)))
4678 vmx->nested.need_vmcs12_to_shadow_sync = true;
4679
4680 /* in case we halted in L2 */
4681 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4682
4683 if (likely(!vmx->fail)) {
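		/*
		 * If L1 requested "acknowledge interrupt on exit", emulate what
		 * hardware would do on a real VM-exit: pull the pending vector
		 * from the (virtual) local APIC now and record it in vmcs12's
		 * VM-exit interruption information.
		 */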
4684 if ((u16)vm_exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
4685 nested_exit_intr_ack_set(vcpu)) {
4686 int irq = kvm_cpu_get_interrupt(vcpu);
4687 WARN_ON(irq < 0);
4688 vmcs12->vm_exit_intr_info = irq |
4689 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
4690 }
4691
4692 if (vm_exit_reason != -1)
4693 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
4694 vmcs12->exit_qualification,
4695 vmcs12->idt_vectoring_info_field,
4696 vmcs12->vm_exit_intr_info,
4697 vmcs12->vm_exit_intr_error_code,
4698 KVM_ISA_VMX);
4699
4700 load_vmcs12_host_state(vcpu, vmcs12);
4701
4702 return;
4703 }
4704
4705 /*
4706 * After an early L2 VM-entry failure, we're now back
4707 * in L1 which thinks it just finished a VMLAUNCH or
4708 * VMRESUME instruction, so we need to set the failure
4709 * flag and the VM-instruction error field of the VMCS
4710 * accordingly, and skip the emulated instruction.
4711 */
4712 (void)nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
4713
4714 /*
4715 * Restore L1's host state to KVM's software model. We're here
4716 * because a consistency check was caught by hardware, which
4717 * means some amount of guest state has been propagated to KVM's
4718 * model and needs to be unwound to the host's state.
4719 */
4720 nested_vmx_restore_host_state(vcpu);
4721
4722 vmx->fail = 0;
4723 }
4724
4725 static void nested_vmx_triple_fault(struct kvm_vcpu *vcpu)
4726 {
4727 nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0);
4728 }
4729
4730 /*
4731 * Decode the memory-address operand of a vmx instruction, as recorded on an
4732 * exit caused by such an instruction (run by a guest hypervisor).
4733 * On success, returns 0. When the operand is invalid, returns 1 and throws
4734 * #UD, #GP, or #SS.
4735 */
4736 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
4737 u32 vmx_instruction_info, bool wr, int len, gva_t *ret)
4738 {
4739 gva_t off;
4740 bool exn;
4741 struct kvm_segment s;
4742
4743 /*
4744 * According to Vol. 3B, "Information for VM Exits Due to Instruction
4745 * Execution", on an exit, vmx_instruction_info holds most of the
4746 * addressing components of the operand. Only the displacement part
4747 * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
4748 * For how an actual address is calculated from all these components,
4749 * refer to Vol. 1, "Operand Addressing".
4750 */
4751 int scaling = vmx_instruction_info & 3;
4752 int addr_size = (vmx_instruction_info >> 7) & 7;
4753 bool is_reg = vmx_instruction_info & (1u << 10);
4754 int seg_reg = (vmx_instruction_info >> 15) & 7;
4755 int index_reg = (vmx_instruction_info >> 18) & 0xf;
4756 bool index_is_valid = !(vmx_instruction_info & (1u << 22));
4757 int base_reg = (vmx_instruction_info >> 23) & 0xf;
4758 bool base_is_valid = !(vmx_instruction_info & (1u << 27));
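	/*
	 * For reference, the instruction-information layout decoded above
	 * (as consumed by this function):
	 *   bits  1:0   scaling            bits 17:15  segment register
	 *   bits  9:7   address size       bits 21:18  index register
	 *   bit  10     register operand   bit  22     index register invalid
	 *   bits 26:23  base register      bit  27     base register invalid
	 * Address size 0 is 16-bit, 1 is 32-bit, 2 is 64-bit.
	 */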
4759
4760 if (is_reg) {
4761 kvm_queue_exception(vcpu, UD_VECTOR);
4762 return 1;
4763 }
4764
4765 /* Addr = segment_base + offset */
4766 /* offset = base + [index * scale] + displacement */
4767 off = exit_qualification; /* holds the displacement */
4768 if (addr_size == 1)
4769 off = (gva_t)sign_extend64(off, 31);
4770 else if (addr_size == 0)
4771 off = (gva_t)sign_extend64(off, 15);
4772 if (base_is_valid)
4773 off += kvm_register_read(vcpu, base_reg);
4774 if (index_is_valid)
4775 off += kvm_register_read(vcpu, index_reg) << scaling;
4776 vmx_get_segment(vcpu, &s, seg_reg);
4777
4778 /*
4779 * The effective address, i.e. @off, of a memory operand is truncated
4780 * based on the address size of the instruction. Note that this is
4781 * the *effective address*, i.e. the address prior to accounting for
4782 * the segment's base.
4783 */
4784 if (addr_size == 1) /* 32 bit */
4785 off &= 0xffffffff;
4786 else if (addr_size == 0) /* 16 bit */
4787 off &= 0xffff;
4788
4789 /* Checks for #GP/#SS exceptions. */
4790 exn = false;
4791 if (is_long_mode(vcpu)) {
4792 /*
4793 * The virtual/linear address is never truncated in 64-bit
4794 * mode, e.g. a 32-bit address size can yield a 64-bit virtual
4795 * address when using FS/GS with a non-zero base.
4796 */
4797 if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS)
4798 *ret = s.base + off;
4799 else
4800 *ret = off;
4801
4802 /* Long mode: #GP(0)/#SS(0) if the memory address is in a
4803 * non-canonical form. This is the only check on the memory
4804 * destination for long mode!
4805 */
4806 exn = is_noncanonical_address(*ret, vcpu);
4807 } else {
4808 /*
4809 * When not in long mode, the virtual/linear address is
4810 * unconditionally truncated to 32 bits regardless of the
4811 * address size.
4812 */
4813 *ret = (s.base + off) & 0xffffffff;
4814
4815 /* Protected mode: apply checks for segment validity in the
4816 * following order:
4817 * - segment type check (#GP(0) may be thrown)
4818 * - usability check (#GP(0)/#SS(0))
4819 * - limit check (#GP(0)/#SS(0))
4820 */
4821 if (wr)
4822 /* #GP(0) if the destination operand is located in a
4823 * read-only data segment or any code segment.
4824 */
4825 exn = ((s.type & 0xa) == 0 || (s.type & 8));
4826 else
4827 /* #GP(0) if the source operand is located in an
4828 * execute-only code segment
4829 */
4830 exn = ((s.type & 0xa) == 8);
4831 if (exn) {
4832 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
4833 return 1;
4834 }
4835 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
4836 */
4837 exn = (s.unusable != 0);
4838
4839 /*
4840 * Protected mode: #GP(0)/#SS(0) if the memory operand is
4841 * outside the segment limit. All CPUs that support VMX ignore
4842 * limit checks for flat segments, i.e. segments with base==0,
4843 * limit==0xffffffff and of type expand-up data or code.
4844 */
4845 if (!(s.base == 0 && s.limit == 0xffffffff &&
4846 ((s.type & 8) || !(s.type & 4))))
4847 exn = exn || ((u64)off + len - 1 > s.limit);
4848 }
4849 if (exn) {
4850 kvm_queue_exception_e(vcpu,
4851 seg_reg == VCPU_SREG_SS ?
4852 SS_VECTOR : GP_VECTOR,
4853 0);
4854 return 1;
4855 }
4856
4857 return 0;
4858 }
4859
4860 void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
4861 {
4862 struct vcpu_vmx *vmx;
4863
4864 if (!nested_vmx_allowed(vcpu))
4865 return;
4866
4867 vmx = to_vmx(vcpu);
4868 if (kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL)) {
4869 vmx->nested.msrs.entry_ctls_high |=
4870 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
4871 vmx->nested.msrs.exit_ctls_high |=
4872 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
4873 } else {
4874 vmx->nested.msrs.entry_ctls_high &=
4875 ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
4876 vmx->nested.msrs.exit_ctls_high &=
4877 ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
4878 }
4879 }
4880
4881 static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer,
4882 int *ret)
4883 {
4884 gva_t gva;
4885 struct x86_exception e;
4886 int r;
4887
4888 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
4889 vmcs_read32(VMX_INSTRUCTION_INFO), false,
4890 sizeof(*vmpointer), &gva)) {
4891 *ret = 1;
4892 return -EINVAL;
4893 }
4894
4895 r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e);
4896 if (r != X86EMUL_CONTINUE) {
4897 *ret = kvm_handle_memory_failure(vcpu, r, &e);
4898 return -EINVAL;
4899 }
4900
4901 return 0;
4902 }
4903
4904 /*
4905 * Allocate a shadow VMCS and associate it with the currently loaded
4906 * VMCS, unless such a shadow VMCS already exists. The newly allocated
4907 * VMCS is also VMCLEARed, so that it is ready for use.
4908 */
4909 static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
4910 {
4911 struct vcpu_vmx *vmx = to_vmx(vcpu);
4912 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
4913
4914 /*
4915 * We should allocate a shadow vmcs for vmcs01 only when L1
4916 * executes VMXON and free it when L1 executes VMXOFF.
4917 * As it is invalid to execute VMXON twice, we shouldn't reach
4918 	 * here when vmcs01 already has an allocated shadow vmcs.
4919 */
4920 WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);
4921
4922 if (!loaded_vmcs->shadow_vmcs) {
4923 loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
4924 if (loaded_vmcs->shadow_vmcs)
4925 vmcs_clear(loaded_vmcs->shadow_vmcs);
4926 }
4927 return loaded_vmcs->shadow_vmcs;
4928 }
4929
4930 static int enter_vmx_operation(struct kvm_vcpu *vcpu)
4931 {
4932 struct vcpu_vmx *vmx = to_vmx(vcpu);
4933 int r;
4934
4935 r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
4936 if (r < 0)
4937 goto out_vmcs02;
4938
4939 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
4940 if (!vmx->nested.cached_vmcs12)
4941 goto out_cached_vmcs12;
4942
4943 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
4944 if (!vmx->nested.cached_shadow_vmcs12)
4945 goto out_cached_shadow_vmcs12;
4946
4947 if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
4948 goto out_shadow_vmcs;
4949
4950 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
4951 HRTIMER_MODE_ABS_PINNED);
4952 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
4953
4954 vmx->nested.vpid02 = allocate_vpid();
4955
4956 vmx->nested.vmcs02_initialized = false;
4957 vmx->nested.vmxon = true;
4958
4959 if (vmx_pt_mode_is_host_guest()) {
4960 vmx->pt_desc.guest.ctl = 0;
4961 pt_update_intercept_for_msr(vcpu);
4962 }
4963
4964 return 0;
4965
4966 out_shadow_vmcs:
4967 kfree(vmx->nested.cached_shadow_vmcs12);
4968
4969 out_cached_shadow_vmcs12:
4970 kfree(vmx->nested.cached_vmcs12);
4971
4972 out_cached_vmcs12:
4973 free_loaded_vmcs(&vmx->nested.vmcs02);
4974
4975 out_vmcs02:
4976 return -ENOMEM;
4977 }
4978
4979 /* Emulate the VMXON instruction. */
4980 static int handle_vmon(struct kvm_vcpu *vcpu)
4981 {
4982 int ret;
4983 gpa_t vmptr;
4984 uint32_t revision;
4985 struct vcpu_vmx *vmx = to_vmx(vcpu);
4986 const u64 VMXON_NEEDED_FEATURES = FEAT_CTL_LOCKED
4987 | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
4988
4989 /*
4990 	 * Manually check CR4.VMXE; KVM must force CR4.VMXE=1 to enter
4991 * the guest and so cannot rely on hardware to perform the check,
4992 * which has higher priority than VM-Exit (see Intel SDM's pseudocode
4993 * for VMXON).
4994 *
4995 * Rely on hardware for the other pre-VM-Exit checks, CR0.PE=1, !VM86
4996 * and !COMPATIBILITY modes. For an unrestricted guest, KVM doesn't
4997 * force any of the relevant guest state. For a restricted guest, KVM
4998 * does force CR0.PE=1, but only to also force VM86 in order to emulate
4999 * Real Mode, and so there's no need to check CR0.PE manually.
5000 */
5001 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
5002 kvm_queue_exception(vcpu, UD_VECTOR);
5003 return 1;
5004 }
5005
5006 /*
5007 * The CPL is checked for "not in VMX operation" and for "in VMX root",
5008 * and has higher priority than the VM-Fail due to being post-VMXON,
5009 * i.e. VMXON #GPs outside of VMX non-root if CPL!=0. In VMX non-root,
5010 * VMXON causes VM-Exit and KVM unconditionally forwards VMXON VM-Exits
5011 * from L2 to L1, i.e. there's no need to check for the vCPU being in
5012 * VMX non-root.
5013 *
5014 * Forwarding the VM-Exit unconditionally, i.e. without performing the
5015 * #UD checks (see above), is functionally ok because KVM doesn't allow
5016 	 * L1 to run L2 without CR4.VMXE=1, and because KVM never modifies L2's
5017 	 * CR0 or CR4, i.e. it's L1's responsibility to emulate #UDs that are
5018 * missed by hardware due to shadowing CR0 and/or CR4.
5019 */
5020 if (vmx_get_cpl(vcpu)) {
5021 kvm_inject_gp(vcpu, 0);
5022 return 1;
5023 }
5024
5025 if (vmx->nested.vmxon)
5026 return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
5027
5028 /*
5029 * Invalid CR0/CR4 generates #GP. These checks are performed if and
5030 * only if the vCPU isn't already in VMX operation, i.e. effectively
5031 * have lower priority than the VM-Fail above.
5032 */
5033 if (!nested_host_cr0_valid(vcpu, kvm_read_cr0(vcpu)) ||
5034 !nested_host_cr4_valid(vcpu, kvm_read_cr4(vcpu))) {
5035 kvm_inject_gp(vcpu, 0);
5036 return 1;
5037 }
5038
5039 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
5040 != VMXON_NEEDED_FEATURES) {
5041 kvm_inject_gp(vcpu, 0);
5042 return 1;
5043 }
5044
5045 if (nested_vmx_get_vmptr(vcpu, &vmptr, &ret))
5046 return ret;
5047
5048 /*
5049 * SDM 3: 24.11.5
5050 * The first 4 bytes of VMXON region contain the supported
5051 * VMCS revision identifier
5052 *
5053 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
5054 	 * case; that bit would limit the physical address width to 32 bits.
5055 */
5056 if (!page_address_valid(vcpu, vmptr))
5057 return nested_vmx_failInvalid(vcpu);
5058
5059 if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
5060 revision != VMCS12_REVISION)
5061 return nested_vmx_failInvalid(vcpu);
5062
5063 vmx->nested.vmxon_ptr = vmptr;
5064 ret = enter_vmx_operation(vcpu);
5065 if (ret)
5066 return ret;
5067
5068 return nested_vmx_succeed(vcpu);
5069 }
5070
5071 static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
5072 {
5073 struct vcpu_vmx *vmx = to_vmx(vcpu);
5074
5075 if (vmx->nested.current_vmptr == -1ull)
5076 return;
5077
5078 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
5079
5080 if (enable_shadow_vmcs) {
5081 /* copy to memory all shadowed fields in case
5082 they were modified */
5083 copy_shadow_to_vmcs12(vmx);
5084 vmx_disable_shadow_vmcs(vmx);
5085 }
5086 vmx->nested.posted_intr_nv = -1;
5087
5088 /* Flush VMCS12 to guest memory */
5089 kvm_vcpu_write_guest_page(vcpu,
5090 vmx->nested.current_vmptr >> PAGE_SHIFT,
5091 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
5092
5093 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
5094
5095 vmx->nested.current_vmptr = -1ull;
5096 }
5097
5098 /* Emulate the VMXOFF instruction */
5099 static int handle_vmoff(struct kvm_vcpu *vcpu)
5100 {
5101 if (!nested_vmx_check_permission(vcpu))
5102 return 1;
5103
5104 free_nested(vcpu);
5105
5106 	/* Process a latched INIT that arrived while the CPU was in VMX operation */
5107 kvm_make_request(KVM_REQ_EVENT, vcpu);
5108
5109 return nested_vmx_succeed(vcpu);
5110 }
5111
5112 /* Emulate the VMCLEAR instruction */
5113 static int handle_vmclear(struct kvm_vcpu *vcpu)
5114 {
5115 struct vcpu_vmx *vmx = to_vmx(vcpu);
5116 u32 zero = 0;
5117 gpa_t vmptr;
5118 u64 evmcs_gpa;
5119 int r;
5120
5121 if (!nested_vmx_check_permission(vcpu))
5122 return 1;
5123
5124 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
5125 return r;
5126
5127 if (!page_address_valid(vcpu, vmptr))
5128 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
5129
5130 if (vmptr == vmx->nested.vmxon_ptr)
5131 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
5132
5133 /*
5134 * When Enlightened VMEntry is enabled on the calling CPU we treat
5135 	 * memory area pointed to by vmptr as Enlightened VMCS (as there's no good
5136 * way to distinguish it from VMCS12) and we must not corrupt it by
5137 * writing to the non-existent 'launch_state' field. The area doesn't
5138 * have to be the currently active EVMCS on the calling CPU and there's
5139 * nothing KVM has to do to transition it from 'active' to 'non-active'
5140 * state. It is possible that the area will stay mapped as
5141 * vmx->nested.hv_evmcs but this shouldn't be a problem.
5142 */
5143 if (likely(!vmx->nested.enlightened_vmcs_enabled ||
5144 !nested_enlightened_vmentry(vcpu, &evmcs_gpa))) {
5145 if (vmptr == vmx->nested.current_vmptr)
5146 nested_release_vmcs12(vcpu);
5147
5148 kvm_vcpu_write_guest(vcpu,
5149 vmptr + offsetof(struct vmcs12,
5150 launch_state),
5151 &zero, sizeof(zero));
5152 } else if (vmx->nested.hv_evmcs && vmptr == vmx->nested.hv_evmcs_vmptr) {
5153 nested_release_evmcs(vcpu);
5154 }
5155
5156 return nested_vmx_succeed(vcpu);
5157 }
5158
5159 /* Emulate the VMLAUNCH instruction */
5160 static int handle_vmlaunch(struct kvm_vcpu *vcpu)
5161 {
5162 return nested_vmx_run(vcpu, true);
5163 }
5164
5165 /* Emulate the VMRESUME instruction */
5166 static int handle_vmresume(struct kvm_vcpu *vcpu)
5167 {
5168
5169 return nested_vmx_run(vcpu, false);
5170 }
5171
5172 static int handle_vmread(struct kvm_vcpu *vcpu)
5173 {
5174 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
5175 : get_vmcs12(vcpu);
5176 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5177 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5178 struct vcpu_vmx *vmx = to_vmx(vcpu);
5179 struct x86_exception e;
5180 unsigned long field;
5181 u64 value;
5182 gva_t gva = 0;
5183 short offset;
5184 int len, r;
5185
5186 if (!nested_vmx_check_permission(vcpu))
5187 return 1;
5188
5189 /*
5190 * In VMX non-root operation, when the VMCS-link pointer is -1ull,
5191 * any VMREAD sets the ALU flags for VMfailInvalid.
5192 */
5193 if (vmx->nested.current_vmptr == -1ull ||
5194 (is_guest_mode(vcpu) &&
5195 get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
5196 return nested_vmx_failInvalid(vcpu);
5197
5198 /* Decode instruction info and find the field to read */
5199 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
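	/*
	 * Bits 31:28 of the instruction info select the register holding the
	 * field encoding (read above); bit 10 selects a register destination
	 * (bits 6:3, used below) versus a memory destination.
	 */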
5200
5201 offset = vmcs_field_to_offset(field);
5202 if (offset < 0)
5203 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
5204
5205 if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
5206 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
5207
5208 /* Read the field, zero-extended to a u64 value */
5209 value = vmcs12_read_any(vmcs12, field, offset);
5210
5211 /*
5212 * Now copy part of this value to register or memory, as requested.
5213 * Note that the number of bits actually copied is 32 or 64 depending
5214 * on the guest's mode (32 or 64 bit), not on the given field's length.
5215 */
5216 if (instr_info & BIT(10)) {
5217 kvm_register_write(vcpu, (((instr_info) >> 3) & 0xf), value);
5218 } else {
5219 len = is_64_bit_mode(vcpu) ? 8 : 4;
5220 if (get_vmx_mem_address(vcpu, exit_qualification,
5221 instr_info, true, len, &gva))
5222 return 1;
5223 /* _system ok, nested_vmx_check_permission has verified cpl=0 */
5224 r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e);
5225 if (r != X86EMUL_CONTINUE)
5226 return kvm_handle_memory_failure(vcpu, r, &e);
5227 }
5228
5229 return nested_vmx_succeed(vcpu);
5230 }
5231
5232 static bool is_shadow_field_rw(unsigned long field)
5233 {
5234 switch (field) {
5235 #define SHADOW_FIELD_RW(x, y) case x:
5236 #include "vmcs_shadow_fields.h"
5237 return true;
5238 default:
5239 break;
5240 }
5241 return false;
5242 }
5243
5244 static bool is_shadow_field_ro(unsigned long field)
5245 {
5246 switch (field) {
5247 #define SHADOW_FIELD_RO(x, y) case x:
5248 #include "vmcs_shadow_fields.h"
5249 return true;
5250 default:
5251 break;
5252 }
5253 return false;
5254 }
5255
5256 static int handle_vmwrite(struct kvm_vcpu *vcpu)
5257 {
5258 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
5259 : get_vmcs12(vcpu);
5260 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5261 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5262 struct vcpu_vmx *vmx = to_vmx(vcpu);
5263 struct x86_exception e;
5264 unsigned long field;
5265 short offset;
5266 gva_t gva;
5267 int len, r;
5268
5269 /*
5270 * The value to write might be 32 or 64 bits, depending on L1's long
5271 * mode, and eventually we need to write that into a field of several
5272 * possible lengths. The code below first zero-extends the value to 64
5273 * bit (value), and then copies only the appropriate number of
5274 * bits into the vmcs12 field.
5275 */
5276 u64 value = 0;
5277
5278 if (!nested_vmx_check_permission(vcpu))
5279 return 1;
5280
5281 /*
5282 * In VMX non-root operation, when the VMCS-link pointer is -1ull,
5283 * any VMWRITE sets the ALU flags for VMfailInvalid.
5284 */
5285 if (vmx->nested.current_vmptr == -1ull ||
5286 (is_guest_mode(vcpu) &&
5287 get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
5288 return nested_vmx_failInvalid(vcpu);
5289
5290 if (instr_info & BIT(10))
5291 value = kvm_register_read(vcpu, (((instr_info) >> 3) & 0xf));
5292 else {
5293 len = is_64_bit_mode(vcpu) ? 8 : 4;
5294 if (get_vmx_mem_address(vcpu, exit_qualification,
5295 instr_info, false, len, &gva))
5296 return 1;
5297 r = kvm_read_guest_virt(vcpu, gva, &value, len, &e);
5298 if (r != X86EMUL_CONTINUE)
5299 return kvm_handle_memory_failure(vcpu, r, &e);
5300 }
5301
5302 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
5303
5304 offset = vmcs_field_to_offset(field);
5305 if (offset < 0)
5306 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
5307
5308 /*
5309 * If the vCPU supports "VMWRITE to any supported field in the
5310 * VMCS," then the "read-only" fields are actually read/write.
5311 */
5312 if (vmcs_field_readonly(field) &&
5313 !nested_cpu_has_vmwrite_any_field(vcpu))
5314 return nested_vmx_fail(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
5315
5316 /*
5317 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
5318 	 * vmcs12, else we may clobber a field or consume a stale value.
5319 */
5320 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field))
5321 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
5322
5323 /*
5324 * Some Intel CPUs intentionally drop the reserved bits of the AR byte
5325 * fields on VMWRITE. Emulate this behavior to ensure consistent KVM
5326 * behavior regardless of the underlying hardware, e.g. if an AR_BYTE
5327 * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD
5328 * from L1 will return a different value than VMREAD from L2 (L1 sees
5329 * the stripped down value, L2 sees the full value as stored by KVM).
5330 */
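	/*
	 * The 0x1f0ff mask keeps bits 7:0 (segment type, S, DPL, P) and bits
	 * 16:12 (AVL, L, D/B, G, unusable) of the access-rights format and
	 * clears the reserved bits 11:8.
	 */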
5331 if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES)
5332 value &= 0x1f0ff;
5333
5334 vmcs12_write_any(vmcs12, field, offset, value);
5335
5336 /*
5337 * Do not track vmcs12 dirty-state if in guest-mode as we actually
5338 * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated
5339 * by L1 without a vmexit are always updated in the vmcs02, i.e. don't
5340 * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path.
5341 */
5342 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) {
5343 /*
5344 * L1 can read these fields without exiting, ensure the
5345 * shadow VMCS is up-to-date.
5346 */
5347 if (enable_shadow_vmcs && is_shadow_field_ro(field)) {
5348 preempt_disable();
5349 vmcs_load(vmx->vmcs01.shadow_vmcs);
5350
5351 __vmcs_writel(field, value);
5352
5353 vmcs_clear(vmx->vmcs01.shadow_vmcs);
5354 vmcs_load(vmx->loaded_vmcs->vmcs);
5355 preempt_enable();
5356 }
5357 vmx->nested.dirty_vmcs12 = true;
5358 }
5359
5360 return nested_vmx_succeed(vcpu);
5361 }
5362
5363 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
5364 {
5365 vmx->nested.current_vmptr = vmptr;
5366 if (enable_shadow_vmcs) {
5367 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
5368 vmcs_write64(VMCS_LINK_POINTER,
5369 __pa(vmx->vmcs01.shadow_vmcs));
5370 vmx->nested.need_vmcs12_to_shadow_sync = true;
5371 }
5372 vmx->nested.dirty_vmcs12 = true;
5373 }
5374
5375 /* Emulate the VMPTRLD instruction */
5376 static int handle_vmptrld(struct kvm_vcpu *vcpu)
5377 {
5378 struct vcpu_vmx *vmx = to_vmx(vcpu);
5379 gpa_t vmptr;
5380 int r;
5381
5382 if (!nested_vmx_check_permission(vcpu))
5383 return 1;
5384
5385 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
5386 return r;
5387
5388 if (!page_address_valid(vcpu, vmptr))
5389 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
5390
5391 if (vmptr == vmx->nested.vmxon_ptr)
5392 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
5393
5394 /* Forbid normal VMPTRLD if Enlightened version was used */
5395 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
5396 return 1;
5397
5398 if (vmx->nested.current_vmptr != vmptr) {
5399 struct kvm_host_map map;
5400 struct vmcs12 *new_vmcs12;
5401
5402 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) {
5403 /*
5404 * Reads from an unbacked page return all 1s,
5405 * which means that the 32 bits located at the
5406 * given physical address won't match the required
5407 * VMCS12_REVISION identifier.
5408 */
5409 return nested_vmx_fail(vcpu,
5410 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5411 }
5412
5413 new_vmcs12 = map.hva;
5414
5415 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
5416 (new_vmcs12->hdr.shadow_vmcs &&
5417 !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
5418 kvm_vcpu_unmap(vcpu, &map, false);
5419 return nested_vmx_fail(vcpu,
5420 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5421 }
5422
5423 nested_release_vmcs12(vcpu);
5424
5425 /*
5426 * Load VMCS12 from guest memory since it is not already
5427 * cached.
5428 */
5429 memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
5430 kvm_vcpu_unmap(vcpu, &map, false);
5431
5432 set_current_vmptr(vmx, vmptr);
5433 }
5434
5435 return nested_vmx_succeed(vcpu);
5436 }
5437
5438 /* Emulate the VMPTRST instruction */
5439 static int handle_vmptrst(struct kvm_vcpu *vcpu)
5440 {
5441 unsigned long exit_qual = vmx_get_exit_qual(vcpu);
5442 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5443 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
5444 struct x86_exception e;
5445 gva_t gva;
5446 int r;
5447
5448 if (!nested_vmx_check_permission(vcpu))
5449 return 1;
5450
5451 if (unlikely(evmptr_is_valid(to_vmx(vcpu)->nested.hv_evmcs_vmptr)))
5452 return 1;
5453
5454 if (get_vmx_mem_address(vcpu, exit_qual, instr_info,
5455 true, sizeof(gpa_t), &gva))
5456 return 1;
5457 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
5458 	r = kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
5459 sizeof(gpa_t), &e);
5460 if (r != X86EMUL_CONTINUE)
5461 return kvm_handle_memory_failure(vcpu, r, &e);
5462
5463 return nested_vmx_succeed(vcpu);
5464 }
5465
5466 /* Emulate the INVEPT instruction */
5467 static int handle_invept(struct kvm_vcpu *vcpu)
5468 {
5469 struct vcpu_vmx *vmx = to_vmx(vcpu);
5470 u32 vmx_instruction_info, types;
5471 unsigned long type, roots_to_free;
5472 struct kvm_mmu *mmu;
5473 gva_t gva;
5474 struct x86_exception e;
5475 struct {
5476 u64 eptp, gpa;
5477 } operand;
5478 int i, r;
5479
5480 if (!(vmx->nested.msrs.secondary_ctls_high &
5481 SECONDARY_EXEC_ENABLE_EPT) ||
5482 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
5483 kvm_queue_exception(vcpu, UD_VECTOR);
5484 return 1;
5485 }
5486
5487 if (!nested_vmx_check_permission(vcpu))
5488 return 1;
5489
5490 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5491 type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
5492
5493 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
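	/*
	 * Shifting the EPT capabilities right by VMX_EPT_EXTENT_SHIFT lines
	 * the INVEPT-type support bits up with the type numbers used below;
	 * the "& 6" keeps only type 1 (single-context) and type 2
	 * (all-context), the only architectural INVEPT types.
	 */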
5494
5495 if (type >= 32 || !(types & (1 << type)))
5496 return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5497
5498 /* According to the Intel VMX instruction reference, the memory
5499 * operand is read even if it isn't needed (e.g., for type==global)
5500 */
5501 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
5502 vmx_instruction_info, false, sizeof(operand), &gva))
5503 return 1;
5504 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
5505 if (r != X86EMUL_CONTINUE)
5506 return kvm_handle_memory_failure(vcpu, r, &e);
5507
5508 /*
5509 * Nested EPT roots are always held through guest_mmu,
5510 * not root_mmu.
5511 */
5512 mmu = &vcpu->arch.guest_mmu;
5513
5514 switch (type) {
5515 case VMX_EPT_EXTENT_CONTEXT:
5516 if (!nested_vmx_check_eptp(vcpu, operand.eptp))
5517 return nested_vmx_fail(vcpu,
5518 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5519
5520 roots_to_free = 0;
5521 if (nested_ept_root_matches(mmu->root_hpa, mmu->root_pgd,
5522 operand.eptp))
5523 roots_to_free |= KVM_MMU_ROOT_CURRENT;
5524
5525 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5526 if (nested_ept_root_matches(mmu->prev_roots[i].hpa,
5527 mmu->prev_roots[i].pgd,
5528 operand.eptp))
5529 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
5530 }
5531 break;
5532 case VMX_EPT_EXTENT_GLOBAL:
5533 roots_to_free = KVM_MMU_ROOTS_ALL;
5534 break;
5535 default:
5536 BUG();
5537 break;
5538 }
5539
5540 if (roots_to_free)
5541 kvm_mmu_free_roots(vcpu, mmu, roots_to_free);
5542
5543 return nested_vmx_succeed(vcpu);
5544 }
5545
5546 static int handle_invvpid(struct kvm_vcpu *vcpu)
5547 {
5548 struct vcpu_vmx *vmx = to_vmx(vcpu);
5549 u32 vmx_instruction_info;
5550 unsigned long type, types;
5551 gva_t gva;
5552 struct x86_exception e;
5553 struct {
5554 u64 vpid;
5555 u64 gla;
5556 } operand;
5557 u16 vpid02;
5558 int r;
5559
5560 if (!(vmx->nested.msrs.secondary_ctls_high &
5561 SECONDARY_EXEC_ENABLE_VPID) ||
5562 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
5563 kvm_queue_exception(vcpu, UD_VECTOR);
5564 return 1;
5565 }
5566
5567 if (!nested_vmx_check_permission(vcpu))
5568 return 1;
5569
5570 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5571 type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
5572
5573 types = (vmx->nested.msrs.vpid_caps &
5574 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
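	/*
	 * The ">> 8" lines the INVVPID-type support bits (the masked bits of
	 * vpid_caps) up with the type numbers 0-3 used below, so the
	 * "1 << type" test consults the right capability bit.
	 */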
5575
5576 if (type >= 32 || !(types & (1 << type)))
5577 return nested_vmx_fail(vcpu,
5578 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5579
5580 	/* According to the Intel VMX instruction reference, the memory
5581 * operand is read even if it isn't needed (e.g., for type==global)
5582 */
5583 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
5584 vmx_instruction_info, false, sizeof(operand), &gva))
5585 return 1;
5586 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
5587 if (r != X86EMUL_CONTINUE)
5588 return kvm_handle_memory_failure(vcpu, r, &e);
5589
5590 if (operand.vpid >> 16)
5591 return nested_vmx_fail(vcpu,
5592 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5593
5594 vpid02 = nested_get_vpid02(vcpu);
5595 switch (type) {
5596 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
5597 if (!operand.vpid ||
5598 is_noncanonical_address(operand.gla, vcpu))
5599 return nested_vmx_fail(vcpu,
5600 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5601 vpid_sync_vcpu_addr(vpid02, operand.gla);
5602 break;
5603 case VMX_VPID_EXTENT_SINGLE_CONTEXT:
5604 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
5605 if (!operand.vpid)
5606 return nested_vmx_fail(vcpu,
5607 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5608 vpid_sync_context(vpid02);
5609 break;
5610 case VMX_VPID_EXTENT_ALL_CONTEXT:
5611 vpid_sync_context(vpid02);
5612 break;
5613 default:
5614 WARN_ON_ONCE(1);
5615 return kvm_skip_emulated_instruction(vcpu);
5616 }
5617
5618 /*
5619 * Sync the shadow page tables if EPT is disabled, L1 is invalidating
5620 * linear mappings for L2 (tagged with L2's VPID). Free all guest
5621 * roots as VPIDs are not tracked in the MMU role.
5622 *
5623 * Note, this operates on root_mmu, not guest_mmu, as L1 and L2 share
5624 * an MMU when EPT is disabled.
5625 *
5626 	 * TODO: sync only the affected SPTEs for INDIVIDUAL_ADDR.
5627 */
5628 if (!enable_ept)
5629 kvm_mmu_free_guest_mode_roots(vcpu, &vcpu->arch.root_mmu);
5630
5631 return nested_vmx_succeed(vcpu);
5632 }
5633
5634 static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
5635 struct vmcs12 *vmcs12)
5636 {
5637 u32 index = kvm_rcx_read(vcpu);
5638 u64 new_eptp;
5639
5640 if (WARN_ON_ONCE(!nested_cpu_has_ept(vmcs12)))
5641 return 1;
5642 if (index >= VMFUNC_EPTP_ENTRIES)
5643 return 1;
5644
5645 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
5646 &new_eptp, index * 8, 8))
5647 return 1;
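	/*
	 * The EPTP list is a single 4-KByte page holding VMFUNC_EPTP_ENTRIES
	 * (512) 64-bit EPTP values, hence the "index * 8" byte offset used in
	 * the read above.
	 */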
5648
5649 /*
5650 * If the (L2) guest does a vmfunc to the currently
5651 * active ept pointer, we don't have to do anything else
5652 */
5653 if (vmcs12->ept_pointer != new_eptp) {
5654 if (!nested_vmx_check_eptp(vcpu, new_eptp))
5655 return 1;
5656
5657 vmcs12->ept_pointer = new_eptp;
5658 nested_ept_new_eptp(vcpu);
5659
5660 if (!nested_cpu_has_vpid(vmcs12))
5661 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
5662 }
5663
5664 return 0;
5665 }
5666
5667 static int handle_vmfunc(struct kvm_vcpu *vcpu)
5668 {
5669 struct vcpu_vmx *vmx = to_vmx(vcpu);
5670 struct vmcs12 *vmcs12;
5671 u32 function = kvm_rax_read(vcpu);
5672
5673 /*
5674 * VMFUNC is only supported for nested guests, but we always enable the
5675 * secondary control for simplicity; for non-nested mode, fake that we
5676 	 * didn't enable it by injecting #UD.
5677 */
5678 if (!is_guest_mode(vcpu)) {
5679 kvm_queue_exception(vcpu, UD_VECTOR);
5680 return 1;
5681 }
5682
5683 vmcs12 = get_vmcs12(vcpu);
5684
5685 /*
5686 * #UD on out-of-bounds function has priority over VM-Exit, and VMFUNC
5687 * is enabled in vmcs02 if and only if it's enabled in vmcs12.
5688 */
5689 if (WARN_ON_ONCE((function > 63) || !nested_cpu_has_vmfunc(vmcs12))) {
5690 kvm_queue_exception(vcpu, UD_VECTOR);
5691 return 1;
5692 }
5693
5694 if (!(vmcs12->vm_function_control & BIT_ULL(function)))
5695 goto fail;
5696
5697 switch (function) {
5698 case 0:
5699 if (nested_vmx_eptp_switching(vcpu, vmcs12))
5700 goto fail;
5701 break;
5702 default:
5703 goto fail;
5704 }
5705 return kvm_skip_emulated_instruction(vcpu);
5706
5707 fail:
5708 /*
5709 * This is effectively a reflected VM-Exit, as opposed to a synthesized
5710 * nested VM-Exit. Pass the original exit reason, i.e. don't hardcode
5711 * EXIT_REASON_VMFUNC as the exit reason.
5712 */
5713 nested_vmx_vmexit(vcpu, vmx->exit_reason.full,
5714 vmx_get_intr_info(vcpu),
5715 vmx_get_exit_qual(vcpu));
5716 return 1;
5717 }
5718
5719 /*
5720 * Return true if an IO instruction with the specified port and size should cause
5721 * a VM-exit into L1.
5722 */
5723 bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
5724 int size)
5725 {
5726 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5727 gpa_t bitmap, last_bitmap;
5728 u8 b;
5729
5730 last_bitmap = (gpa_t)-1;
5731 b = -1;
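	/*
	 * I/O bitmap A covers ports 0x0000-0x7fff and bitmap B covers ports
	 * 0x8000-0xffff, one bit per port. As an illustrative example, a
	 * 4-byte access to port 0xcf8 consults bits 0-3 of byte 0xcf8 / 8 =
	 * 0x19f of bitmap A; an access spanning 0x7fff-0x8000 is checked one
	 * port at a time and so consults both bitmaps.
	 */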
5732
5733 while (size > 0) {
5734 if (port < 0x8000)
5735 bitmap = vmcs12->io_bitmap_a;
5736 else if (port < 0x10000)
5737 bitmap = vmcs12->io_bitmap_b;
5738 else
5739 return true;
5740 bitmap += (port & 0x7fff) / 8;
5741
5742 if (last_bitmap != bitmap)
5743 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
5744 return true;
5745 if (b & (1 << (port & 7)))
5746 return true;
5747
5748 port++;
5749 size--;
5750 last_bitmap = bitmap;
5751 }
5752
5753 return false;
5754 }
5755
5756 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
5757 struct vmcs12 *vmcs12)
5758 {
5759 unsigned long exit_qualification;
5760 unsigned short port;
5761 int size;
5762
5763 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
5764 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
5765
5766 exit_qualification = vmx_get_exit_qual(vcpu);
5767
5768 port = exit_qualification >> 16;
5769 size = (exit_qualification & 7) + 1;
5770
5771 return nested_vmx_check_io_bitmaps(vcpu, port, size);
5772 }
5773
5774 /*
5775 * Return 1 if we should exit from L2 to L1 to handle an MSR access,
5776 * rather than handle it ourselves in L0. I.e., check whether L1 expressed
5777 * disinterest in the current event (read or write a specific MSR) by using an
5778 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
5779 */
5780 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
5781 struct vmcs12 *vmcs12,
5782 union vmx_exit_reason exit_reason)
5783 {
5784 u32 msr_index = kvm_rcx_read(vcpu);
5785 gpa_t bitmap;
5786
5787 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
5788 return true;
5789
5790 /*
5791 * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
5792 * for the four combinations of read/write and low/high MSR numbers.
5793 * First we need to figure out which of the four to use:
5794 */
5795 bitmap = vmcs12->msr_bitmap;
5796 if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
5797 bitmap += 2048;
5798 if (msr_index >= 0xc0000000) {
5799 msr_index -= 0xc0000000;
5800 bitmap += 1024;
5801 }
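	/*
	 * Worked example (illustrative): a WRMSR to MSR 0xc0000080 (IA32_EFER)
	 * lands in the write/high-MSR quadrant at offset 2048 + 1024 = 3072,
	 * and the bit consulted below is bit 0 of byte 0x80 / 8 = 16 within
	 * that quadrant.
	 */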
5802
5803 /* Then read the msr_index'th bit from this bitmap: */
5804 if (msr_index < 1024*8) {
5805 unsigned char b;
5806 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
5807 return true;
5808 return 1 & (b >> (msr_index & 7));
5809 } else
5810 return true; /* let L1 handle the wrong parameter */
5811 }
5812
5813 /*
5814 * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
5815 * rather than handle it ourselves in L0. I.e., check if L1 wanted to
5816 * intercept (via guest_host_mask etc.) the current event.
5817 */
5818 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
5819 struct vmcs12 *vmcs12)
5820 {
5821 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5822 int cr = exit_qualification & 15;
5823 int reg;
5824 unsigned long val;
5825
5826 switch ((exit_qualification >> 4) & 3) {
5827 case 0: /* mov to cr */
5828 reg = (exit_qualification >> 8) & 15;
5829 val = kvm_register_read(vcpu, reg);
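		/*
		 * For CR0/CR4 below, L1 wants the exit iff the new value
		 * differs from L1's read shadow in at least one bit that L1
		 * owns via the guest/host mask.
		 */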
5830 switch (cr) {
5831 case 0:
5832 if (vmcs12->cr0_guest_host_mask &
5833 (val ^ vmcs12->cr0_read_shadow))
5834 return true;
5835 break;
5836 case 3:
5837 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
5838 return true;
5839 break;
5840 case 4:
5841 if (vmcs12->cr4_guest_host_mask &
5842 (vmcs12->cr4_read_shadow ^ val))
5843 return true;
5844 break;
5845 case 8:
5846 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
5847 return true;
5848 break;
5849 }
5850 break;
5851 case 2: /* clts */
5852 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
5853 (vmcs12->cr0_read_shadow & X86_CR0_TS))
5854 return true;
5855 break;
5856 case 1: /* mov from cr */
5857 switch (cr) {
5858 case 3:
5859 if (vmcs12->cpu_based_vm_exec_control &
5860 CPU_BASED_CR3_STORE_EXITING)
5861 return true;
5862 break;
5863 case 8:
5864 if (vmcs12->cpu_based_vm_exec_control &
5865 CPU_BASED_CR8_STORE_EXITING)
5866 return true;
5867 break;
5868 }
5869 break;
5870 case 3: /* lmsw */
5871 /*
5872 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
5873 * cr0. Other attempted changes are ignored, with no exit.
5874 */
5875 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
5876 if (vmcs12->cr0_guest_host_mask & 0xe &
5877 (val ^ vmcs12->cr0_read_shadow))
5878 return true;
5879 if ((vmcs12->cr0_guest_host_mask & 0x1) &&
5880 !(vmcs12->cr0_read_shadow & 0x1) &&
5881 (val & 0x1))
5882 return true;
5883 break;
5884 }
5885 return false;
5886 }
5887
5888 static bool nested_vmx_exit_handled_encls(struct kvm_vcpu *vcpu,
5889 struct vmcs12 *vmcs12)
5890 {
5891 u32 encls_leaf;
5892
5893 if (!guest_cpuid_has(vcpu, X86_FEATURE_SGX) ||
5894 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING))
5895 return false;
5896
5897 encls_leaf = kvm_rax_read(vcpu);
5898 if (encls_leaf > 62)
5899 encls_leaf = 63;
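	/*
	 * Leaves >= 63 all map to bit 63 of the ENCLS-exiting bitmap, which
	 * is why out-of-range leaf numbers are clamped to 63 above.
	 */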
5900 return vmcs12->encls_exiting_bitmap & BIT_ULL(encls_leaf);
5901 }
5902
5903 static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
5904 struct vmcs12 *vmcs12, gpa_t bitmap)
5905 {
5906 u32 vmx_instruction_info;
5907 unsigned long field;
5908 u8 b;
5909
5910 if (!nested_cpu_has_shadow_vmcs(vmcs12))
5911 return true;
5912
5913 /* Decode instruction info and find the field to access */
5914 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5915 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
5916
5917 /* Out-of-range fields always cause a VM exit from L2 to L1 */
5918 if (field >> 15)
5919 return true;
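	/*
	 * The VMREAD/VMWRITE bitmaps have one bit per field encoding in the
	 * range 0-0x7fff; encodings with any higher bit set cannot be
	 * represented and therefore always exit, per the check above.
	 */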
5920
5921 if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
5922 return true;
5923
5924 return 1 & (b >> (field & 7));
5925 }
5926
5927 static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
5928 {
5929 u32 entry_intr_info = vmcs12->vm_entry_intr_info_field;
5930
5931 if (nested_cpu_has_mtf(vmcs12))
5932 return true;
5933
5934 /*
5935 * An MTF VM-exit may be injected into the guest by setting the
5936 * interruption-type to 7 (other event) and the vector field to 0. Such
5937 * is the case regardless of the 'monitor trap flag' VM-execution
5938 * control.
5939 */
5940 return entry_intr_info == (INTR_INFO_VALID_MASK
5941 | INTR_TYPE_OTHER_EVENT);
5942 }
5943
5944 /*
5945 * Return true if L0 wants to handle an exit from L2 regardless of whether or not
5946 * L1 wants the exit. Only call this when in is_guest_mode (L2).
5947 */
5948 static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
5949 union vmx_exit_reason exit_reason)
5950 {
5951 u32 intr_info;
5952
5953 switch ((u16)exit_reason.basic) {
5954 case EXIT_REASON_EXCEPTION_NMI:
5955 intr_info = vmx_get_intr_info(vcpu);
5956 if (is_nmi(intr_info))
5957 return true;
5958 else if (is_page_fault(intr_info))
5959 return vcpu->arch.apf.host_apf_flags ||
5960 vmx_need_pf_intercept(vcpu);
5961 else if (is_debug(intr_info) &&
5962 vcpu->guest_debug &
5963 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
5964 return true;
5965 else if (is_breakpoint(intr_info) &&
5966 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
5967 return true;
5968 else if (is_alignment_check(intr_info) &&
5969 !vmx_guest_inject_ac(vcpu))
5970 return true;
5971 return false;
5972 case EXIT_REASON_EXTERNAL_INTERRUPT:
5973 return true;
5974 case EXIT_REASON_MCE_DURING_VMENTRY:
5975 return true;
5976 case EXIT_REASON_EPT_VIOLATION:
5977 /*
5978 * L0 always deals with the EPT violation. If nested EPT is
5979 * used, and the nested mmu code discovers that the address is
5980 * missing in the guest EPT table (EPT12), the EPT violation
5981 * will be injected with nested_ept_inject_page_fault()
5982 */
5983 return true;
5984 case EXIT_REASON_EPT_MISCONFIG:
5985 /*
5986 * L2 never uses directly L1's EPT, but rather L0's own EPT
5987 * table (shadow on EPT) or a merged EPT table that L0 built
5988 * (EPT on EPT). So any problems with the structure of the
5989 * table is L0's fault.
5990 */
5991 return true;
5992 case EXIT_REASON_PREEMPTION_TIMER:
5993 return true;
5994 case EXIT_REASON_PML_FULL:
5995 /*
5996 * PML is emulated for an L1 VMM and should never be enabled in
5997 * vmcs02, always "handle" PML_FULL by exiting to userspace.
5998 */
5999 return true;
6000 case EXIT_REASON_VMFUNC:
6001 /* VM functions are emulated through L2->L0 vmexits. */
6002 return true;
6003 case EXIT_REASON_BUS_LOCK:
6004 /*
6005 * At present, bus lock VM exit is never exposed to L1.
6006 * Handle L2's bus locks in L0 directly.
6007 */
6008 return true;
6009 default:
6010 break;
6011 }
6012 return false;
6013 }
6014
6015 /*
6016 * Return 1 if L1 wants to intercept an exit from L2. Only call this when in
6017 * is_guest_mode (L2).
6018 */
6019 static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu,
6020 union vmx_exit_reason exit_reason)
6021 {
6022 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6023 u32 intr_info;
6024
6025 switch ((u16)exit_reason.basic) {
6026 case EXIT_REASON_EXCEPTION_NMI:
6027 intr_info = vmx_get_intr_info(vcpu);
6028 if (is_nmi(intr_info))
6029 return true;
6030 else if (is_page_fault(intr_info))
6031 return true;
6032 return vmcs12->exception_bitmap &
6033 (1u << (intr_info & INTR_INFO_VECTOR_MASK));
6034 case EXIT_REASON_EXTERNAL_INTERRUPT:
6035 return nested_exit_on_intr(vcpu);
6036 case EXIT_REASON_TRIPLE_FAULT:
6037 return true;
6038 case EXIT_REASON_INTERRUPT_WINDOW:
6039 return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING);
6040 case EXIT_REASON_NMI_WINDOW:
6041 return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING);
6042 case EXIT_REASON_TASK_SWITCH:
6043 return true;
6044 case EXIT_REASON_CPUID:
6045 return true;
6046 case EXIT_REASON_HLT:
6047 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
6048 case EXIT_REASON_INVD:
6049 return true;
6050 case EXIT_REASON_INVLPG:
6051 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
6052 case EXIT_REASON_RDPMC:
6053 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
6054 case EXIT_REASON_RDRAND:
6055 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
6056 case EXIT_REASON_RDSEED:
6057 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
6058 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
6059 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
6060 case EXIT_REASON_VMREAD:
6061 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
6062 vmcs12->vmread_bitmap);
6063 case EXIT_REASON_VMWRITE:
6064 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
6065 vmcs12->vmwrite_bitmap);
6066 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
6067 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
6068 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
6069 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
6070 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
6071 /*
6072 * VMX instructions trap unconditionally. This allows L1 to
6073 * emulate them for its L2 guest, i.e., allows 3-level nesting!
6074 */
6075 return true;
6076 case EXIT_REASON_CR_ACCESS:
6077 return nested_vmx_exit_handled_cr(vcpu, vmcs12);
6078 case EXIT_REASON_DR_ACCESS:
6079 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
6080 case EXIT_REASON_IO_INSTRUCTION:
6081 return nested_vmx_exit_handled_io(vcpu, vmcs12);
6082 case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
6083 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
6084 case EXIT_REASON_MSR_READ:
6085 case EXIT_REASON_MSR_WRITE:
6086 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
6087 case EXIT_REASON_INVALID_STATE:
6088 return true;
6089 case EXIT_REASON_MWAIT_INSTRUCTION:
6090 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
6091 case EXIT_REASON_MONITOR_TRAP_FLAG:
6092 return nested_vmx_exit_handled_mtf(vmcs12);
6093 case EXIT_REASON_MONITOR_INSTRUCTION:
6094 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
6095 case EXIT_REASON_PAUSE_INSTRUCTION:
6096 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
6097 nested_cpu_has2(vmcs12,
6098 SECONDARY_EXEC_PAUSE_LOOP_EXITING);
6099 case EXIT_REASON_MCE_DURING_VMENTRY:
6100 return true;
6101 case EXIT_REASON_TPR_BELOW_THRESHOLD:
6102 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
6103 case EXIT_REASON_APIC_ACCESS:
6104 case EXIT_REASON_APIC_WRITE:
6105 case EXIT_REASON_EOI_INDUCED:
6106 /*
6107 * The controls for "virtualize APIC accesses," "APIC-
6108 * register virtualization," and "virtual-interrupt
6109 * delivery" only come from vmcs12.
6110 */
6111 return true;
6112 case EXIT_REASON_INVPCID:
6113 return
6114 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
6115 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
6116 case EXIT_REASON_WBINVD:
6117 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
6118 case EXIT_REASON_XSETBV:
6119 return true;
6120 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
6121 /*
6122 * This should never happen, since it is not possible to
6123 		 * set XSS to a non-zero value, neither in L1 nor in L2.
6124 		 * If it were, XSS would have to be checked against
6125 * the XSS exit bitmap in vmcs12.
6126 */
6127 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
6128 case EXIT_REASON_UMWAIT:
6129 case EXIT_REASON_TPAUSE:
6130 return nested_cpu_has2(vmcs12,
6131 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE);
6132 case EXIT_REASON_ENCLS:
6133 return nested_vmx_exit_handled_encls(vcpu, vmcs12);
6134 default:
6135 return true;
6136 }
6137 }
6138
6139 /*
6140 * Conditionally reflect a VM-Exit into L1. Returns %true if the VM-Exit was
6141 * reflected into L1.
6142 */
6143 bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
6144 {
6145 struct vcpu_vmx *vmx = to_vmx(vcpu);
6146 union vmx_exit_reason exit_reason = vmx->exit_reason;
6147 unsigned long exit_qual;
6148 u32 exit_intr_info;
6149
6150 WARN_ON_ONCE(vmx->nested.nested_run_pending);
6151
6152 /*
6153 * Late nested VM-Fail shares the same flow as nested VM-Exit since KVM
6154 * has already loaded L2's state.
6155 */
6156 if (unlikely(vmx->fail)) {
6157 trace_kvm_nested_vmenter_failed(
6158 "hardware VM-instruction error: ",
6159 vmcs_read32(VM_INSTRUCTION_ERROR));
6160 exit_intr_info = 0;
6161 exit_qual = 0;
6162 goto reflect_vmexit;
6163 }
6164
6165 trace_kvm_nested_vmexit(exit_reason.full, vcpu, KVM_ISA_VMX);
6166
6167 /* If L0 (KVM) wants the exit, it trumps L1's desires. */
6168 if (nested_vmx_l0_wants_exit(vcpu, exit_reason))
6169 return false;
6170
6171 /* If L1 doesn't want the exit, handle it in L0. */
6172 if (!nested_vmx_l1_wants_exit(vcpu, exit_reason))
6173 return false;
6174
6175 /*
6176 * vmcs.VM_EXIT_INTR_INFO is only valid for EXCEPTION_NMI exits. For
6177 * EXTERNAL_INTERRUPT, the value for vmcs12->vm_exit_intr_info would
6178 * need to be synthesized by querying the in-kernel LAPIC, but external
6179 * interrupts are never reflected to L1 so it's a non-issue.
6180 */
6181 exit_intr_info = vmx_get_intr_info(vcpu);
6182 if (is_exception_with_error_code(exit_intr_info)) {
6183 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6184
6185 vmcs12->vm_exit_intr_error_code =
6186 vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
6187 }
6188 exit_qual = vmx_get_exit_qual(vcpu);
6189
6190 reflect_vmexit:
6191 nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual);
6192 return true;
6193 }
6194
6195 static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
6196 struct kvm_nested_state __user *user_kvm_nested_state,
6197 u32 user_data_size)
6198 {
6199 struct vcpu_vmx *vmx;
6200 struct vmcs12 *vmcs12;
6201 struct kvm_nested_state kvm_state = {
6202 .flags = 0,
6203 .format = KVM_STATE_NESTED_FORMAT_VMX,
6204 .size = sizeof(kvm_state),
6205 .hdr.vmx.flags = 0,
6206 .hdr.vmx.vmxon_pa = -1ull,
6207 .hdr.vmx.vmcs12_pa = -1ull,
6208 .hdr.vmx.preemption_timer_deadline = 0,
6209 };
6210 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
6211 &user_kvm_nested_state->data.vmx[0];
6212
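	/*
	 * A NULL vcpu (e.g. the KVM_CAP_NESTED_STATE capability query) only
	 * asks for the maximum amount of state that can be returned.
	 */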
6213 if (!vcpu)
6214 return kvm_state.size + sizeof(*user_vmx_nested_state);
6215
6216 vmx = to_vmx(vcpu);
6217 vmcs12 = get_vmcs12(vcpu);
6218
6219 if (nested_vmx_allowed(vcpu) &&
6220 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
6221 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
6222 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;
6223
6224 if (vmx_has_valid_vmcs12(vcpu)) {
6225 kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
6226
6227 /* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING here */
6228 if (vmx->nested.hv_evmcs_vmptr != EVMPTR_INVALID)
6229 kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
6230
6231 if (is_guest_mode(vcpu) &&
6232 nested_cpu_has_shadow_vmcs(vmcs12) &&
6233 vmcs12->vmcs_link_pointer != -1ull)
6234 kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12);
6235 }
6236
6237 if (vmx->nested.smm.vmxon)
6238 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
6239
6240 if (vmx->nested.smm.guest_mode)
6241 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
6242
6243 if (is_guest_mode(vcpu)) {
6244 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
6245
6246 if (vmx->nested.nested_run_pending)
6247 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
6248
6249 if (vmx->nested.mtf_pending)
6250 kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING;
6251
6252 if (nested_cpu_has_preemption_timer(vmcs12) &&
6253 vmx->nested.has_preemption_timer_deadline) {
6254 kvm_state.hdr.vmx.flags |=
6255 KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE;
6256 kvm_state.hdr.vmx.preemption_timer_deadline =
6257 vmx->nested.preemption_timer_deadline;
6258 }
6259 }
6260 }
6261
6262 if (user_data_size < kvm_state.size)
6263 goto out;
6264
6265 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
6266 return -EFAULT;
6267
6268 if (!vmx_has_valid_vmcs12(vcpu))
6269 goto out;
6270
6271 /*
6272 * When running L2, the authoritative vmcs12 state is in the
6273 * vmcs02. When running L1, the authoritative vmcs12 state is
6274 * in the shadow or enlightened vmcs linked to vmcs01, unless
6275 * need_vmcs12_to_shadow_sync is set, in which case, the authoritative
6276 * vmcs12 state is in the vmcs12 already.
6277 */
6278 if (is_guest_mode(vcpu)) {
6279 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
6280 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
6281 } else {
6282 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
6283 if (!vmx->nested.need_vmcs12_to_shadow_sync) {
6284 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
6285 /*
6286 				 * The L1 hypervisor is not obliged to keep the
6287 				 * eVMCS clean-fields data up-to-date while not
6288 				 * in guest mode; 'hv_clean_fields' is only
6289 				 * guaranteed to be accurate at vmentry, so
6290 				 * ignore it here and do a full copy.
6291 */
6292 copy_enlightened_to_vmcs12(vmx, 0);
6293 else if (enable_shadow_vmcs)
6294 copy_shadow_to_vmcs12(vmx);
6295 }
6296 }
6297
6298 BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
6299 BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE);
6300
6301 /*
6302 * Copy over the full allocated size of vmcs12 rather than just the size
6303 * of the struct.
6304 */
6305 if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE))
6306 return -EFAULT;
6307
6308 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
6309 vmcs12->vmcs_link_pointer != -1ull) {
6310 if (copy_to_user(user_vmx_nested_state->shadow_vmcs12,
6311 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
6312 return -EFAULT;
6313 }
6314 out:
6315 return kvm_state.size;
6316 }
6317
6318 void vmx_leave_nested(struct kvm_vcpu *vcpu)
6319 {
6320 if (is_guest_mode(vcpu)) {
6321 to_vmx(vcpu)->nested.nested_run_pending = 0;
6322 nested_vmx_vmexit(vcpu, -1, 0, 0);
6323 }
6324 free_nested(vcpu);
6325 }
6326
vmx_set_nested_state(struct kvm_vcpu * vcpu,struct kvm_nested_state __user * user_kvm_nested_state,struct kvm_nested_state * kvm_state)6327 static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
6328 struct kvm_nested_state __user *user_kvm_nested_state,
6329 struct kvm_nested_state *kvm_state)
6330 {
6331 struct vcpu_vmx *vmx = to_vmx(vcpu);
6332 struct vmcs12 *vmcs12;
6333 enum vm_entry_failure_code ignored;
6334 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
6335 &user_kvm_nested_state->data.vmx[0];
6336 int ret;
6337
6338 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX)
6339 return -EINVAL;
6340
6341 if (kvm_state->hdr.vmx.vmxon_pa == -1ull) {
6342 if (kvm_state->hdr.vmx.smm.flags)
6343 return -EINVAL;
6344
6345 if (kvm_state->hdr.vmx.vmcs12_pa != -1ull)
6346 return -EINVAL;
6347
		/*
		 * KVM_STATE_NESTED_EVMCS used to signal that KVM should
		 * enable the eVMCS capability on the vCPU.  However, the code
		 * has since been changed such that the flag signals that
		 * vmcs12 should be copied into the eVMCS in guest memory.
		 *
		 * To preserve backward compatibility, allow userspace to set
		 * this flag even when there is no VMXON region.
		 */
		if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
			return -EINVAL;
	} else {
		if (!nested_vmx_allowed(vcpu))
			return -EINVAL;

		if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
			return -EINVAL;
	}

	if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
	    (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return -EINVAL;

	if (kvm_state->hdr.vmx.smm.flags &
	    ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
		return -EINVAL;

	if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE)
		return -EINVAL;

	/*
	 * SMM temporarily disables VMX, so we cannot be in guest mode,
	 * nor can VMLAUNCH/VMRESUME be pending.  Outside SMM, SMM flags
	 * must be zero.
	 */
	if (is_smm(vcpu) ?
		(kvm_state->flags &
		 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING))
		: kvm_state->hdr.vmx.smm.flags)
		return -EINVAL;

	if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
	    !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
		return -EINVAL;

	if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) &&
	    (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled))
		return -EINVAL;

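	/*
	 * Force a VM-exit from L2 (if the vCPU was in guest mode) and free any
	 * existing nested state so that restore starts from a clean slate.
	 */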
	vmx_leave_nested(vcpu);

	if (kvm_state->hdr.vmx.vmxon_pa == -1ull)
		return 0;

	vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
	ret = enter_vmx_operation(vcpu);
	if (ret)
		return ret;

	/* Empty 'VMXON' state is permitted if no VMCS loaded */
	if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) {
		/* See vmx_has_valid_vmcs12. */
		if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) ||
		    (kvm_state->flags & KVM_STATE_NESTED_EVMCS) ||
		    (kvm_state->hdr.vmx.vmcs12_pa != -1ull))
			return -EINVAL;
		else
			return 0;
	}

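	/*
	 * The vmcs12 contents come from exactly one of two places: a regular
	 * VMCS located at vmcs12_pa, or an enlightened VMCS whose mapping is
	 * deferred until KVM_REQ_GET_NESTED_STATE_PAGES is serviced.
	 */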
	if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) {
		if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
		    !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa))
			return -EINVAL;

		set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa);
	} else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
		/*
		 * nested_vmx_handle_enlightened_vmptrld() cannot be called
		 * directly from here as HV_X64_MSR_VP_ASSIST_PAGE may not be
		 * restored yet. EVMCS will be mapped from
		 * nested_get_vmcs12_pages().
		 */
		vmx->nested.hv_evmcs_vmptr = EVMPTR_MAP_PENDING;
		kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
	} else {
		return -EINVAL;
	}

	if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
		vmx->nested.smm.vmxon = true;
		vmx->nested.vmxon = false;

		if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
			vmx->nested.smm.guest_mode = true;
	}

	vmcs12 = get_vmcs12(vcpu);
	if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12)))
		return -EFAULT;

	if (vmcs12->hdr.revision_id != VMCS12_REVISION)
		return -EINVAL;

	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return 0;

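	/*
	 * L2 was active when the state was saved: restore the in-flight event
	 * state and then re-enter VMX non-root mode below.
	 */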
	vmx->nested.nested_run_pending =
		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);

	vmx->nested.mtf_pending =
		!!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING);

	ret = -EINVAL;
	if (nested_cpu_has_shadow_vmcs(vmcs12) &&
	    vmcs12->vmcs_link_pointer != -1ull) {
		struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);

		if (kvm_state->size <
		    sizeof(*kvm_state) +
		    sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12))
			goto error_guest_mode;

		if (copy_from_user(shadow_vmcs12,
				   user_vmx_nested_state->shadow_vmcs12,
				   sizeof(*shadow_vmcs12))) {
			ret = -EFAULT;
			goto error_guest_mode;
		}

		if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
		    !shadow_vmcs12->hdr.shadow_vmcs)
			goto error_guest_mode;
	}

	vmx->nested.has_preemption_timer_deadline = false;
	if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) {
		vmx->nested.has_preemption_timer_deadline = true;
		vmx->nested.preemption_timer_deadline =
			kvm_state->hdr.vmx.preemption_timer_deadline;
	}

	if (nested_vmx_check_controls(vcpu, vmcs12) ||
	    nested_vmx_check_host_state(vcpu, vmcs12) ||
	    nested_vmx_check_guest_state(vcpu, vmcs12, &ignored))
		goto error_guest_mode;

	vmx->nested.dirty_vmcs12 = true;
	ret = nested_vmx_enter_non_root_mode(vcpu, false);
	if (ret)
		goto error_guest_mode;

	return 0;

error_guest_mode:
	vmx->nested.nested_run_pending = 0;
	return ret;
}

void nested_vmx_set_vmcs_shadowing_bitmap(void)
{
	if (enable_shadow_vmcs) {
		vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
		vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
	}
}
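
/*
 * Architecturally, a bit set in the VMREAD/VMWRITE bitmap forces the
 * corresponding VMREAD/VMWRITE executed by L1 to VM-exit to L0; only the
 * fields whose bits were cleared when the bitmaps were initialized may be
 * serviced by the CPU directly from the shadow VMCS.
 */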

/*
 * Indexing into the vmcs12 uses the VMCS encoding rotated left by 6.  Undo
 * that madness to get the encoding for comparison.
 */
#define VMCS12_IDX_TO_ENC(idx) ((u16)(((u16)(idx) >> 6) | ((u16)(idx) << 10)))
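/*
 * Worked example (values chosen purely for illustration): encoding 0x4000 is
 * stored at table index ROL16(0x4000, 6) == 0x0010, and
 * VMCS12_IDX_TO_ENC(0x0010) rotates right by 6 to recover 0x4000.
 */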

static u64 nested_vmx_calc_vmcs_enum_msr(void)
{
	/*
	 * Note these are the so called "index" of the VMCS field encoding, not
	 * the index into vmcs12.
	 */
	unsigned int max_idx, idx;
	int i;

	/*
	 * For better or worse, KVM allows VMREAD/VMWRITE to all fields in
	 * vmcs12, regardless of whether or not the associated feature is
	 * exposed to L1.  Simply find the field with the highest index.
	 */
	max_idx = 0;
	for (i = 0; i < nr_vmcs12_fields; i++) {
		/* The vmcs12 table is very, very sparsely populated. */
		if (!vmcs_field_to_offset_table[i])
			continue;

		idx = vmcs_field_index(VMCS12_IDX_TO_ENC(i));
		if (idx > max_idx)
			max_idx = idx;
	}

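	/*
	 * IA32_VMX_VMCS_ENUM reports the highest field index in bits 9:1,
	 * mirroring the index position within a field encoding, hence the
	 * shift by VMCS_FIELD_INDEX_SHIFT.
	 */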
	return (u64)max_idx << VMCS_FIELD_INDEX_SHIFT;
}

/*
 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
 * returned for the various VMX controls MSRs when nested VMX is enabled.
 * The same values should also be used to verify that vmcs12 control fields are
 * valid during nested entry from L1 to L2.
 * Each of these control MSRs has a low and high 32-bit half: a low bit is on
 * if the corresponding bit in the (32-bit) control field *must* be on, and a
 * bit in the high half is on if the corresponding bit in the control field
 * may be on. See also vmx_control_verify().
 */
void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
{
	/*
	 * Note that as a general rule, the high half of the MSRs (bits in
	 * the control fields which may be 1) should be initialized by the
	 * intersection of the underlying hardware's MSR (i.e., features which
	 * can be supported) and the list of features we want to expose -
	 * because they are known to be properly supported in our code.
	 * Also, usually, the low half of the MSRs (bits which must be 1) can
	 * be set to 0, meaning that L1 may turn off any of these bits.  The
	 * reason is that if one of these bits is necessary, it will appear
	 * in vmcs01, and prepare_vmcs02, which bitwise-ORs the control
	 * fields of vmcs01 and vmcs12, will keep the bit set in vmcs02 - and
	 * nested_vmx_l1_wants_exit() will not pass the resulting exits to L1.
	 * These rules have exceptions below.
	 */

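	/*
	 * Roughly, the resulting check (see vmx_control_verify()) is: a 32-bit
	 * control value 'val' taken from vmcs12 is accepted iff
	 * (val & low) == low and (val & ~high) == 0, where low/high are the
	 * corresponding halves set up below.
	 */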
	/* pin-based controls */
	rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
	      msrs->pinbased_ctls_low,
	      msrs->pinbased_ctls_high);
	msrs->pinbased_ctls_low |=
		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
	msrs->pinbased_ctls_high &=
		PIN_BASED_EXT_INTR_MASK |
		PIN_BASED_NMI_EXITING |
		PIN_BASED_VIRTUAL_NMIS |
		(enable_apicv ? PIN_BASED_POSTED_INTR : 0);
	msrs->pinbased_ctls_high |=
		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
		PIN_BASED_VMX_PREEMPTION_TIMER;

	/* exit controls */
	rdmsr(MSR_IA32_VMX_EXIT_CTLS,
	      msrs->exit_ctls_low,
	      msrs->exit_ctls_high);
	msrs->exit_ctls_low =
		VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;

	msrs->exit_ctls_high &=
#ifdef CONFIG_X86_64
		VM_EXIT_HOST_ADDR_SPACE_SIZE |
#endif
		VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT |
		VM_EXIT_CLEAR_BNDCFGS | VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
	msrs->exit_ctls_high |=
		VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
		VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;

	/* We support free control of debug control saving. */
	msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;

	/* entry controls */
	rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
	      msrs->entry_ctls_low,
	      msrs->entry_ctls_high);
	msrs->entry_ctls_low =
		VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
	msrs->entry_ctls_high &=
#ifdef CONFIG_X86_64
		VM_ENTRY_IA32E_MODE |
#endif
		VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS |
		VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
	msrs->entry_ctls_high |=
		(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);

	/* We support free control of debug control loading. */
	msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;

	/* cpu-based controls */
	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
	      msrs->procbased_ctls_low,
	      msrs->procbased_ctls_high);
	msrs->procbased_ctls_low =
		CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
	msrs->procbased_ctls_high &=
		CPU_BASED_INTR_WINDOW_EXITING |
		CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING |
		CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
		CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
		CPU_BASED_CR3_STORE_EXITING |
#ifdef CONFIG_X86_64
		CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
#endif
		CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
		CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
		CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
		CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
		CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	/*
	 * We can allow some features even when not supported by the
	 * hardware. For example, L1 can specify an MSR bitmap - and we
	 * can use it to avoid exits to L1 - even when L0 runs L2
	 * without MSR bitmaps.
	 */
	msrs->procbased_ctls_high |=
		CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
		CPU_BASED_USE_MSR_BITMAPS;

	/* We support free control of CR3 access interception. */
	msrs->procbased_ctls_low &=
		~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);

	/*
	 * secondary cpu-based controls.  Do not include those that
	 * depend on CPUID bits, they are added later by
	 * vmx_vcpu_after_set_cpuid.
	 */
	if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      msrs->secondary_ctls_low,
		      msrs->secondary_ctls_high);

	msrs->secondary_ctls_low = 0;
	msrs->secondary_ctls_high &=
		SECONDARY_EXEC_DESC |
		SECONDARY_EXEC_ENABLE_RDTSCP |
		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
		SECONDARY_EXEC_WBINVD_EXITING |
		SECONDARY_EXEC_APIC_REGISTER_VIRT |
		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
		SECONDARY_EXEC_RDRAND_EXITING |
		SECONDARY_EXEC_ENABLE_INVPCID |
		SECONDARY_EXEC_RDSEED_EXITING |
		SECONDARY_EXEC_XSAVES |
		SECONDARY_EXEC_TSC_SCALING |
		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;

	/*
	 * We can emulate "VMCS shadowing," even if the hardware
	 * doesn't support it.
	 */
	msrs->secondary_ctls_high |=
		SECONDARY_EXEC_SHADOW_VMCS;

	if (enable_ept) {
		/* nested EPT: emulate EPT also to L1 */
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_EPT;
		msrs->ept_caps =
			VMX_EPT_PAGE_WALK_4_BIT |
			VMX_EPT_PAGE_WALK_5_BIT |
			VMX_EPTP_WB_BIT |
			VMX_EPT_INVEPT_BIT |
			VMX_EPT_EXECUTE_ONLY_BIT;

		msrs->ept_caps &= ept_caps;
		msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
			VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
			VMX_EPT_1GB_PAGE_BIT;
		if (enable_ept_ad_bits) {
			msrs->secondary_ctls_high |=
				SECONDARY_EXEC_ENABLE_PML;
			msrs->ept_caps |= VMX_EPT_AD_BIT;
		}
	}

	if (cpu_has_vmx_vmfunc()) {
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_VMFUNC;
		/*
		 * Advertise EPTP switching unconditionally
		 * since we emulate it
		 */
		if (enable_ept)
			msrs->vmfunc_controls =
				VMX_VMFUNC_EPTP_SWITCHING;
	}

	/*
	 * Old versions of KVM use the single-context version without
	 * checking for support, so declare that it is supported even
	 * though it is treated as global context.  The alternative is
	 * not failing the single-context invvpid, and it is worse.
	 */
	if (enable_vpid) {
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_VPID;
		msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
			VMX_VPID_EXTENT_SUPPORTED_MASK;
	}

	if (enable_unrestricted_guest)
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_UNRESTRICTED_GUEST;

	if (flexpriority_enabled)
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;

	if (enable_sgx)
		msrs->secondary_ctls_high |= SECONDARY_EXEC_ENCLS_EXITING;

	/* miscellaneous data */
	rdmsr(MSR_IA32_VMX_MISC,
	      msrs->misc_low,
	      msrs->misc_high);
	msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
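	/*
	 * Per the SDM, bits 4:0 of IA32_VMX_MISC advertise the preemption
	 * timer rate: the timer counts down by one every 2^rate TSC cycles.
	 * The emulated rate of 5 therefore means one tick per 32 TSC cycles.
	 */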
	msrs->misc_low |=
		MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
		VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
		VMX_MISC_ACTIVITY_HLT |
		VMX_MISC_ACTIVITY_WAIT_SIPI;
	msrs->misc_high = 0;

	/*
	 * This MSR reports some information about VMX support. We
	 * should return information about the VMX we emulate for the
	 * guest, and the VMCS structure we give it - not about the
	 * VMX support of the underlying hardware.
	 */
	msrs->basic =
		VMCS12_REVISION |
		VMX_BASIC_TRUE_CTLS |
		((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
		(VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);

	if (cpu_has_vmx_basic_inout())
		msrs->basic |= VMX_BASIC_INOUT;

	/*
	 * These MSRs specify bits which the guest must keep fixed on
	 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
	 * We picked the standard core2 setting.
	 */
#define VMXON_CR0_ALWAYSON     (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
#define VMXON_CR4_ALWAYSON     X86_CR4_VMXE
	msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
	msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;

	/* These MSRs specify bits which the guest must keep fixed off. */
	rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
	rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);

	if (vmx_umip_emulated())
		msrs->cr4_fixed1 |= X86_CR4_UMIP;

	msrs->vmcs_enum = nested_vmx_calc_vmcs_enum_msr();
}

void nested_vmx_hardware_unsetup(void)
{
	int i;

	if (enable_shadow_vmcs) {
		for (i = 0; i < VMX_BITMAP_NR; i++)
			free_page((unsigned long)vmx_bitmap[i]);
	}
}

__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
{
	int i;

	if (!cpu_has_vmx_shadow_vmcs())
		enable_shadow_vmcs = 0;
	if (enable_shadow_vmcs) {
		for (i = 0; i < VMX_BITMAP_NR; i++) {
			/*
			 * The vmx_bitmap is not tied to a VM and so should
			 * not be charged to a memcg.
			 */
			vmx_bitmap[i] = (unsigned long *)
				__get_free_page(GFP_KERNEL);
			if (!vmx_bitmap[i]) {
				nested_vmx_hardware_unsetup();
				return -ENOMEM;
			}
		}

		init_vmcs_shadow_fields();
	}

	exit_handlers[EXIT_REASON_VMCLEAR]	= handle_vmclear;
	exit_handlers[EXIT_REASON_VMLAUNCH]	= handle_vmlaunch;
	exit_handlers[EXIT_REASON_VMPTRLD]	= handle_vmptrld;
	exit_handlers[EXIT_REASON_VMPTRST]	= handle_vmptrst;
	exit_handlers[EXIT_REASON_VMREAD]	= handle_vmread;
	exit_handlers[EXIT_REASON_VMRESUME]	= handle_vmresume;
	exit_handlers[EXIT_REASON_VMWRITE]	= handle_vmwrite;
	exit_handlers[EXIT_REASON_VMOFF]	= handle_vmoff;
	exit_handlers[EXIT_REASON_VMON]		= handle_vmon;
	exit_handlers[EXIT_REASON_INVEPT]	= handle_invept;
	exit_handlers[EXIT_REASON_INVVPID]	= handle_invvpid;
	exit_handlers[EXIT_REASON_VMFUNC]	= handle_vmfunc;

	return 0;
}

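/*
 * Dispatch table consumed by the generic x86 nested-virtualization code;
 * presumably it is wired up via the VMX kvm_x86_ops setup in vmx.c, which is
 * outside this file.
 */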
struct kvm_x86_nested_ops vmx_nested_ops = {
	.leave_nested = vmx_leave_nested,
	.check_events = vmx_check_nested_events,
	.hv_timer_pending = nested_vmx_preemption_timer_pending,
	.triple_fault = nested_vmx_triple_fault,
	.get_state = vmx_get_nested_state,
	.set_state = vmx_set_nested_state,
	.get_nested_state_pages = vmx_get_nested_state_pages,
	.write_log_dirty = nested_vmx_write_pml_buffer,
	.enable_evmcs = nested_enable_evmcs,
	.get_evmcs_version = nested_get_evmcs_version,
};
