// SPDX-License-Identifier: GPL-2.0

#include <linux/frame.h>
#include <linux/percpu.h>

#include <asm/debugreg.h>
#include <asm/mmu_context.h>

#include "cpuid.h"
#include "hyperv.h"
#include "mmu.h"
#include "nested.h"
#include "trace.h"
#include "x86.h"

static bool __read_mostly enable_shadow_vmcs = 1;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);

static bool __read_mostly nested_early_check = 0;
module_param(nested_early_check, bool, S_IRUGO);

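/*
 * Wrap a consistency check so that, on failure, the stringified check is
 * reported via the kvm_nested_vmenter_failed tracepoint before the boolean
 * result is handed back to the caller.
 */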
#define CC(consistency_check)                                           \
({                                                                      \
        bool failed = (consistency_check);                              \
        if (failed)                                                     \
                trace_kvm_nested_vmenter_failed(#consistency_check, 0); \
        failed;                                                         \
})

/*
 * Hyper-V requires all of these, so mark them as supported even though
 * they are just treated the same as all-context.
 */
#define VMX_VPID_EXTENT_SUPPORTED_MASK          \
        (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |  \
        VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |    \
        VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |    \
        VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)

#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5

enum {
        VMX_VMREAD_BITMAP,
        VMX_VMWRITE_BITMAP,
        VMX_BITMAP_NR
};
static unsigned long *vmx_bitmap[VMX_BITMAP_NR];

#define vmx_vmread_bitmap      (vmx_bitmap[VMX_VMREAD_BITMAP])
#define vmx_vmwrite_bitmap     (vmx_bitmap[VMX_VMWRITE_BITMAP])

struct shadow_vmcs_field {
        u16     encoding;
        u16     offset;
};
static struct shadow_vmcs_field shadow_read_only_fields[] = {
#define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_only_fields =
        ARRAY_SIZE(shadow_read_only_fields);

static struct shadow_vmcs_field shadow_read_write_fields[] = {
#define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_write_fields =
        ARRAY_SIZE(shadow_read_write_fields);

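/*
 * Trim the shadow field tables down to the fields that exist on the
 * underlying hardware and clear the matching bits in the VMREAD/VMWRITE
 * bitmaps.  Odd encodings are the high halves of 64-bit fields: they are
 * skipped on 64-bit kernels (the even encoding covers the full field) and
 * redirected to the upper 32 bits of the vmcs12 member on 32-bit kernels.
 */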
static void init_vmcs_shadow_fields(void)
{
        int i, j;

        memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
        memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);

        for (i = j = 0; i < max_shadow_read_only_fields; i++) {
                struct shadow_vmcs_field entry = shadow_read_only_fields[i];
                u16 field = entry.encoding;

                if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
                    (i + 1 == max_shadow_read_only_fields ||
                     shadow_read_only_fields[i + 1].encoding != field + 1))
                        pr_err("Missing field from shadow_read_only_field %x\n",
                               field + 1);

                clear_bit(field, vmx_vmread_bitmap);
                if (field & 1)
#ifdef CONFIG_X86_64
                        continue;
#else
                        entry.offset += sizeof(u32);
#endif
                shadow_read_only_fields[j++] = entry;
        }
        max_shadow_read_only_fields = j;

        for (i = j = 0; i < max_shadow_read_write_fields; i++) {
                struct shadow_vmcs_field entry = shadow_read_write_fields[i];
                u16 field = entry.encoding;

                if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
                    (i + 1 == max_shadow_read_write_fields ||
                     shadow_read_write_fields[i + 1].encoding != field + 1))
                        pr_err("Missing field from shadow_read_write_field %x\n",
                               field + 1);

                WARN_ONCE(field >= GUEST_ES_AR_BYTES &&
                          field <= GUEST_TR_AR_BYTES,
                          "Update vmcs12_write_any() to drop reserved bits from AR_BYTES");

                /*
                 * PML and the preemption timer can be emulated, but the
                 * processor cannot vmwrite to fields that don't exist
                 * on bare metal.
                 */
                switch (field) {
                case GUEST_PML_INDEX:
                        if (!cpu_has_vmx_pml())
                                continue;
                        break;
                case VMX_PREEMPTION_TIMER_VALUE:
                        if (!cpu_has_vmx_preemption_timer())
                                continue;
                        break;
                case GUEST_INTR_STATUS:
                        if (!cpu_has_vmx_apicv())
                                continue;
                        break;
                default:
                        break;
                }

                clear_bit(field, vmx_vmwrite_bitmap);
                clear_bit(field, vmx_vmread_bitmap);
                if (field & 1)
#ifdef CONFIG_X86_64
                        continue;
#else
                        entry.offset += sizeof(u32);
#endif
                shadow_read_write_fields[j++] = entry;
        }
        max_shadow_read_write_fields = j;
}

/*
 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
 * set the success or error code of an emulated VMX instruction (as specified
 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
 * instruction.
 */
static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
        vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
                & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
                    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
        return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
        vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
                        & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
                            X86_EFLAGS_SF | X86_EFLAGS_OF))
                        | X86_EFLAGS_CF);
        return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
                                u32 vm_instruction_error)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        /*
         * failValid writes the error number to the current VMCS, which
         * can't be done if there isn't a current VMCS.
         */
        if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
                return nested_vmx_failInvalid(vcpu);

        vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
                        & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
                            X86_EFLAGS_SF | X86_EFLAGS_OF))
                        | X86_EFLAGS_ZF);
        get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
        /*
         * We don't need to force a shadow sync because
         * VM_INSTRUCTION_ERROR is not shadowed.
         */
        return kvm_skip_emulated_instruction(vcpu);
}

static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
{
        /* TODO: don't simply reset the guest here. */
        kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
        pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
}

static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
        return fixed_bits_valid(control, low, high);
}

static inline u64 vmx_control_msr(u32 low, u32 high)
{
        return low | ((u64)high << 32);
}

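/* Turn off VMCS shadowing and mark the VMCS link pointer invalid. */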
static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
{
        secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
        vmcs_write64(VMCS_LINK_POINTER, -1ull);
        vmx->nested.need_vmcs12_to_shadow_sync = false;
}

static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        if (!vmx->nested.hv_evmcs)
                return;

        kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
        vmx->nested.hv_evmcs_vmptr = -1ull;
        vmx->nested.hv_evmcs = NULL;
}

/*
 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
 * just stops using VMX.
 */
static void free_nested(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
                return;

        kvm_clear_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);

        vmx->nested.vmxon = false;
        vmx->nested.smm.vmxon = false;
        free_vpid(vmx->nested.vpid02);
        vmx->nested.posted_intr_nv = -1;
        vmx->nested.current_vmptr = -1ull;
        if (enable_shadow_vmcs) {
                vmx_disable_shadow_vmcs(vmx);
                vmcs_clear(vmx->vmcs01.shadow_vmcs);
                free_vmcs(vmx->vmcs01.shadow_vmcs);
                vmx->vmcs01.shadow_vmcs = NULL;
        }
        kfree(vmx->nested.cached_vmcs12);
        vmx->nested.cached_vmcs12 = NULL;
        kfree(vmx->nested.cached_shadow_vmcs12);
        vmx->nested.cached_shadow_vmcs12 = NULL;
        /* Unpin physical memory we referred to in the vmcs02. */
        if (vmx->nested.apic_access_page) {
                kvm_release_page_dirty(vmx->nested.apic_access_page);
                vmx->nested.apic_access_page = NULL;
        }
        kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
        kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
        vmx->nested.pi_desc = NULL;

        kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);

        nested_release_evmcs(vcpu);

        free_loaded_vmcs(&vmx->nested.vmcs02);
}

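/*
 * Copy the cached host state from the previously loaded VMCS to the newly
 * loaded one so the expensive host state save doesn't need to be redone;
 * only relevant while guest (and thus host) state is currently loaded.
 */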
static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
                                     struct loaded_vmcs *prev)
{
        struct vmcs_host_state *dest, *src;

        if (unlikely(!vmx->guest_state_loaded))
                return;

        src = &prev->host_state;
        dest = &vmx->loaded_vmcs->host_state;

        vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base);
        dest->ldt_sel = src->ldt_sel;
#ifdef CONFIG_X86_64
        dest->ds_sel = src->ds_sel;
        dest->es_sel = src->es_sel;
#endif
}

static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct loaded_vmcs *prev;
        int cpu;

        if (vmx->loaded_vmcs == vmcs)
                return;

        cpu = get_cpu();
        prev = vmx->loaded_vmcs;
        vmx->loaded_vmcs = vmcs;
        vmx_vcpu_load_vmcs(vcpu, cpu);
        vmx_sync_vmcs_host_state(vmx, prev);
        put_cpu();

        vmx_segment_cache_clear(vmx);
}

/*
 * Ensure that the current vmcs of the logical processor is the
 * vmcs01 of the vcpu before calling free_nested().
 */
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
        vcpu_load(vcpu);
        vmx_leave_nested(vcpu);
        vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01);
        free_nested(vcpu);
        vcpu_put(vcpu);
}

static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
                                         struct x86_exception *fault)
{
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 exit_reason;
        unsigned long exit_qualification = vcpu->arch.exit_qualification;

        if (vmx->nested.pml_full) {
                exit_reason = EXIT_REASON_PML_FULL;
                vmx->nested.pml_full = false;
                exit_qualification &= INTR_INFO_UNBLOCK_NMI;
        } else if (fault->error_code & PFERR_RSVD_MASK)
                exit_reason = EXIT_REASON_EPT_MISCONFIG;
        else
                exit_reason = EXIT_REASON_EPT_VIOLATION;

        nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification);
        vmcs12->guest_physical_address = fault->address;
}

static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
        WARN_ON(mmu_is_nested(vcpu));

        vcpu->arch.mmu = &vcpu->arch.guest_mmu;
        kvm_init_shadow_ept_mmu(vcpu,
                                to_vmx(vcpu)->nested.msrs.ept_caps &
                                VMX_EPT_EXECUTE_ONLY_BIT,
                                nested_ept_ad_enabled(vcpu),
                                nested_ept_get_cr3(vcpu));
        vcpu->arch.mmu->set_cr3 = vmx_set_cr3;
        vcpu->arch.mmu->get_cr3 = nested_ept_get_cr3;
        vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
        vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;

        vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
        vcpu->arch.mmu = &vcpu->arch.root_mmu;
        vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

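/*
 * Per the SDM, a #PF causes a VM-exit iff bit 14 (#PF) of the exception
 * bitmap is set and (PFEC & PFEC_MASK) == PFEC_MATCH, or the bit is clear
 * and (PFEC & PFEC_MASK) != PFEC_MATCH.
 */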
static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
                                            u16 error_code)
{
        bool inequality, bit;

        bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
        inequality =
                (error_code & vmcs12->page_fault_error_code_mask) !=
                 vmcs12->page_fault_error_code_match;
        return inequality ^ bit;
}


/*
 * KVM wants to inject page faults it received into the guest. This function
 * checks whether, in a nested guest, they need to be injected into L1 or L2.
 */
static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
{
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
        unsigned int nr = vcpu->arch.exception.nr;
        bool has_payload = vcpu->arch.exception.has_payload;
        unsigned long payload = vcpu->arch.exception.payload;

        if (nr == PF_VECTOR) {
                if (vcpu->arch.exception.nested_apf) {
                        *exit_qual = vcpu->arch.apf.nested_apf_token;
                        return 1;
                }
                if (nested_vmx_is_page_fault_vmexit(vmcs12,
                                                    vcpu->arch.exception.error_code)) {
                        *exit_qual = has_payload ? payload : vcpu->arch.cr2;
                        return 1;
                }
        } else if (vmcs12->exception_bitmap & (1u << nr)) {
                if (nr == DB_VECTOR) {
                        if (!has_payload) {
                                payload = vcpu->arch.dr6;
                                payload &= ~(DR6_FIXED_1 | DR6_BT);
                                payload ^= DR6_RTM;
                        }
                        *exit_qual = payload;
                } else
                        *exit_qual = 0;
                return 1;
        }

        return 0;
}


static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
                                         struct x86_exception *fault)
{
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

        WARN_ON(!is_guest_mode(vcpu));

        if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
            !to_vmx(vcpu)->nested.nested_run_pending) {
                vmcs12->vm_exit_intr_error_code = fault->error_code;
                nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
                                  PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
                                  INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
                                  fault->address);
        } else {
                kvm_inject_page_fault(vcpu, fault);
        }
}

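/* A vmcs12 page address must be page aligned and below maxphyaddr. */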
static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
}

static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
                                               struct vmcs12 *vmcs12)
{
        if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
                return 0;

        if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) ||
            CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b)))
                return -EINVAL;

        return 0;
}

static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
                                                struct vmcs12 *vmcs12)
{
        if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
                return 0;

        if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
                return -EINVAL;

        return 0;
}

static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
                                                struct vmcs12 *vmcs12)
{
        if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
                return 0;

        if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)))
                return -EINVAL;

        return 0;
}

/*
 * Check if an MSR write is intercepted by the L01 MSR bitmap.
 */
static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
{
        unsigned long *msr_bitmap;
        int f = sizeof(unsigned long);

        if (!cpu_has_vmx_msr_bitmap())
                return true;

        msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;

        if (msr <= 0x1fff) {
                return !!test_bit(msr, msr_bitmap + 0x800 / f);
        } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
                msr &= 0x1fff;
                return !!test_bit(msr, msr_bitmap + 0xc00 / f);
        }

        return true;
}

/*
 * If an MSR is allowed by L0, we should check whether it is allowed by L1.
 * The corresponding bit will be cleared unless both L0 and L1 allow it.
 */
static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
                                                 unsigned long *msr_bitmap_nested,
                                                 u32 msr, int type)
{
        int f = sizeof(unsigned long);

        /*
         * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
         * have the write-low and read-high bitmap offsets the wrong way round.
         * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
         */
        if (msr <= 0x1fff) {
                if (type & MSR_TYPE_R &&
                    !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
                        /* read-low */
                        __clear_bit(msr, msr_bitmap_nested + 0x000 / f);

                if (type & MSR_TYPE_W &&
                    !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
                        /* write-low */
                        __clear_bit(msr, msr_bitmap_nested + 0x800 / f);

        } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
                msr &= 0x1fff;
                if (type & MSR_TYPE_R &&
                    !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
                        /* read-high */
                        __clear_bit(msr, msr_bitmap_nested + 0x400 / f);

                if (type & MSR_TYPE_W &&
                    !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
                        /* write-high */
                        __clear_bit(msr, msr_bitmap_nested + 0xc00 / f);

        }
}

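/*
 * Set both the read and write intercept bits for the entire x2APIC MSR
 * range (0x800 - 0x8ff) in the given bitmap.
 */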
static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
{
        int msr;

        for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
                unsigned word = msr / BITS_PER_LONG;

                msr_bitmap[word] = ~0;
                msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
        }
}

/*
 * Merge L0's and L1's MSR bitmaps, return false to indicate that
 * we do not use the hardware.
 */
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
                                                 struct vmcs12 *vmcs12)
{
        int msr;
        unsigned long *msr_bitmap_l1;
        unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
        struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map;

        /* Nothing to do if the MSR bitmap is not in use. */
        if (!cpu_has_vmx_msr_bitmap() ||
            !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
                return false;

        if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
                return false;

        msr_bitmap_l1 = (unsigned long *)map->hva;

        /*
         * To keep the control flow simple, pay eight 8-byte writes (sixteen
         * 4-byte writes on 32-bit systems) up front to enable intercepts for
         * the x2APIC MSR range and selectively disable them below.
         */
        enable_x2apic_msr_intercepts(msr_bitmap_l0);

        if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
                if (nested_cpu_has_apic_reg_virt(vmcs12)) {
                        /*
                         * L0 need not intercept reads for MSRs between 0x800
                         * and 0x8ff, it just lets the processor take the value
                         * from the virtual-APIC page; take those 256 bits
                         * directly from the L1 bitmap.
                         */
                        for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
                                unsigned word = msr / BITS_PER_LONG;

                                msr_bitmap_l0[word] = msr_bitmap_l1[word];
                        }
                }

                nested_vmx_disable_intercept_for_msr(
                        msr_bitmap_l1, msr_bitmap_l0,
                        X2APIC_MSR(APIC_TASKPRI),
                        MSR_TYPE_R | MSR_TYPE_W);

                if (nested_cpu_has_vid(vmcs12)) {
                        nested_vmx_disable_intercept_for_msr(
                                msr_bitmap_l1, msr_bitmap_l0,
                                X2APIC_MSR(APIC_EOI),
                                MSR_TYPE_W);
                        nested_vmx_disable_intercept_for_msr(
                                msr_bitmap_l1, msr_bitmap_l0,
                                X2APIC_MSR(APIC_SELF_IPI),
                                MSR_TYPE_W);
                }
        }

        /* KVM unconditionally exposes the FS/GS base MSRs to L1. */
        nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
                                             MSR_FS_BASE, MSR_TYPE_RW);

        nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
                                             MSR_GS_BASE, MSR_TYPE_RW);

        nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
                                             MSR_KERNEL_GS_BASE, MSR_TYPE_RW);

        /*
         * Checking the L0->L1 bitmap is trying to verify two things:
         *
         * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
         *    ensures that we do not accidentally generate an L02 MSR bitmap
         *    from the L12 MSR bitmap that is too permissive.
         * 2. That L1 or L2s have actually used the MSR. This avoids
         *    unnecessary merging of the bitmap if the MSR is unused. This
         *    works properly because we only update the L01 MSR bitmap lazily.
         *    So even if L0 should pass L1 these MSRs, the L01 bitmap is only
         *    updated to reflect this when L1 (or its L2s) actually write to
         *    the MSR.
         */
        if (!msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL))
                nested_vmx_disable_intercept_for_msr(
                                        msr_bitmap_l1, msr_bitmap_l0,
                                        MSR_IA32_SPEC_CTRL,
                                        MSR_TYPE_R | MSR_TYPE_W);

        if (!msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD))
                nested_vmx_disable_intercept_for_msr(
                                        msr_bitmap_l1, msr_bitmap_l0,
                                        MSR_IA32_PRED_CMD,
                                        MSR_TYPE_W);

        kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false);

        return true;
}

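/*
 * Cache the shadow vmcs12 referenced by L1's VMCS link pointer into KVM's
 * internal copy; nested_flush_cached_shadow_vmcs12() writes it back.
 */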
static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
                                       struct vmcs12 *vmcs12)
{
        struct kvm_host_map map;
        struct vmcs12 *shadow;

        if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
            vmcs12->vmcs_link_pointer == -1ull)
                return;

        shadow = get_shadow_vmcs12(vcpu);

        if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
                return;

        memcpy(shadow, map.hva, VMCS12_SIZE);
        kvm_vcpu_unmap(vcpu, &map, false);
}

static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
                                              struct vmcs12 *vmcs12)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
            vmcs12->vmcs_link_pointer == -1ull)
                return;

        kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
                        get_shadow_vmcs12(vcpu), VMCS12_SIZE);
}

/*
 * In nested virtualization, check if L1 has set
 * VM_EXIT_ACK_INTR_ON_EXIT.
 */
static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
{
        return get_vmcs12(vcpu)->vm_exit_controls &
                VM_EXIT_ACK_INTR_ON_EXIT;
}

static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
{
        return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
}

static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
                                                 struct vmcs12 *vmcs12)
{
        if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
            CC(!page_address_valid(vcpu, vmcs12->apic_access_addr)))
                return -EINVAL;
        else
                return 0;
}

static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
                                           struct vmcs12 *vmcs12)
{
        if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
            !nested_cpu_has_apic_reg_virt(vmcs12) &&
            !nested_cpu_has_vid(vmcs12) &&
            !nested_cpu_has_posted_intr(vmcs12))
                return 0;

        /*
         * If virtualize x2apic mode is enabled,
         * virtualize apic access must be disabled.
         */
        if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) &&
               nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)))
                return -EINVAL;

        /*
         * If virtual interrupt delivery is enabled,
         * we must exit on external interrupts.
         */
        if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)))
                return -EINVAL;

        /*
         * bits 15:8 should be zero in posted_intr_nv,
         * the descriptor address has already been checked
         * in nested_get_vmcs12_pages.
         *
         * bits 5:0 of posted_intr_desc_addr should be zero.
         */
        if (nested_cpu_has_posted_intr(vmcs12) &&
            (CC(!nested_cpu_has_vid(vmcs12)) ||
             CC(!nested_exit_intr_ack_set(vcpu)) ||
             CC((vmcs12->posted_intr_nv & 0xff00)) ||
             CC((vmcs12->posted_intr_desc_addr & 0x3f)) ||
             CC((vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu)))))
                return -EINVAL;

        /* tpr shadow is needed by all apicv features. */
        if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)))
                return -EINVAL;

        return 0;
}

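/*
 * An MSR switch area must be 16-byte aligned and, including its last
 * entry, must lie below the guest's maximum physical address.
 */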
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
                                       u32 count, u64 addr)
{
        int maxphyaddr;

        if (count == 0)
                return 0;
        maxphyaddr = cpuid_maxphyaddr(vcpu);
        if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
            (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
                return -EINVAL;

        return 0;
}

static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
                                                     struct vmcs12 *vmcs12)
{
        if (CC(nested_vmx_check_msr_switch(vcpu,
                                           vmcs12->vm_exit_msr_load_count,
                                           vmcs12->vm_exit_msr_load_addr)) ||
            CC(nested_vmx_check_msr_switch(vcpu,
                                           vmcs12->vm_exit_msr_store_count,
                                           vmcs12->vm_exit_msr_store_addr)))
                return -EINVAL;

        return 0;
}

static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
                                                      struct vmcs12 *vmcs12)
{
        if (CC(nested_vmx_check_msr_switch(vcpu,
                                           vmcs12->vm_entry_msr_load_count,
                                           vmcs12->vm_entry_msr_load_addr)))
                return -EINVAL;

        return 0;
}

static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
                                         struct vmcs12 *vmcs12)
{
        if (!nested_cpu_has_pml(vmcs12))
                return 0;

        if (CC(!nested_cpu_has_ept(vmcs12)) ||
            CC(!page_address_valid(vcpu, vmcs12->pml_address)))
                return -EINVAL;

        return 0;
}

static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
                                                        struct vmcs12 *vmcs12)
{
        if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
               !nested_cpu_has_ept(vmcs12)))
                return -EINVAL;
        return 0;
}

static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
                                                         struct vmcs12 *vmcs12)
{
        if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
               !nested_cpu_has_ept(vmcs12)))
                return -EINVAL;
        return 0;
}

static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
                                                 struct vmcs12 *vmcs12)
{
        if (!nested_cpu_has_shadow_vmcs(vmcs12))
                return 0;

        if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) ||
            CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap)))
                return -EINVAL;

        return 0;
}

static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
                                       struct vmx_msr_entry *e)
{
        /* x2APIC MSR accesses are not allowed. */
        if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
                return -EINVAL;
        if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */
            CC(e->index == MSR_IA32_UCODE_REV))
                return -EINVAL;
        if (CC(e->reserved != 0))
                return -EINVAL;
        return 0;
}

static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
                                     struct vmx_msr_entry *e)
{
        if (CC(e->index == MSR_FS_BASE) ||
            CC(e->index == MSR_GS_BASE) ||
            CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */
            nested_vmx_msr_check_common(vcpu, e))
                return -EINVAL;
        return 0;
}

static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
                                      struct vmx_msr_entry *e)
{
        if (CC(e->index == MSR_IA32_SMBASE) || /* SMM is not supported */
            nested_vmx_msr_check_common(vcpu, e))
                return -EINVAL;
        return 0;
}

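/*
 * The recommended maximum number of MSRs per load/store list is reported
 * in bits 27:25 of IA32_VMX_MISC, in units of 512 MSRs.
 */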
static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
                                       vmx->nested.msrs.misc_high);

        return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
}

/*
 * Load guest's/host's MSRs at nested entry/exit.
 * Returns 0 for success, the (1-based) index of the failing entry otherwise.
 *
 * One of the failure modes for MSR load/store is when a list exceeds the
 * virtual hardware's capacity. To maintain compatibility with hardware as
 * much as possible, process all valid entries before failing rather than
 * prechecking for a capacity violation.
 */
static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
        u32 i;
        struct vmx_msr_entry e;
        u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);

        for (i = 0; i < count; i++) {
                if (unlikely(i >= max_msr_list_size))
                        goto fail;

                if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
                                        &e, sizeof(e))) {
                        pr_debug_ratelimited(
                                "%s cannot read MSR entry (%u, 0x%08llx)\n",
                                __func__, i, gpa + i * sizeof(e));
                        goto fail;
                }
                if (nested_vmx_load_msr_check(vcpu, &e)) {
                        pr_debug_ratelimited(
                                "%s check failed (%u, 0x%x, 0x%x)\n",
                                __func__, i, e.index, e.reserved);
                        goto fail;
                }
                if (kvm_set_msr(vcpu, e.index, e.value)) {
                        pr_debug_ratelimited(
                                "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
                                __func__, i, e.index, e.value);
                        goto fail;
                }
        }
        return 0;
fail:
        return i + 1;
}

static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
        u64 data;
        u32 i;
        struct vmx_msr_entry e;
        u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);

        for (i = 0; i < count; i++) {
                if (unlikely(i >= max_msr_list_size))
                        return -EINVAL;

                if (kvm_vcpu_read_guest(vcpu,
                                        gpa + i * sizeof(e),
                                        &e, 2 * sizeof(u32))) {
                        pr_debug_ratelimited(
                                "%s cannot read MSR entry (%u, 0x%08llx)\n",
                                __func__, i, gpa + i * sizeof(e));
                        return -EINVAL;
                }
                if (nested_vmx_store_msr_check(vcpu, &e)) {
                        pr_debug_ratelimited(
                                "%s check failed (%u, 0x%x, 0x%x)\n",
                                __func__, i, e.index, e.reserved);
                        return -EINVAL;
                }
                if (kvm_get_msr(vcpu, e.index, &data)) {
                        pr_debug_ratelimited(
                                "%s cannot read MSR (%u, 0x%x)\n",
                                __func__, i, e.index);
                        return -EINVAL;
                }
                if (kvm_vcpu_write_guest(vcpu,
                                         gpa + i * sizeof(e) +
                                         offsetof(struct vmx_msr_entry, value),
                                         &data, sizeof(data))) {
                        pr_debug_ratelimited(
                                "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
                                __func__, i, e.index, data);
                        return -EINVAL;
                }
        }
        return 0;
}

static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
        unsigned long invalid_mask;

        invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
        return (val & invalid_mask) == 0;
}

/*
 * Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are
 * emulating VM entry into a guest with EPT enabled.
 * Returns 0 on success, -EINVAL on failure; the invalid-state exit
 * qualification code is assigned to *entry_failure_code on failure.
 */
static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
                               u32 *entry_failure_code)
{
        if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
                if (CC(!nested_cr3_valid(vcpu, cr3))) {
                        *entry_failure_code = ENTRY_FAIL_DEFAULT;
                        return -EINVAL;
                }

                /*
                 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
                 * must not be dereferenced.
                 */
                if (is_pae_paging(vcpu) && !nested_ept) {
                        if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) {
                                *entry_failure_code = ENTRY_FAIL_PDPTE;
                                return -EINVAL;
                        }
                }
        }

        if (!nested_ept)
                kvm_mmu_new_cr3(vcpu, cr3, false);

        vcpu->arch.cr3 = cr3;
        __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);

        kvm_init_mmu(vcpu, false);

        return 0;
}

/*
 * Returns true if KVM is able to configure the CPU to tag TLB entries
 * populated by L2 differently than TLB entries populated
 * by L1.
 *
 * If L1 uses EPT, TLB entries are tagged with a different EPTP.
 *
 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
 * with different VPID (L1 entries are tagged with vmx->vpid
 * while L2 entries are tagged with vmx->nested.vpid02).
 */
static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
{
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

        return nested_cpu_has_ept(vmcs12) ||
               (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
}

static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
}

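/* True iff all bits of @subset covered by @mask are also set in @superset. */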
static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
{
        superset &= mask;
        subset &= mask;

        return (superset | subset) == superset;
}

static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
{
        const u64 feature_and_reserved =
                /* feature (except bit 48; see below) */
                BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
                /* reserved */
                BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
        u64 vmx_basic = vmx->nested.msrs.basic;

        if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
                return -EINVAL;

        /*
         * KVM does not emulate a version of VMX that constrains physical
         * addresses of VMX structures (e.g. VMCS) to 32-bits.
         */
        if (data & BIT_ULL(48))
                return -EINVAL;

        if (vmx_basic_vmcs_revision_id(vmx_basic) !=
            vmx_basic_vmcs_revision_id(data))
                return -EINVAL;

        if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
                return -EINVAL;

        vmx->nested.msrs.basic = data;
        return 0;
}

static int
vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
        u64 supported;
        u32 *lowp, *highp;

        switch (msr_index) {
        case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
                lowp = &vmx->nested.msrs.pinbased_ctls_low;
                highp = &vmx->nested.msrs.pinbased_ctls_high;
                break;
        case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
                lowp = &vmx->nested.msrs.procbased_ctls_low;
                highp = &vmx->nested.msrs.procbased_ctls_high;
                break;
        case MSR_IA32_VMX_TRUE_EXIT_CTLS:
                lowp = &vmx->nested.msrs.exit_ctls_low;
                highp = &vmx->nested.msrs.exit_ctls_high;
                break;
        case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
                lowp = &vmx->nested.msrs.entry_ctls_low;
                highp = &vmx->nested.msrs.entry_ctls_high;
                break;
        case MSR_IA32_VMX_PROCBASED_CTLS2:
                lowp = &vmx->nested.msrs.secondary_ctls_low;
                highp = &vmx->nested.msrs.secondary_ctls_high;
                break;
        default:
                BUG();
        }

        supported = vmx_control_msr(*lowp, *highp);

        /* Check must-be-1 bits are still 1. */
        if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
                return -EINVAL;

        /* Check must-be-0 bits are still 0. */
        if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
                return -EINVAL;

        *lowp = data;
        *highp = data >> 32;
        return 0;
}

static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
{
        const u64 feature_and_reserved_bits =
                /* feature */
                BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
                BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
                /* reserved */
                GENMASK_ULL(13, 9) | BIT_ULL(31);
        u64 vmx_misc;

        vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
                                   vmx->nested.msrs.misc_high);

        if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
                return -EINVAL;

        if ((vmx->nested.msrs.pinbased_ctls_high &
             PIN_BASED_VMX_PREEMPTION_TIMER) &&
            vmx_misc_preemption_timer_rate(data) !=
            vmx_misc_preemption_timer_rate(vmx_misc))
                return -EINVAL;

        if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
                return -EINVAL;

        if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
                return -EINVAL;

        if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
                return -EINVAL;

        vmx->nested.msrs.misc_low = data;
        vmx->nested.msrs.misc_high = data >> 32;

        return 0;
}

static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
{
        u64 vmx_ept_vpid_cap;

        vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
                                           vmx->nested.msrs.vpid_caps);

        /* Every bit is either reserved or a feature bit. */
        if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
                return -EINVAL;

        vmx->nested.msrs.ept_caps = data;
        vmx->nested.msrs.vpid_caps = data >> 32;
        return 0;
}

static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
        u64 *msr;

        switch (msr_index) {
        case MSR_IA32_VMX_CR0_FIXED0:
                msr = &vmx->nested.msrs.cr0_fixed0;
                break;
        case MSR_IA32_VMX_CR4_FIXED0:
                msr = &vmx->nested.msrs.cr4_fixed0;
                break;
        default:
                BUG();
        }

        /*
         * 1 bits (which indicate bits that "must be 1" during VMX operation)
         * must be 1 in the restored value.
         */
        if (!is_bitwise_subset(data, *msr, -1ULL))
                return -EINVAL;

        *msr = data;
        return 0;
}

/*
 * Called when userspace is restoring VMX MSRs.
 *
 * Returns 0 on success, non-0 otherwise.
 */
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        /*
         * Don't allow changes to the VMX capability MSRs while the vCPU
         * is in VMX operation.
         */
        if (vmx->nested.vmxon)
                return -EBUSY;

        switch (msr_index) {
        case MSR_IA32_VMX_BASIC:
                return vmx_restore_vmx_basic(vmx, data);
        case MSR_IA32_VMX_PINBASED_CTLS:
        case MSR_IA32_VMX_PROCBASED_CTLS:
        case MSR_IA32_VMX_EXIT_CTLS:
        case MSR_IA32_VMX_ENTRY_CTLS:
                /*
                 * The "non-true" VMX capability MSRs are generated from the
                 * "true" MSRs, so we do not support restoring them directly.
                 *
                 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
                 * should restore the "true" MSRs with the must-be-1 bits
                 * set according to the SDM Vol 3, A.2 "RESERVED CONTROLS AND
                 * DEFAULT SETTINGS".
                 */
                return -EINVAL;
        case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
        case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
        case MSR_IA32_VMX_TRUE_EXIT_CTLS:
        case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
        case MSR_IA32_VMX_PROCBASED_CTLS2:
                return vmx_restore_control_msr(vmx, msr_index, data);
        case MSR_IA32_VMX_MISC:
                return vmx_restore_vmx_misc(vmx, data);
        case MSR_IA32_VMX_CR0_FIXED0:
        case MSR_IA32_VMX_CR4_FIXED0:
                return vmx_restore_fixed0_msr(vmx, msr_index, data);
        case MSR_IA32_VMX_CR0_FIXED1:
        case MSR_IA32_VMX_CR4_FIXED1:
                /*
                 * These MSRs are generated based on the vCPU's CPUID, so we
                 * do not support restoring them directly.
                 */
                return -EINVAL;
        case MSR_IA32_VMX_EPT_VPID_CAP:
                return vmx_restore_vmx_ept_vpid_cap(vmx, data);
        case MSR_IA32_VMX_VMCS_ENUM:
                vmx->nested.msrs.vmcs_enum = data;
                return 0;
        case MSR_IA32_VMX_VMFUNC:
                if (data & ~vmx->nested.msrs.vmfunc_controls)
                        return -EINVAL;
                vmx->nested.msrs.vmfunc_controls = data;
                return 0;
        default:
                /*
                 * The rest of the VMX capability MSRs do not support restore.
                 */
                return -EINVAL;
        }
}

/* Returns 0 on success, non-0 otherwise. */
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
{
        switch (msr_index) {
        case MSR_IA32_VMX_BASIC:
                *pdata = msrs->basic;
                break;
        case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
        case MSR_IA32_VMX_PINBASED_CTLS:
                *pdata = vmx_control_msr(
                        msrs->pinbased_ctls_low,
                        msrs->pinbased_ctls_high);
                if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
                        *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
                break;
        case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
        case MSR_IA32_VMX_PROCBASED_CTLS:
                *pdata = vmx_control_msr(
                        msrs->procbased_ctls_low,
                        msrs->procbased_ctls_high);
                if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
                        *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
                break;
        case MSR_IA32_VMX_TRUE_EXIT_CTLS:
        case MSR_IA32_VMX_EXIT_CTLS:
                *pdata = vmx_control_msr(
                        msrs->exit_ctls_low,
                        msrs->exit_ctls_high);
                if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
                        *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
                break;
        case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
        case MSR_IA32_VMX_ENTRY_CTLS:
                *pdata = vmx_control_msr(
                        msrs->entry_ctls_low,
                        msrs->entry_ctls_high);
                if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
                        *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
                break;
        case MSR_IA32_VMX_MISC:
                *pdata = vmx_control_msr(
                        msrs->misc_low,
                        msrs->misc_high);
                break;
        case MSR_IA32_VMX_CR0_FIXED0:
                *pdata = msrs->cr0_fixed0;
                break;
        case MSR_IA32_VMX_CR0_FIXED1:
                *pdata = msrs->cr0_fixed1;
                break;
        case MSR_IA32_VMX_CR4_FIXED0:
                *pdata = msrs->cr4_fixed0;
                break;
        case MSR_IA32_VMX_CR4_FIXED1:
                *pdata = msrs->cr4_fixed1;
                break;
        case MSR_IA32_VMX_VMCS_ENUM:
                *pdata = msrs->vmcs_enum;
                break;
        case MSR_IA32_VMX_PROCBASED_CTLS2:
                *pdata = vmx_control_msr(
                        msrs->secondary_ctls_low,
                        msrs->secondary_ctls_high);
                break;
        case MSR_IA32_VMX_EPT_VPID_CAP:
                *pdata = msrs->ept_caps |
                        ((u64)msrs->vpid_caps << 32);
                break;
        case MSR_IA32_VMX_VMFUNC:
                *pdata = msrs->vmfunc_controls;
                break;
        default:
                return 1;
        }

        return 0;
}

/*
 * Copy the writable VMCS shadow fields back to the VMCS12, in case they have
 * been modified by the L1 guest.  Note, "writable" in this context means
 * "writable by the guest", i.e. tagged SHADOW_FIELD_RW; the set of
 * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only"
 * VM-exit information fields (which are actually writable if the vCPU is
 * configured to support "VMWRITE to any supported field in the VMCS").
 */
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
{
        struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
        struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
        struct shadow_vmcs_field field;
        unsigned long val;
        int i;

        if (WARN_ON(!shadow_vmcs))
                return;

        preempt_disable();

        vmcs_load(shadow_vmcs);

        for (i = 0; i < max_shadow_read_write_fields; i++) {
                field = shadow_read_write_fields[i];
                val = __vmcs_readl(field.encoding);
                vmcs12_write_any(vmcs12, field.encoding, field.offset, val);
        }

        vmcs_clear(shadow_vmcs);
        vmcs_load(vmx->loaded_vmcs->vmcs);

        preempt_enable();
}

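/*
 * Copy all shadowed fields, both read-only and read/write, from the cached
 * vmcs12 into the shadow VMCS so that VMREADs executed by L1 in hardware
 * observe up-to-date values.
 */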
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
{
        const struct shadow_vmcs_field *fields[] = {
                shadow_read_write_fields,
                shadow_read_only_fields
        };
        const int max_fields[] = {
                max_shadow_read_write_fields,
                max_shadow_read_only_fields
        };
        struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
        struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
        struct shadow_vmcs_field field;
        unsigned long val;
        int i, q;

        if (WARN_ON(!shadow_vmcs))
                return;

        vmcs_load(shadow_vmcs);

        for (q = 0; q < ARRAY_SIZE(fields); q++) {
                for (i = 0; i < max_fields[q]; i++) {
                        field = fields[q][i];
                        val = vmcs12_read_any(vmcs12, field.encoding,
                                              field.offset);
                        __vmcs_writel(field.encoding, val);
                }
        }

        vmcs_clear(shadow_vmcs);
        vmcs_load(vmx->loaded_vmcs->vmcs);
}

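/*
 * Pull fields from the Hyper-V enlightened VMCS into the cached vmcs12,
 * honoring the hv_clean_fields bits to skip groups L1 marked as unchanged.
 */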
static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
{
        struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
        struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

        /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
        vmcs12->tpr_threshold = evmcs->tpr_threshold;
        vmcs12->guest_rip = evmcs->guest_rip;

        if (unlikely(!(evmcs->hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
                vmcs12->guest_rsp = evmcs->guest_rsp;
                vmcs12->guest_rflags = evmcs->guest_rflags;
                vmcs12->guest_interruptibility_info =
                        evmcs->guest_interruptibility_info;
        }

        if (unlikely(!(evmcs->hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
                vmcs12->cpu_based_vm_exec_control =
                        evmcs->cpu_based_vm_exec_control;
        }

        if (unlikely(!(evmcs->hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
                vmcs12->exception_bitmap = evmcs->exception_bitmap;
        }

        if (unlikely(!(evmcs->hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
                vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
        }

        if (unlikely(!(evmcs->hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
                vmcs12->vm_entry_intr_info_field =
                        evmcs->vm_entry_intr_info_field;
                vmcs12->vm_entry_exception_error_code =
                        evmcs->vm_entry_exception_error_code;
                vmcs12->vm_entry_instruction_len =
                        evmcs->vm_entry_instruction_len;
        }

        if (unlikely(!(evmcs->hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
                vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
                vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
                vmcs12->host_cr0 = evmcs->host_cr0;
                vmcs12->host_cr3 = evmcs->host_cr3;
                vmcs12->host_cr4 = evmcs->host_cr4;
                vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
                vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
                vmcs12->host_rip = evmcs->host_rip;
                vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
                vmcs12->host_es_selector = evmcs->host_es_selector;
                vmcs12->host_cs_selector = evmcs->host_cs_selector;
                vmcs12->host_ss_selector = evmcs->host_ss_selector;
                vmcs12->host_ds_selector = evmcs->host_ds_selector;
                vmcs12->host_fs_selector = evmcs->host_fs_selector;
                vmcs12->host_gs_selector = evmcs->host_gs_selector;
                vmcs12->host_tr_selector = evmcs->host_tr_selector;
        }

        if (unlikely(!(evmcs->hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
                vmcs12->pin_based_vm_exec_control =
                        evmcs->pin_based_vm_exec_control;
                vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
                vmcs12->secondary_vm_exec_control =
                        evmcs->secondary_vm_exec_control;
        }

        if (unlikely(!(evmcs->hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
                vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
                vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
        }

        if (unlikely(!(evmcs->hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
                vmcs12->msr_bitmap = evmcs->msr_bitmap;
        }

        if (unlikely(!(evmcs->hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
                vmcs12->guest_es_base = evmcs->guest_es_base;
                vmcs12->guest_cs_base = evmcs->guest_cs_base;
                vmcs12->guest_ss_base = evmcs->guest_ss_base;
                vmcs12->guest_ds_base = evmcs->guest_ds_base;
                vmcs12->guest_fs_base = evmcs->guest_fs_base;
                vmcs12->guest_gs_base = evmcs->guest_gs_base;
                vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
                vmcs12->guest_tr_base = evmcs->guest_tr_base;
                vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
                vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
                vmcs12->guest_es_limit = evmcs->guest_es_limit;
                vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
                vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
                vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
                vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
                vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
                vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
                vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
                vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
                vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
                vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
                vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
                vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
                vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
                vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
                vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
                vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
                vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
                vmcs12->guest_es_selector = evmcs->guest_es_selector;
                vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
                vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
                vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
                vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
                vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
                vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
                vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
        }

        if (unlikely(!(evmcs->hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
                vmcs12->tsc_offset = evmcs->tsc_offset;
                vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
                vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
        }

        if (unlikely(!(evmcs->hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
                vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
                vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
                vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
                vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
                vmcs12->guest_cr0 = evmcs->guest_cr0;
                vmcs12->guest_cr3 = evmcs->guest_cr3;
                vmcs12->guest_cr4 = evmcs->guest_cr4;
                vmcs12->guest_dr7 = evmcs->guest_dr7;
        }

        if (unlikely(!(evmcs->hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
                vmcs12->host_fs_base = evmcs->host_fs_base;
                vmcs12->host_gs_base = evmcs->host_gs_base;
                vmcs12->host_tr_base = evmcs->host_tr_base;
                vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
                vmcs12->host_idtr_base = evmcs->host_idtr_base;
                vmcs12->host_rsp = evmcs->host_rsp;
        }

        if (unlikely(!(evmcs->hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
                vmcs12->ept_pointer = evmcs->ept_pointer;
                vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
        }

        if (unlikely(!(evmcs->hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
                vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
                vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
                vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
                vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
                vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
                vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
                vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
                vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
                vmcs12->guest_pending_dbg_exceptions =
                        evmcs->guest_pending_dbg_exceptions;
                vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
                vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
                vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
                vmcs12->guest_activity_state = evmcs->guest_activity_state;
                vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
        }

        /*
         * Not used?
         * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
         * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
         * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
         * vmcs12->cr3_target_value0 = evmcs->cr3_target_value0;
         * vmcs12->cr3_target_value1 = evmcs->cr3_target_value1;
         * vmcs12->cr3_target_value2 = evmcs->cr3_target_value2;
         * vmcs12->cr3_target_value3 = evmcs->cr3_target_value3;
         * vmcs12->page_fault_error_code_mask =
         *              evmcs->page_fault_error_code_mask;
         * vmcs12->page_fault_error_code_match =
         *              evmcs->page_fault_error_code_match;
         * vmcs12->cr3_target_count = evmcs->cr3_target_count;
         * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
         * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
         * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
         */

        /*
         * Read only fields:
         * vmcs12->guest_physical_address = evmcs->guest_physical_address;
         * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
         * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
         * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
         * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
         * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
         * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
         * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
         * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
         * vmcs12->exit_qualification = evmcs->exit_qualification;
         * vmcs12->guest_linear_address = evmcs->guest_linear_address;
         *
         * Not present in struct vmcs12:
         * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
         * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
         * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
         * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
         */

        return 0;
}

static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
{
        struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
        struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

        /*
         * Should not be changed by KVM:
         *
         * evmcs->host_es_selector = vmcs12->host_es_selector;
         * evmcs->host_cs_selector = vmcs12->host_cs_selector;
         * evmcs->host_ss_selector = vmcs12->host_ss_selector;
         * evmcs->host_ds_selector = vmcs12->host_ds_selector;
         * evmcs->host_fs_selector = vmcs12->host_fs_selector;
         * evmcs->host_gs_selector = vmcs12->host_gs_selector;
         * evmcs->host_tr_selector = vmcs12->host_tr_selector;
         * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
         * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
         * evmcs->host_cr0 = vmcs12->host_cr0;
         * evmcs->host_cr3 = vmcs12->host_cr3;
         * evmcs->host_cr4 = vmcs12->host_cr4;
         * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
         * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
         * evmcs->host_rip = vmcs12->host_rip;
         * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
         * evmcs->host_fs_base = vmcs12->host_fs_base;
         * evmcs->host_gs_base = vmcs12->host_gs_base;
         * evmcs->host_tr_base = vmcs12->host_tr_base;
         * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
         * evmcs->host_idtr_base = vmcs12->host_idtr_base;
         * evmcs->host_rsp = vmcs12->host_rsp;
         * sync_vmcs02_to_vmcs12() doesn't read these:
         * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
         * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
         * evmcs->msr_bitmap = vmcs12->msr_bitmap;
         * evmcs->ept_pointer = vmcs12->ept_pointer;
         * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
         * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
         * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
         * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
         * evmcs->cr3_target_value0 = vmcs12->cr3_target_value0;
         * evmcs->cr3_target_value1 = vmcs12->cr3_target_value1;
         * evmcs->cr3_target_value2 = vmcs12->cr3_target_value2;
         * evmcs->cr3_target_value3 = vmcs12->cr3_target_value3;
         * evmcs->tpr_threshold = vmcs12->tpr_threshold;
         * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
         * evmcs->exception_bitmap = vmcs12->exception_bitmap;
         * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
         * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
         * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
         * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
         * evmcs->page_fault_error_code_mask =
         *              vmcs12->page_fault_error_code_mask;
         * evmcs->page_fault_error_code_match =
         *              vmcs12->page_fault_error_code_match;
         * evmcs->cr3_target_count = vmcs12->cr3_target_count;
         * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
         * evmcs->tsc_offset = vmcs12->tsc_offset;
         * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
         * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
         * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
         * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
         * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
         * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
         * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
         * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
         *
         * Not present in struct vmcs12:
         * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
         * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
         * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
         * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
         */

        evmcs->guest_es_selector = vmcs12->guest_es_selector;
        evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
        evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
        evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
        evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
        evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
        evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
        evmcs->guest_tr_selector = vmcs12->guest_tr_selector;

        evmcs->guest_es_limit = vmcs12->guest_es_limit;
        evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
1731 evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
1732 evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
1733 evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
1734 evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
1735 evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
1736 evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
1737 evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
1738 evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;
1739
1740 evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
1741 evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
1742 evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
1743 evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
1744 evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
1745 evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
1746 evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
1747 evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;
1748
1749 evmcs->guest_es_base = vmcs12->guest_es_base;
1750 evmcs->guest_cs_base = vmcs12->guest_cs_base;
1751 evmcs->guest_ss_base = vmcs12->guest_ss_base;
1752 evmcs->guest_ds_base = vmcs12->guest_ds_base;
1753 evmcs->guest_fs_base = vmcs12->guest_fs_base;
1754 evmcs->guest_gs_base = vmcs12->guest_gs_base;
1755 evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
1756 evmcs->guest_tr_base = vmcs12->guest_tr_base;
1757 evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
1758 evmcs->guest_idtr_base = vmcs12->guest_idtr_base;
1759
1760 evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
1761 evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;
1762
1763 evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
1764 evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
1765 evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
1766 evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;
1767
1768 evmcs->guest_pending_dbg_exceptions =
1769 vmcs12->guest_pending_dbg_exceptions;
1770 evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
1771 evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;
1772
1773 evmcs->guest_activity_state = vmcs12->guest_activity_state;
1774 evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;
1775
1776 evmcs->guest_cr0 = vmcs12->guest_cr0;
1777 evmcs->guest_cr3 = vmcs12->guest_cr3;
1778 evmcs->guest_cr4 = vmcs12->guest_cr4;
1779 evmcs->guest_dr7 = vmcs12->guest_dr7;
1780
1781 evmcs->guest_physical_address = vmcs12->guest_physical_address;
1782
1783 evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
1784 evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
1785 evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
1786 evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
1787 evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
1788 evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
1789 evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
1790 evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;
1791
1792 evmcs->exit_qualification = vmcs12->exit_qualification;
1793
1794 evmcs->guest_linear_address = vmcs12->guest_linear_address;
1795 evmcs->guest_rsp = vmcs12->guest_rsp;
1796 evmcs->guest_rflags = vmcs12->guest_rflags;
1797
1798 evmcs->guest_interruptibility_info =
1799 vmcs12->guest_interruptibility_info;
1800 evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
1801 evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
1802 evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
1803 evmcs->vm_entry_exception_error_code =
1804 vmcs12->vm_entry_exception_error_code;
1805 evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;
1806
1807 evmcs->guest_rip = vmcs12->guest_rip;
1808
1809 evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
1810
1811 return 0;
1812 }
1813
1814 /*
1815 * This is the equivalent of the nested (L1) hypervisor executing the
1816 * VMPTRLD instruction.
1817 */
1818 static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
1819 bool from_launch)
1820 {
1821 struct vcpu_vmx *vmx = to_vmx(vcpu);
1822 bool evmcs_gpa_changed = false;
1823 u64 evmcs_gpa;
1824
1825 if (likely(!vmx->nested.enlightened_vmcs_enabled))
1826 return 1;
1827
1828 if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa))
1829 return 1;
1830
1831 if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
1832 if (!vmx->nested.hv_evmcs)
1833 vmx->nested.current_vmptr = -1ull;
1834
1835 nested_release_evmcs(vcpu);
1836
1837 if (kvm_vcpu_map(vcpu, gpa_to_gfn(evmcs_gpa),
1838 &vmx->nested.hv_evmcs_map))
1839 return 0;
1840
1841 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;
1842
1843 /*
1844 * Currently, KVM only supports eVMCS version 1
1845 * (== KVM_EVMCS_VERSION) and thus we expect the guest to set the
1846 * first u32 field of the eVMCS, which specifies the eVMCS
1847 * VersionNumber, to this value.
1848 *
1849 * The guest should learn the supported eVMCS versions by
1850 * examining CPUID.0x4000000A.EAX[0:15]. The host userspace VMM is
1851 * expected to set this CPUID leaf according to the value
1852 * returned in vmcs_version from nested_enable_evmcs().
1853 *
1854 * However, it turns out that Microsoft Hyper-V fails to comply
1855 * with its own invented interface: when Hyper-V uses eVMCS, it
1856 * just sets the first u32 field of the eVMCS to the revision_id
1857 * specified in MSR_IA32_VMX_BASIC, instead of a supported eVMCS
1858 * version number, i.e. one of the versions specified in
1859 * CPUID.0x4000000A.EAX[0:15].
1860 *
1861 * To overcome this Hyper-V bug, we accept either a supported
1862 * eVMCS version or the VMCS12 revision_id as valid values for the
1863 * first u32 field of the eVMCS.
1864 */
1865 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
1866 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
1867 nested_release_evmcs(vcpu);
1868 return 0;
1869 }
1870
1871 vmx->nested.dirty_vmcs12 = true;
1872 vmx->nested.hv_evmcs_vmptr = evmcs_gpa;
1873
1874 evmcs_gpa_changed = true;
1875 /*
1876 * Unlike a normal vmcs12, an enlightened vmcs12 is not fully
1877 * reloaded from the guest's memory (read-only fields, fields not
1878 * present in struct hv_enlightened_vmcs, ...). Make sure there
1879 * are no leftovers.
1880 */
1881 if (from_launch) {
1882 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1883 memset(vmcs12, 0, sizeof(*vmcs12));
1884 vmcs12->hdr.revision_id = VMCS12_REVISION;
1885 }
1886
1887 }
1888
1889 /*
1890 * Clean fields data can't be used on VMLAUNCH and when we switch
1891 * between different L2 guests as KVM keeps a single VMCS12 per L1.
1892 */
1893 if (from_launch || evmcs_gpa_changed)
1894 vmx->nested.hv_evmcs->hv_clean_fields &=
1895 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
1896
1897 return 1;
1898 }
1899
1900 void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
1901 {
1902 struct vcpu_vmx *vmx = to_vmx(vcpu);
1903
1904 /*
1905 * hv_evmcs may end up not being mapped after migration (when
1906 * L2 was running); map it here to make sure vmcs12 changes are
1907 * properly reflected.
1908 */
1909 if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs)
1910 nested_vmx_handle_enlightened_vmptrld(vcpu, false);
1911
1912 if (vmx->nested.hv_evmcs) {
1913 copy_vmcs12_to_enlightened(vmx);
1914 /* All fields are clean */
1915 vmx->nested.hv_evmcs->hv_clean_fields |=
1916 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
1917 } else {
1918 copy_vmcs12_to_shadow(vmx);
1919 }
1920
1921 vmx->nested.need_vmcs12_to_shadow_sync = false;
1922 }
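
/*
 * For context, a minimal sketch of the clean-fields protocol from L1's
 * side (illustrative only; the field-to-group pairing shown is schematic
 * and not taken from the Hyper-V TLFS): once KVM marks all fields clean
 * above, an enlightened L1 that later changes a field is expected to
 * clear the corresponding group bit, e.g.:
 *
 *	evmcs->exception_bitmap = new_val;
 *	evmcs->hv_clean_fields &= ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_<GRP>;
 *
 * so that KVM re-copies only the dirtied groups on the next nested
 * VM-entry instead of the whole eVMCS.
 */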
1923
1924 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
1925 {
1926 struct vcpu_vmx *vmx =
1927 container_of(timer, struct vcpu_vmx, nested.preemption_timer);
1928
1929 vmx->nested.preemption_timer_expired = true;
1930 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
1931 kvm_vcpu_kick(&vmx->vcpu);
1932
1933 return HRTIMER_NORESTART;
1934 }
1935
1936 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
1937 {
1938 u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
1939 struct vcpu_vmx *vmx = to_vmx(vcpu);
1940
1941 /*
1942 * A timer value of zero is architecturally guaranteed to cause
1943 * a VMExit prior to executing any instructions in the guest.
1944 */
1945 if (preemption_timeout == 0) {
1946 vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
1947 return;
1948 }
1949
1950 if (vcpu->arch.virtual_tsc_khz == 0)
1951 return;
1952
1953 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
1954 preemption_timeout *= 1000000;
1955 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
1956 hrtimer_start(&vmx->nested.preemption_timer,
1957 ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
1958 }
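
/*
 * Worked example of the conversion above (numbers are illustrative): with
 * VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE = 5, each timer tick spans
 * 2^5 = 32 TSC cycles, so a vmcs12 timer value of 1000 with a virtual TSC
 * of 2,000,000 kHz programs the hrtimer for
 * (1000 << 5) * 1000000 / 2000000 = 16,000 ns, i.e. 16us after entry.
 */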
1959
1960 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
1961 {
1962 if (vmx->nested.nested_run_pending &&
1963 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
1964 return vmcs12->guest_ia32_efer;
1965 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
1966 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
1967 else
1968 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
1969 }
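
/*
 * Example of the fallback cases above (illustrative): if L1 enters L2
 * with VM_ENTRY_IA32E_MODE set but does not ask to load IA32_EFER, L2
 * inherits L1's EFER with LMA/LME forced on, mirroring the SDM rule that
 * VM-entry sets EFER.LMA (and EFER.LME when CR0.PG=1) to the value of
 * the "IA-32e mode guest" control when the load-EFER control is clear.
 */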
1970
1971 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
1972 {
1973 /*
1974 * If vmcs02 hasn't been initialized, set the constant vmcs02 state
1975 * according to L0's settings (vmcs12 is irrelevant here). Host
1976 * fields that come from L0 and are not constant, e.g. HOST_CR3,
1977 * will be set as needed prior to VMLAUNCH/VMRESUME.
1978 */
1979 if (vmx->nested.vmcs02_initialized)
1980 return;
1981 vmx->nested.vmcs02_initialized = true;
1982
1983 /*
1984 * We don't care what the EPTP value is; we just need to guarantee
1985 * it's valid so we don't get a false positive when doing early
1986 * consistency checks.
1987 */
1988 if (enable_ept && nested_early_check)
1989 vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0));
1990
1991 /* All VMFUNCs are currently emulated through L0 vmexits. */
1992 if (cpu_has_vmx_vmfunc())
1993 vmcs_write64(VM_FUNCTION_CONTROL, 0);
1994
1995 if (cpu_has_vmx_posted_intr())
1996 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
1997
1998 if (cpu_has_vmx_msr_bitmap())
1999 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
2000
2001 /*
2002 * The PML address never changes, so it is constant in vmcs02.
2003 * Conceptually we want to copy the PML index from vmcs01 here,
2004 * and then back to vmcs01 on nested vmexit. But since we flush
2005 * the log and reset GUEST_PML_INDEX on each vmexit, the PML
2006 * index is also effectively constant in vmcs02.
2007 */
2008 if (enable_pml) {
2009 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
2010 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
2011 }
2012
2013 if (cpu_has_vmx_encls_vmexit())
2014 vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
2015
2016 /*
2017 * Set the MSR load/store lists to match L0's settings. Only the
2018 * addresses are constant (for vmcs02); the counts can change based
2019 * on L2's behavior, e.g. switching to/from long mode.
2020 */
2021 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
2022 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
2023 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
2024
2025 vmx_set_constant_host_state(vmx);
2026 }
2027
2028 static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
2029 struct vmcs12 *vmcs12)
2030 {
2031 prepare_vmcs02_constant_state(vmx);
2032
2033 vmcs_write64(VMCS_LINK_POINTER, -1ull);
2034
2035 if (enable_vpid) {
2036 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
2037 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
2038 else
2039 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
2040 }
2041 }
2042
2043 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2044 {
2045 u32 exec_control, vmcs12_exec_ctrl;
2046 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
2047
2048 if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs)
2049 prepare_vmcs02_early_rare(vmx, vmcs12);
2050
2051 /*
2052 * PIN CONTROLS
2053 */
2054 exec_control = vmx_pin_based_exec_ctrl(vmx);
2055 exec_control |= (vmcs12->pin_based_vm_exec_control &
2056 ~PIN_BASED_VMX_PREEMPTION_TIMER);
2057
2058 /* Posted interrupts setting is only taken from vmcs12. */
2059 if (nested_cpu_has_posted_intr(vmcs12)) {
2060 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
2061 vmx->nested.pi_pending = false;
2062 } else {
2063 exec_control &= ~PIN_BASED_POSTED_INTR;
2064 }
2065 pin_controls_set(vmx, exec_control);
2066
2067 /*
2068 * EXEC CONTROLS
2069 */
2070 exec_control = vmx_exec_control(vmx); /* L0's desires */
2071 exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
2072 exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
2073 exec_control &= ~CPU_BASED_TPR_SHADOW;
2074 exec_control |= vmcs12->cpu_based_vm_exec_control;
2075
2076 if (exec_control & CPU_BASED_TPR_SHADOW)
2077 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
2078 #ifdef CONFIG_X86_64
2079 else
2080 exec_control |= CPU_BASED_CR8_LOAD_EXITING |
2081 CPU_BASED_CR8_STORE_EXITING;
2082 #endif
2083
2084 /*
2085 * A vmexit (to either the L1 hypervisor or L0 userspace) is always needed
2086 * for I/O port accesses.
2087 */
2088 exec_control |= CPU_BASED_UNCOND_IO_EXITING;
2089 exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
2090
2091 /*
2092 * This bit will be computed in nested_get_vmcs12_pages, because
2093 * we do not have access to L1's MSR bitmap yet. For now, keep
2094 * the same bit as before, hoping to avoid multiple VMWRITEs that
2095 * only set/clear this bit.
2096 */
2097 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
2098 exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS;
2099
2100 exec_controls_set(vmx, exec_control);
2101
2102 /*
2103 * SECONDARY EXEC CONTROLS
2104 */
2105 if (cpu_has_secondary_exec_ctrls()) {
2106 exec_control = vmx->secondary_exec_control;
2107
2108 /* Take the following fields only from vmcs12 */
2109 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2110 SECONDARY_EXEC_ENABLE_INVPCID |
2111 SECONDARY_EXEC_RDTSCP |
2112 SECONDARY_EXEC_XSAVES |
2113 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
2114 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2115 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2116 SECONDARY_EXEC_ENABLE_VMFUNC);
2117 if (nested_cpu_has(vmcs12,
2118 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
2119 vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
2120 ~SECONDARY_EXEC_ENABLE_PML;
2121 exec_control |= vmcs12_exec_ctrl;
2122 }
2123
2124 /* VMCS shadowing for L2 is emulated for now */
2125 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
2126
2127 /*
2128 * Preset *DT exiting when emulating UMIP, so that vmx_set_cr4()
2129 * will not have to rewrite the controls just for this bit.
2130 */
2131 if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated() &&
2132 (vmcs12->guest_cr4 & X86_CR4_UMIP))
2133 exec_control |= SECONDARY_EXEC_DESC;
2134
2135 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
2136 vmcs_write16(GUEST_INTR_STATUS,
2137 vmcs12->guest_intr_status);
2138
2139 secondary_exec_controls_set(vmx, exec_control);
2140 }
2141
2142 /*
2143 * ENTRY CONTROLS
2144 *
2145 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
2146 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
2147 * on the related bits (if supported by the CPU) in the hope that
2148 * we can avoid VMWrites during vmx_set_efer().
2149 */
2150 exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) &
2151 ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER;
2152 if (cpu_has_load_ia32_efer()) {
2153 if (guest_efer & EFER_LMA)
2154 exec_control |= VM_ENTRY_IA32E_MODE;
2155 if (guest_efer != host_efer)
2156 exec_control |= VM_ENTRY_LOAD_IA32_EFER;
2157 }
2158 vm_entry_controls_set(vmx, exec_control);
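
	/*
	 * Concrete case for the speculation above (illustrative): for a
	 * 64-bit L2 whose computed guest_efer equals host_efer,
	 * VM_ENTRY_IA32E_MODE is set and VM_ENTRY_LOAD_IA32_EFER stays
	 * clear, so when vmx_set_efer() later computes the same answer it
	 * finds the controls already correct and the VMWRITEs are avoided.
	 */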
2159
2160 /*
2161 * EXIT CONTROLS
2162 *
2163 * L2->L1 exit controls are emulated - the hardware exit is to L0 so
2164 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
2165 * bits may be modified by vmx_set_efer() in prepare_vmcs02().
2166 */
2167 exec_control = vmx_vmexit_ctrl();
2168 if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
2169 exec_control |= VM_EXIT_LOAD_IA32_EFER;
2170 vm_exit_controls_set(vmx, exec_control);
2171
2172 /*
2173 * Interrupt/Exception Fields
2174 */
2175 if (vmx->nested.nested_run_pending) {
2176 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2177 vmcs12->vm_entry_intr_info_field);
2178 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
2179 vmcs12->vm_entry_exception_error_code);
2180 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2181 vmcs12->vm_entry_instruction_len);
2182 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
2183 vmcs12->guest_interruptibility_info);
2184 vmx->loaded_vmcs->nmi_known_unmasked =
2185 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
2186 } else {
2187 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
2188 }
2189 }
2190
2191 static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2192 {
2193 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
2194
2195 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2196 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
2197 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
2198 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
2199 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
2200 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
2201 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
2202 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
2203 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
2204 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
2205 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
2206 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
2207 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
2208 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
2209 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
2210 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
2211 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
2212 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
2213 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
2214 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
2215 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
2216 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
2217 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
2218 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
2219 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
2220 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
2221 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
2222 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
2223 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
2224 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
2225 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
2226 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
2227 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
2228 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
2229 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
2230 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
2231 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
2232 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
2233 }
2234
2235 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2236 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
2237 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
2238 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
2239 vmcs12->guest_pending_dbg_exceptions);
2240 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
2241 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
2242
2243 /*
2244 * L1 may access L2's PDPTRs, so load them into vmcs02 so they
2245 * can later be read back to construct vmcs12
2246 */
2247 if (enable_ept) {
2248 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2249 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2250 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2251 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2252 }
2253
2254 if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
2255 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
2256 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
2257 }
2258
2259 if (nested_cpu_has_xsaves(vmcs12))
2260 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
2261
2262 /*
2263 * Whether page-faults are trapped is determined by a combination of
2264 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
2265 * If enable_ept, L0 doesn't care about page faults and we should
2266 * set all of these to L1's desires. However, if !enable_ept, L0 does
2267 * care about (at least some) page faults, and because it is not easy
2268 * (if at all possible?) to merge L0 and L1's desires, we simply ask
2269 * to exit on each and every L2 page fault. This is done by setting
2270 * MASK=MATCH=0 and (see below) EB.PF=1.
2271 * Note that below we don't need special code to set EB.PF beyond the
2272 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
2273 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
2274 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
2275 */
2276 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
2277 enable_ept ? vmcs12->page_fault_error_code_mask : 0);
2278 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
2279 enable_ept ? vmcs12->page_fault_error_code_match : 0);
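
	/*
	 * For reference, the architectural rule implemented above can be
	 * written as (pseudocode, per the SDM's #PF filtering description):
	 *
	 *	vmexit = (((PFEC & PFEC_MASK) == PFEC_MATCH) == EB.PF);
	 *
	 * so MASK=MATCH=0 makes every error code "match", and combined with
	 * EB.PF=1 in the !enable_ept case every L2 page fault exits to L0.
	 */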
2280
2281 if (cpu_has_vmx_apicv()) {
2282 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
2283 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
2284 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
2285 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
2286 }
2287
2288 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
2289 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
2290
2291 set_cr4_guest_host_mask(vmx);
2292 }
2293
2294 /*
2295 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
2296 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
2297 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
2298 * guest in a way that will both be appropriate to L1's requests, and our
2299 * needs. In addition to modifying the active vmcs (which is vmcs02), this
2300 * function also has additional necessary side-effects, like setting various
2301 * vcpu->arch fields.
2302 * Returns 0 on success, -EINVAL on failure. On failure, the invalid-state
2303 * exit qualification code is assigned to *entry_failure_code.
2304 */
2305 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
2306 u32 *entry_failure_code)
2307 {
2308 struct vcpu_vmx *vmx = to_vmx(vcpu);
2309 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
2310 bool load_guest_pdptrs_vmcs12 = false;
2311
2312 if (vmx->nested.dirty_vmcs12 || hv_evmcs) {
2313 prepare_vmcs02_rare(vmx, vmcs12);
2314 vmx->nested.dirty_vmcs12 = false;
2315
2316 load_guest_pdptrs_vmcs12 = !hv_evmcs ||
2317 !(hv_evmcs->hv_clean_fields &
2318 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1);
2319 }
2320
2321 if (vmx->nested.nested_run_pending &&
2322 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
2323 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
2324 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
2325 } else {
2326 kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
2327 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
2328 }
2329 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
2330 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
2331 vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
2332 vmx_set_rflags(vcpu, vmcs12->guest_rflags);
2333
2334 /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
2335 * bitwise-or of what L1 wants to trap for L2, and what we want to
2336 * trap. Note that CR0.TS also needs updating - we do this later.
2337 */
2338 update_exception_bitmap(vcpu);
2339 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
2340 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
2341
2342 if (vmx->nested.nested_run_pending &&
2343 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
2344 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
2345 vcpu->arch.pat = vmcs12->guest_ia32_pat;
2346 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2347 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
2348 }
2349
2350 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
2351
2352 if (kvm_has_tsc_control)
2353 decache_tsc_multiplier(vmx);
2354
2355 if (enable_vpid) {
2356 /*
2357 * There is no direct mapping between vpid02 and vpid12; the
2358 * vpid02 is per-vCPU for L0 and reused, while the value of
2359 * vpid12 is changed with one INVVPID during nested vmentry.
2360 * The vpid12 is allocated by L1 for L2, so it will not
2361 * influence the global bitmap (for vpid01 and vpid02 allocation)
2362 * even if L1 spawns a lot of nested vCPUs.
2363 */
2364 if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) {
2365 if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
2366 vmx->nested.last_vpid = vmcs12->virtual_processor_id;
2367 __vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false);
2368 }
2369 } else {
2370 /*
2371 * If L1 uses EPT, then L0 needs to execute INVEPT on
2372 * EPTP02 instead of EPTP01. Therefore, delay TLB
2373 * flush until vmcs02->eptp is fully updated by
2374 * KVM_REQ_LOAD_CR3. Note that this assumes
2375 * KVM_REQ_TLB_FLUSH is evaluated after
2376 * KVM_REQ_LOAD_CR3 in vcpu_enter_guest().
2377 */
2378 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2379 }
2380 }
2381
2382 if (nested_cpu_has_ept(vmcs12))
2383 nested_ept_init_mmu_context(vcpu);
2384 else if (nested_cpu_has2(vmcs12,
2385 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
2386 vmx_flush_tlb(vcpu, true);
2387
2388 /*
2389 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
2390 * bits which we consider mandatory enabled.
2391 * The CR0_READ_SHADOW is what L2 should have expected to read given
2392 * the specifications by L1; it's not enough to take
2393 * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may
2394 * have more bits set than L1 expected.
2395 */
2396 vmx_set_cr0(vcpu, vmcs12->guest_cr0);
2397 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
2398
2399 vmx_set_cr4(vcpu, vmcs12->guest_cr4);
2400 vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
2401
2402 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
2403 /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
2404 vmx_set_efer(vcpu, vcpu->arch.efer);
2405
2406 /*
2407 * If guest state is invalid and unrestricted guest is disabled,
2408 * L1 attempted VMEntry to L2 with invalid state.
2409 * Fail the VMEntry.
2410 */
2411 if (vmx->emulation_required) {
2412 *entry_failure_code = ENTRY_FAIL_DEFAULT;
2413 return -EINVAL;
2414 }
2415
2416 /* Load the guest's CR3, using either EPT or shadow page tables. */
2417 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
2418 entry_failure_code))
2419 return -EINVAL;
2420
2421 /*
2422 * Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12
2423 * on nested VM-Exit, which can occur without actually running L2 and
2424 * thus without hitting vmx_set_cr3(), e.g. if L1 is entering L2 with
2425 * vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the
2426 * transition to HLT instead of running L2.
2427 */
2428 if (enable_ept)
2429 vmcs_writel(GUEST_CR3, vmcs12->guest_cr3);
2430
2431 /* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */
2432 if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) &&
2433 is_pae_paging(vcpu)) {
2434 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2435 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2436 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2437 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2438 }
2439
2440 if (!enable_ept)
2441 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
2442
2443 kvm_rsp_write(vcpu, vmcs12->guest_rsp);
2444 kvm_rip_write(vcpu, vmcs12->guest_rip);
2445 return 0;
2446 }
2447
2448 static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
2449 {
2450 if (CC(!nested_cpu_has_nmi_exiting(vmcs12) &&
2451 nested_cpu_has_virtual_nmis(vmcs12)))
2452 return -EINVAL;
2453
2454 if (CC(!nested_cpu_has_virtual_nmis(vmcs12) &&
2455 nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING)))
2456 return -EINVAL;
2457
2458 return 0;
2459 }
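
/*
 * Example of a config rejected above (illustrative): a vmcs12 that sets
 * "virtual NMIs" without "NMI exiting", or requests NMI-window exiting
 * (CPU_BASED_VIRTUAL_NMI_PENDING) without virtual NMIs, fails the
 * consistency checks, matching the SDM's control dependencies.
 */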
2460
2461 static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
2462 {
2463 struct vcpu_vmx *vmx = to_vmx(vcpu);
2464 int maxphyaddr = cpuid_maxphyaddr(vcpu);
2465
2466 /* Check for memory type validity */
2467 switch (address & VMX_EPTP_MT_MASK) {
2468 case VMX_EPTP_MT_UC:
2469 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)))
2470 return false;
2471 break;
2472 case VMX_EPTP_MT_WB:
2473 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT)))
2474 return false;
2475 break;
2476 default:
2477 return false;
2478 }
2479
2480 /* Only a 4-level page-walk length is valid. */
2481 if (CC((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4))
2482 return false;
2483
2484 /* Reserved bits should not be set */
2485 if (CC(address >> maxphyaddr || ((address >> 7) & 0x1f)))
2486 return false;
2487
2488 /* AD, if set, should be supported */
2489 if (address & VMX_EPTP_AD_ENABLE_BIT) {
2490 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)))
2491 return false;
2492 }
2493
2494 return true;
2495 }
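
/*
 * For reference (illustrative, mirroring what construct_eptp() builds
 * for L0): an EPTP value that passes the checks above looks like
 *
 *	eptp = root_hpa | VMX_EPTP_PWL_4 | VMX_EPTP_MT_WB;
 *
 * i.e. a page-aligned root below MAXPHYADDR, write-back memtype and a
 * 4-level walk, optionally with VMX_EPTP_AD_ENABLE_BIT when the CPU
 * reports VMX_EPT_AD_BIT.
 */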
2496
2497 /*
2498 * Checks related to VM-Execution Control Fields
2499 */
2500 static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
2501 struct vmcs12 *vmcs12)
2502 {
2503 struct vcpu_vmx *vmx = to_vmx(vcpu);
2504
2505 if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
2506 vmx->nested.msrs.pinbased_ctls_low,
2507 vmx->nested.msrs.pinbased_ctls_high)) ||
2508 CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
2509 vmx->nested.msrs.procbased_ctls_low,
2510 vmx->nested.msrs.procbased_ctls_high)))
2511 return -EINVAL;
2512
2513 if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
2514 CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control,
2515 vmx->nested.msrs.secondary_ctls_low,
2516 vmx->nested.msrs.secondary_ctls_high)))
2517 return -EINVAL;
2518
2519 if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) ||
2520 nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
2521 nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
2522 nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
2523 nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
2524 nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
2525 nested_vmx_check_nmi_controls(vmcs12) ||
2526 nested_vmx_check_pml_controls(vcpu, vmcs12) ||
2527 nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
2528 nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
2529 nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
2530 CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
2531 return -EINVAL;
2532
2533 if (!nested_cpu_has_preemption_timer(vmcs12) &&
2534 nested_cpu_has_save_preemption_timer(vmcs12))
2535 return -EINVAL;
2536
2537 if (nested_cpu_has_ept(vmcs12) &&
2538 CC(!valid_ept_address(vcpu, vmcs12->ept_pointer)))
2539 return -EINVAL;
2540
2541 if (nested_cpu_has_vmfunc(vmcs12)) {
2542 if (CC(vmcs12->vm_function_control &
2543 ~vmx->nested.msrs.vmfunc_controls))
2544 return -EINVAL;
2545
2546 if (nested_cpu_has_eptp_switching(vmcs12)) {
2547 if (CC(!nested_cpu_has_ept(vmcs12)) ||
2548 CC(!page_address_valid(vcpu, vmcs12->eptp_list_address)))
2549 return -EINVAL;
2550 }
2551 }
2552
2553 return 0;
2554 }
2555
2556 /*
2557 * Checks related to VM-Exit Control Fields
2558 */
2559 static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
2560 struct vmcs12 *vmcs12)
2561 {
2562 struct vcpu_vmx *vmx = to_vmx(vcpu);
2563
2564 if (CC(!vmx_control_verify(vmcs12->vm_exit_controls,
2565 vmx->nested.msrs.exit_ctls_low,
2566 vmx->nested.msrs.exit_ctls_high)) ||
2567 CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12)))
2568 return -EINVAL;
2569
2570 return 0;
2571 }
2572
2573 /*
2574 * Checks related to VM-Entry Control Fields
2575 */
2576 static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
2577 struct vmcs12 *vmcs12)
2578 {
2579 struct vcpu_vmx *vmx = to_vmx(vcpu);
2580
2581 if (CC(!vmx_control_verify(vmcs12->vm_entry_controls,
2582 vmx->nested.msrs.entry_ctls_low,
2583 vmx->nested.msrs.entry_ctls_high)))
2584 return -EINVAL;
2585
2586 /*
2587 * From the Intel SDM, volume 3:
2588 * Fields relevant to VM-entry event injection must be set properly.
2589 * These fields are the VM-entry interruption-information field, the
2590 * VM-entry exception error code, and the VM-entry instruction length.
2591 */
2592 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
2593 u32 intr_info = vmcs12->vm_entry_intr_info_field;
2594 u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
2595 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
2596 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
2597 bool should_have_error_code;
2598 bool urg = nested_cpu_has2(vmcs12,
2599 SECONDARY_EXEC_UNRESTRICTED_GUEST);
2600 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
2601
2602 /* VM-entry interruption-info field: interruption type */
2603 if (CC(intr_type == INTR_TYPE_RESERVED) ||
2604 CC(intr_type == INTR_TYPE_OTHER_EVENT &&
2605 !nested_cpu_supports_monitor_trap_flag(vcpu)))
2606 return -EINVAL;
2607
2608 /* VM-entry interruption-info field: vector */
2609 if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
2610 CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
2611 CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
2612 return -EINVAL;
2613
2614 /* VM-entry interruption-info field: deliver error code */
2615 should_have_error_code =
2616 intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
2617 x86_exception_has_error_code(vector);
2618 if (CC(has_error_code != should_have_error_code))
2619 return -EINVAL;
2620
2621 /* VM-entry exception error code */
2622 if (CC(has_error_code &&
2623 vmcs12->vm_entry_exception_error_code & GENMASK(31, 16)))
2624 return -EINVAL;
2625
2626 /* VM-entry interruption-info field: reserved bits */
2627 if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK))
2628 return -EINVAL;
2629
2630 /* VM-entry instruction length */
2631 switch (intr_type) {
2632 case INTR_TYPE_SOFT_EXCEPTION:
2633 case INTR_TYPE_SOFT_INTR:
2634 case INTR_TYPE_PRIV_SW_EXCEPTION:
2635 if (CC(vmcs12->vm_entry_instruction_len > 15) ||
2636 CC(vmcs12->vm_entry_instruction_len == 0 &&
2637 CC(!nested_cpu_has_zero_length_injection(vcpu))))
2638 return -EINVAL;
2639 }
2640 }
2641
2642 if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
2643 return -EINVAL;
2644
2645 return 0;
2646 }
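
/*
 * Example of an injection that satisfies the checks above (illustrative):
 * L1 injecting #GP(0) into L2 would use
 *
 *	vm_entry_intr_info_field      = INTR_INFO_VALID_MASK |
 *					INTR_TYPE_HARD_EXCEPTION |
 *					INTR_INFO_DELIVER_CODE_MASK |
 *					GP_VECTOR;
 *	vm_entry_exception_error_code = 0;
 *
 * with no instruction length required, since hard exceptions are not
 * software-generated events.
 */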
2647
2648 static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
2649 struct vmcs12 *vmcs12)
2650 {
2651 if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
2652 nested_check_vm_exit_controls(vcpu, vmcs12) ||
2653 nested_check_vm_entry_controls(vcpu, vmcs12))
2654 return -EINVAL;
2655
2656 return 0;
2657 }
2658
2659 static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
2660 struct vmcs12 *vmcs12)
2661 {
2662 bool ia32e;
2663
2664 if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) ||
2665 CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) ||
2666 CC(!nested_cr3_valid(vcpu, vmcs12->host_cr3)))
2667 return -EINVAL;
2668
2669 if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
2670 CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu)))
2671 return -EINVAL;
2672
2673 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) &&
2674 CC(!kvm_pat_valid(vmcs12->host_ia32_pat)))
2675 return -EINVAL;
2676
2677 #ifdef CONFIG_X86_64
2678 ia32e = !!(vcpu->arch.efer & EFER_LMA);
2679 #else
2680 ia32e = false;
2681 #endif
2682
2683 if (ia32e) {
2684 if (CC(!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)) ||
2685 CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
2686 return -EINVAL;
2687 } else {
2688 if (CC(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) ||
2689 CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
2690 CC(vmcs12->host_cr4 & X86_CR4_PCIDE) ||
2691 CC((vmcs12->host_rip) >> 32))
2692 return -EINVAL;
2693 }
2694
2695 if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2696 CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2697 CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2698 CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2699 CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2700 CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2701 CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2702 CC(vmcs12->host_cs_selector == 0) ||
2703 CC(vmcs12->host_tr_selector == 0) ||
2704 CC(vmcs12->host_ss_selector == 0 && !ia32e))
2705 return -EINVAL;
2706
2707 #ifdef CONFIG_X86_64
2708 if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) ||
2709 CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) ||
2710 CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) ||
2711 CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) ||
2712 CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) ||
2713 CC(is_noncanonical_address(vmcs12->host_rip, vcpu)))
2714 return -EINVAL;
2715 #endif
2716
2717 /*
2718 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
2719 * IA32_EFER MSR must be 0 in the field for that register. In addition,
2720 * the values of the LMA and LME bits in the field must each be that of
2721 * the host address-space size VM-exit control.
2722 */
2723 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
2724 if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) ||
2725 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) ||
2726 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)))
2727 return -EINVAL;
2728 }
2729
2730 return 0;
2731 }
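
/*
 * Example for the EFER consistency rule above (illustrative): a 64-bit
 * host state (ia32e == true) with VM_EXIT_LOAD_IA32_EFER set must
 * provide host_ia32_efer with both EFER_LMA and EFER_LME set, e.g.
 * EFER_SCE | EFER_LME | EFER_LMA | EFER_NX; a value with LMA clear
 * fails the checks above.
 */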
2732
2733 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
2734 struct vmcs12 *vmcs12)
2735 {
2736 int r = 0;
2737 struct vmcs12 *shadow;
2738 struct kvm_host_map map;
2739
2740 if (vmcs12->vmcs_link_pointer == -1ull)
2741 return 0;
2742
2743 if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)))
2744 return -EINVAL;
2745
2746 if (CC(kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map)))
2747 return -EINVAL;
2748
2749 shadow = map.hva;
2750
2751 if (CC(shadow->hdr.revision_id != VMCS12_REVISION) ||
2752 CC(shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)))
2753 r = -EINVAL;
2754
2755 kvm_vcpu_unmap(vcpu, &map, false);
2756 return r;
2757 }
2758
2759 /*
2760 * Checks related to Guest Non-register State
2761 */
2762 static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
2763 {
2764 if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
2765 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT))
2766 return -EINVAL;
2767
2768 return 0;
2769 }
2770
2771 static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
2772 struct vmcs12 *vmcs12,
2773 u32 *exit_qual)
2774 {
2775 bool ia32e;
2776
2777 *exit_qual = ENTRY_FAIL_DEFAULT;
2778
2779 if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) ||
2780 CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)))
2781 return -EINVAL;
2782
2783 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
2784 CC(!kvm_pat_valid(vmcs12->guest_ia32_pat)))
2785 return -EINVAL;
2786
2787 if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
2788 *exit_qual = ENTRY_FAIL_VMCS_LINK_PTR;
2789 return -EINVAL;
2790 }
2791
2792 /*
2793 * If the load IA32_EFER VM-entry control is 1, the following checks
2794 * are performed on the field for the IA32_EFER MSR:
2795 * - Bits reserved in the IA32_EFER MSR must be 0.
2796 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
2797 * the IA-32e mode guest VM-exit control. It must also be identical
2798 * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
2799 * CR0.PG) is 1.
2800 */
2801 if (to_vmx(vcpu)->nested.nested_run_pending &&
2802 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
2803 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
2804 if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
2805 CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
2806 CC(((vmcs12->guest_cr0 & X86_CR0_PG) &&
2807 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))))
2808 return -EINVAL;
2809 }
2810
2811 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
2812 (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) ||
2813 CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))))
2814 return -EINVAL;
2815
2816 if (nested_check_guest_non_reg_state(vmcs12))
2817 return -EINVAL;
2818
2819 return 0;
2820 }
2821
2822 static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
2823 {
2824 struct vcpu_vmx *vmx = to_vmx(vcpu);
2825 unsigned long cr3, cr4;
2826 bool vm_fail;
2827
2828 if (!nested_early_check)
2829 return 0;
2830
2831 if (vmx->msr_autoload.host.nr)
2832 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
2833 if (vmx->msr_autoload.guest.nr)
2834 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
2835
2836 preempt_disable();
2837
2838 vmx_prepare_switch_to_guest(vcpu);
2839
2840 /*
2841 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
2842 * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to
2843 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
2844 * there is no need to preserve other bits or save/restore the field.
2845 */
2846 vmcs_writel(GUEST_RFLAGS, 0);
2847
2848 cr3 = __get_current_cr3_fast();
2849 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
2850 vmcs_writel(HOST_CR3, cr3);
2851 vmx->loaded_vmcs->host_state.cr3 = cr3;
2852 }
2853
2854 cr4 = cr4_read_shadow();
2855 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
2856 vmcs_writel(HOST_CR4, cr4);
2857 vmx->loaded_vmcs->host_state.cr4 = cr4;
2858 }
2859
2860 asm(
2861 "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
2862 "cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
2863 "je 1f \n\t"
2864 __ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t"
2865 "mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
2866 "1: \n\t"
2867 "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
2868
2869 /* Check if vmlaunch or vmresume is needed */
2870 "cmpb $0, %c[launched](%[loaded_vmcs])\n\t"
2871
2872 /*
2873 * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set
2874 * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail
2875 * Valid. vmx_vmenter() directly "returns" RFLAGS, and so the
2876 * result of VM-Enter is captured via CC_{SET,OUT} to vm_fail.
2877 */
2878 "call vmx_vmenter\n\t"
2879
2880 CC_SET(be)
2881 : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail)
2882 : [HOST_RSP]"r"((unsigned long)HOST_RSP),
2883 [loaded_vmcs]"r"(vmx->loaded_vmcs),
2884 [launched]"i"(offsetof(struct loaded_vmcs, launched)),
2885 [host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)),
2886 [wordsize]"i"(sizeof(ulong))
2887 : "memory"
2888 );
2889
2890 if (vmx->msr_autoload.host.nr)
2891 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
2892 if (vmx->msr_autoload.guest.nr)
2893 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
2894
2895 if (vm_fail) {
2896 u32 error = vmcs_read32(VM_INSTRUCTION_ERROR);
2897
2898 preempt_enable();
2899
2900 trace_kvm_nested_vmenter_failed(
2901 "early hardware check VM-instruction error: ", error);
2902 WARN_ON_ONCE(error != VMXERR_ENTRY_INVALID_CONTROL_FIELD);
2903 return 1;
2904 }
2905
2906 /*
2907 * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
2908 */
2909 local_irq_enable();
2910 if (hw_breakpoint_active())
2911 set_debugreg(__this_cpu_read(cpu_dr7), 7);
2912 preempt_enable();
2913
2914 /*
2915 * A non-failing VMEntry means we somehow entered guest mode with
2916 * an illegal RIP, and that's just the tip of the iceberg. There
2917 * is no telling what memory has been modified or what state has
2918 * been exposed to unknown code. Hitting this all but guarantees
2919 * a (very critical) hardware issue.
2920 */
2921 WARN_ON(!(vmcs_read32(VM_EXIT_REASON) &
2922 VMX_EXIT_REASONS_FAILED_VMENTRY));
2923
2924 return 0;
2925 }
2926
2927 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
2928 struct vmcs12 *vmcs12);
2929
2930 static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
2931 {
2932 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2933 struct vcpu_vmx *vmx = to_vmx(vcpu);
2934 struct kvm_host_map *map;
2935 struct page *page;
2936 u64 hpa;
2937
2938 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
2939 /*
2940 * Translate L1 physical address to host physical
2941 * address for vmcs02. Keep the page pinned, so this
2942 * physical address remains valid. We keep a reference
2943 * to it so we can release it later.
2944 */
2945 if (vmx->nested.apic_access_page) { /* shouldn't happen */
2946 kvm_release_page_dirty(vmx->nested.apic_access_page);
2947 vmx->nested.apic_access_page = NULL;
2948 }
2949 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
2950 if (!is_error_page(page)) {
2951 vmx->nested.apic_access_page = page;
2952 hpa = page_to_phys(vmx->nested.apic_access_page);
2953 vmcs_write64(APIC_ACCESS_ADDR, hpa);
2954 } else {
2955 pr_debug_ratelimited("%s: no backing 'struct page' for APIC-access address in vmcs12\n",
2956 __func__);
2957 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2958 vcpu->run->internal.suberror =
2959 KVM_INTERNAL_ERROR_EMULATION;
2960 vcpu->run->internal.ndata = 0;
2961 return false;
2962 }
2963 }
2964
2965 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
2966 map = &vmx->nested.virtual_apic_map;
2967
2968 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) {
2969 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn));
2970 } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
2971 nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
2972 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
2973 /*
2974 * The processor will never use the TPR shadow; simply
2975 * clear the bit from the execution control. Such a
2976 * configuration is useless, but it happens in tests.
2977 * For any other configuration, failing the vm entry is
2978 * _not_ what the processor does but it's basically the
2979 * only possibility we have.
2980 */
2981 exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW);
2982 } else {
2983 /*
2984 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to
2985 * force VM-Entry to fail.
2986 */
2987 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
2988 }
2989 }
2990
2991 if (nested_cpu_has_posted_intr(vmcs12)) {
2992 map = &vmx->nested.pi_desc_map;
2993
2994 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) {
2995 vmx->nested.pi_desc =
2996 (struct pi_desc *)(((void *)map->hva) +
2997 offset_in_page(vmcs12->posted_intr_desc_addr));
2998 vmcs_write64(POSTED_INTR_DESC_ADDR,
2999 pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
3000 }
3001 }
3002 if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
3003 exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
3004 else
3005 exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
3006 return true;
3007 }
3008
3009 /*
3010 * Intel's VMX Instruction Reference specifies a common set of prerequisites
3011 * for running VMX instructions (except VMXON, whose prerequisites are
3012 * slightly different). It also specifies what exception to inject otherwise.
3013 * Note that many of these exceptions have priority over VM exits, so they
3014 * don't have to be checked again here.
3015 */
3016 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
3017 {
3018 if (!to_vmx(vcpu)->nested.vmxon) {
3019 kvm_queue_exception(vcpu, UD_VECTOR);
3020 return 0;
3021 }
3022
3023 if (vmx_get_cpl(vcpu)) {
3024 kvm_inject_gp(vcpu, 0);
3025 return 0;
3026 }
3027
3028 return 1;
3029 }
3030
3031 static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
3032 {
3033 u8 rvi = vmx_get_rvi();
3034 u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
3035
3036 return ((rvi & 0xf0) > (vppr & 0xf0));
3037 }
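
/*
 * Worked example (illustrative): rvi = 0x51 vs. vppr = 0x40 compares
 * priority classes 5 > 4, so a pending virtual interrupt is deliverable
 * and this returns true; rvi = 0x45 against the same vppr is masked by
 * PPR and this returns false.
 */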
3038
3039 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
3040 struct vmcs12 *vmcs12);
3041
3042 /*
3043 * If from_vmentry is false, this is being called from state restore (either RSM
3044 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
3045 *
3046 * Returns:
3047 * NVMX_ENTRY_SUCCESS: Entered VMX non-root mode
3048 * NVMX_ENTRY_VMFAIL: Consistency check VMFail
3049 * NVMX_ENTRY_VMEXIT: Consistency check VMExit
3050 * NVMX_ENTRY_KVM_INTERNAL_ERROR: KVM internal error
3051 */
3052 enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
3053 bool from_vmentry)
3054 {
3055 struct vcpu_vmx *vmx = to_vmx(vcpu);
3056 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3057 bool evaluate_pending_interrupts;
3058 u32 exit_reason = EXIT_REASON_INVALID_STATE;
3059 u32 exit_qual;
3060
3061 evaluate_pending_interrupts = exec_controls_get(vmx) &
3062 (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
3063 if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
3064 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
3065
3066 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
3067 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
3068 if (kvm_mpx_supported() &&
3069 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
3070 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
3071
3072 /*
3073 * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and*
3074 * nested early checks are disabled. In the event of a "late" VM-Fail,
3075 * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its
3076 * software model to the pre-VMEntry host state. When EPT is disabled,
3077 * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes
3078 * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing
3079 * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to
3080 * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested
3081 * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is
3082 * guaranteed to be overwritten with a shadow CR3 prior to re-entering
3083 * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as
3084 * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks
3085 * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail
3086 * path would need to manually save/restore vmcs01.GUEST_CR3.
3087 */
3088 if (!enable_ept && !nested_early_check)
3089 vmcs_writel(GUEST_CR3, vcpu->arch.cr3);
3090
3091 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
3092
3093 prepare_vmcs02_early(vmx, vmcs12);
3094
3095 if (from_vmentry) {
3096 if (unlikely(!nested_get_vmcs12_pages(vcpu)))
3097 return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
3098
3099 if (nested_vmx_check_vmentry_hw(vcpu)) {
3100 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3101 return NVMX_VMENTRY_VMFAIL;
3102 }
3103
3104 if (nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
3105 goto vmentry_fail_vmexit;
3106 }
3107
3108 enter_guest_mode(vcpu);
3109 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
3110 vcpu->arch.tsc_offset += vmcs12->tsc_offset;
3111
3112 if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
3113 goto vmentry_fail_vmexit_guest_mode;
3114
3115 if (from_vmentry) {
3116 exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
3117 exit_qual = nested_vmx_load_msr(vcpu,
3118 vmcs12->vm_entry_msr_load_addr,
3119 vmcs12->vm_entry_msr_load_count);
3120 if (exit_qual)
3121 goto vmentry_fail_vmexit_guest_mode;
3122 } else {
3123 /*
3124 * The MMU is not initialized to point at the right entities yet and
3125 * "get pages" would need to read data from the guest (i.e. we will
3126 * need to perform gpa to hpa translation). Request a call
3127 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs
3128 * have already been set at vmentry time and should not be reset.
3129 */
3130 kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
3131 }
3132
3133 /*
3134	 * If L1 had a pending IRQ/NMI when it executed
3135	 * VMLAUNCH/VMRESUME which wasn't delivered because it was
3136	 * disallowed (e.g. interrupts disabled), L0 needs to
3137	 * evaluate whether this pending event should cause an exit from
3138	 * L2 to L1 or be delivered directly to L2 (e.g. in case L1
3139	 * doesn't intercept EXTERNAL_INTERRUPT).
3140 *
3141 * Usually this would be handled by the processor noticing an
3142 * IRQ/NMI window request, or checking RVI during evaluation of
3143 * pending virtual interrupts. However, this setting was done
3144 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
3145 * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
3146 */
3147 if (unlikely(evaluate_pending_interrupts))
3148 kvm_make_request(KVM_REQ_EVENT, vcpu);
3149
3150 /*
3151 * Do not start the preemption timer hrtimer until after we know
3152 * we are successful, so that only nested_vmx_vmexit needs to cancel
3153 * the timer.
3154 */
3155 vmx->nested.preemption_timer_expired = false;
3156 if (nested_cpu_has_preemption_timer(vmcs12))
3157 vmx_start_preemption_timer(vcpu);
3158
3159 /*
3160 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
3161 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
3162 * returned as far as L1 is concerned. It will only return (and set
3163 * the success flag) when L2 exits (see nested_vmx_vmexit()).
3164 */
3165 return NVMX_VMENTRY_SUCCESS;
3166
3167 /*
3168 * A failed consistency check that leads to a VMExit during L1's
3169 * VMEnter to L2 is a variation of a normal VMexit, as explained in
3170 * 26.7 "VM-entry failures during or after loading guest state".
3171 */
3172 vmentry_fail_vmexit_guest_mode:
3173 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
3174 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
3175 leave_guest_mode(vcpu);
3176
3177 vmentry_fail_vmexit:
3178 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3179
3180 if (!from_vmentry)
3181 return NVMX_VMENTRY_VMEXIT;
3182
3183 load_vmcs12_host_state(vcpu, vmcs12);
3184 vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
3185 vmcs12->exit_qualification = exit_qual;
3186 if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
3187 vmx->nested.need_vmcs12_to_shadow_sync = true;
3188 return NVMX_VMENTRY_VMEXIT;
3189 }
3190
3191 /*
3192 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
3193 * for running an L2 nested guest.
3194 */
3195 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
3196 {
3197 struct vmcs12 *vmcs12;
3198 enum nvmx_vmentry_status status;
3199 struct vcpu_vmx *vmx = to_vmx(vcpu);
3200 u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
3201
3202 if (!nested_vmx_check_permission(vcpu))
3203 return 1;
3204
3205 if (!nested_vmx_handle_enlightened_vmptrld(vcpu, launch))
3206 return 1;
3207
3208 if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)
3209 return nested_vmx_failInvalid(vcpu);
3210
3211 vmcs12 = get_vmcs12(vcpu);
3212
3213 /*
3214 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
3215 * that there *is* a valid VMCS pointer, RFLAGS.CF is set
3216 * rather than RFLAGS.ZF, and no error number is stored to the
3217 * VM-instruction error field.
3218 */
3219 if (vmcs12->hdr.shadow_vmcs)
3220 return nested_vmx_failInvalid(vcpu);
3221
3222 if (vmx->nested.hv_evmcs) {
3223 copy_enlightened_to_vmcs12(vmx);
3224 /* Enlightened VMCS doesn't have launch state */
3225 vmcs12->launch_state = !launch;
3226 } else if (enable_shadow_vmcs) {
3227 copy_shadow_to_vmcs12(vmx);
3228 }
3229
3230 /*
3231 * The nested entry process starts with enforcing various prerequisites
3232	 * on vmcs12 as required by the Intel SDM, acting appropriately when
3233	 * they fail: as the SDM explains, some conditions should cause the
3234 * instruction to fail, while others will cause the instruction to seem
3235 * to succeed, but return an EXIT_REASON_INVALID_STATE.
3236 * To speed up the normal (success) code path, we should avoid checking
3237 * for misconfigurations which will anyway be caught by the processor
3238 * when using the merged vmcs02.
3239 */
3240 if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)
3241 return nested_vmx_failValid(vcpu,
3242 VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
3243
3244 if (vmcs12->launch_state == launch)
3245 return nested_vmx_failValid(vcpu,
3246 launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
3247 : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
3248
3249 if (nested_vmx_check_controls(vcpu, vmcs12))
3250 return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3251
3252 if (nested_vmx_check_host_state(vcpu, vmcs12))
3253 return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
3254
3255 /*
3256 * We're finally done with prerequisite checking, and can start with
3257 * the nested entry.
3258 */
3259 vmx->nested.nested_run_pending = 1;
3260 status = nested_vmx_enter_non_root_mode(vcpu, true);
3261 if (unlikely(status != NVMX_VMENTRY_SUCCESS))
3262 goto vmentry_failed;
3263
3264 /* Hide L1D cache contents from the nested guest. */
3265 vmx->vcpu.arch.l1tf_flush_l1d = true;
3266
3267 /*
3268 * Must happen outside of nested_vmx_enter_non_root_mode() as it will
3269 * also be used as part of restoring nVMX state for
3270 * snapshot restore (migration).
3271 *
3272	 * In this flow, it is assumed that the vmcs12 cache was
3273	 * transferred as part of the captured nVMX state and should
3274 * therefore not be read from guest memory (which may not
3275 * exist on destination host yet).
3276 */
3277 nested_cache_shadow_vmcs12(vcpu, vmcs12);
3278
3279 /*
3280 * If we're entering a halted L2 vcpu and the L2 vcpu won't be
3281 * awakened by event injection or by an NMI-window VM-exit or
3282 * by an interrupt-window VM-exit, halt the vcpu.
3283 */
3284 if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
3285 !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
3286 !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_NMI_PENDING) &&
3287 !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) &&
3288 (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
3289 vmx->nested.nested_run_pending = 0;
3290 return kvm_vcpu_halt(vcpu);
3291 }
3292 return 1;
3293
3294 vmentry_failed:
3295 vmx->nested.nested_run_pending = 0;
3296 if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR)
3297 return 0;
3298 if (status == NVMX_VMENTRY_VMEXIT)
3299 return 1;
3300 WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL);
3301 return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3302 }
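/*
 * Illustrative note, an assumption drawn from KVM's exit-handler convention
 * rather than this file: returning 1 from nested_vmx_run() resumes the
 * guest, while returning 0 bounces out to userspace, which above happens
 * only for NVMX_VMENTRY_KVM_INTERNAL_ERROR.
 */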
3303
3304 /*
3305 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
3306	 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
3307 * This function returns the new value we should put in vmcs12.guest_cr0.
3308 * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
3309 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
3310 * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
3311 * didn't trap the bit, because if L1 did, so would L0).
3312 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
3313 * been modified by L2, and L1 knows it. So just leave the old value of
3314 * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
3315 * isn't relevant, because if L0 traps this bit it can set it to anything.
3316 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
3317 * changed these bits, and therefore they need to be updated, but L0
3318 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather
3319 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
3320 */
3321 static inline unsigned long
3322 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3323 {
3324 return
3325 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
3326 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
3327 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
3328 vcpu->arch.cr0_guest_owned_bits));
3329 }
3330
3331 static inline unsigned long
3332 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3333 {
3334 return
3335 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
3336 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
3337 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
3338 vcpu->arch.cr4_guest_owned_bits));
3339 }
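/*
 * Illustrative sketch (hypothetical helper with assumed masks, not used by
 * KVM): the three-way merge above partitions the register into disjoint
 * sources:
 *   L2-owned bits    <- vmcs02 GUEST_CR0       (L2 wrote them directly)
 *   L1-trapped bits  <- vmcs12->guest_cr0      (L2 could not have changed them)
 *   remaining bits   <- vmcs02 CR0_READ_SHADOW (trapped by L0 only)
 */
static inline unsigned long __maybe_unused
cr_merge_example(unsigned long hw_cr, unsigned long vmcs12_cr,
		 unsigned long read_shadow)
{
	const unsigned long owned = X86_CR0_TS;   /* assumed L2-owned bits */
	const unsigned long l1_mask = X86_CR0_MP; /* assumed L1-trapped bits */

	return (hw_cr & owned) | (vmcs12_cr & l1_mask) |
	       (read_shadow & ~(owned | l1_mask));
}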
3340
3341 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
3342 struct vmcs12 *vmcs12)
3343 {
3344 u32 idt_vectoring;
3345 unsigned int nr;
3346
3347 if (vcpu->arch.exception.injected) {
3348 nr = vcpu->arch.exception.nr;
3349 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3350
3351 if (kvm_exception_is_soft(nr)) {
3352 vmcs12->vm_exit_instruction_len =
3353 vcpu->arch.event_exit_inst_len;
3354 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
3355 } else
3356 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
3357
3358 if (vcpu->arch.exception.has_error_code) {
3359 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
3360 vmcs12->idt_vectoring_error_code =
3361 vcpu->arch.exception.error_code;
3362 }
3363
3364 vmcs12->idt_vectoring_info_field = idt_vectoring;
3365 } else if (vcpu->arch.nmi_injected) {
3366 vmcs12->idt_vectoring_info_field =
3367 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
3368 } else if (vcpu->arch.interrupt.injected) {
3369 nr = vcpu->arch.interrupt.nr;
3370 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3371
3372 if (vcpu->arch.interrupt.soft) {
3373 idt_vectoring |= INTR_TYPE_SOFT_INTR;
3374 vmcs12->vm_entry_instruction_len =
3375 vcpu->arch.event_exit_inst_len;
3376 } else
3377 idt_vectoring |= INTR_TYPE_EXT_INTR;
3378
3379 vmcs12->idt_vectoring_info_field = idt_vectoring;
3380 }
3381 }
3382
3383
3384 static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
3385 {
3386 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3387 gfn_t gfn;
3388
3389 /*
3390 * Don't need to mark the APIC access page dirty; it is never
3391 * written to by the CPU during APIC virtualization.
3392 */
3393
3394 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3395 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
3396 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3397 }
3398
3399 if (nested_cpu_has_posted_intr(vmcs12)) {
3400 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
3401 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3402 }
3403 }
3404
3405 static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
3406 {
3407 struct vcpu_vmx *vmx = to_vmx(vcpu);
3408 int max_irr;
3409 void *vapic_page;
3410 u16 status;
3411
3412 if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
3413 return;
3414
3415 vmx->nested.pi_pending = false;
3416 if (!pi_test_and_clear_on(vmx->nested.pi_desc))
3417 return;
3418
3419 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
3420 if (max_irr != 256) {
3421 vapic_page = vmx->nested.virtual_apic_map.hva;
3422 if (!vapic_page)
3423 return;
3424
3425 __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
3426 vapic_page, &max_irr);
3427 status = vmcs_read16(GUEST_INTR_STATUS);
3428 if ((u8)max_irr > ((u8)status & 0xff)) {
3429 status &= ~0xff;
3430 status |= (u8)max_irr;
3431 vmcs_write16(GUEST_INTR_STATUS, status);
3432 }
3433 }
3434
3435 nested_mark_vmcs12_pages_dirty(vcpu);
3436 }
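/*
 * Illustrative sketch (hypothetical helper, not used by KVM): the PIR is a
 * 256-bit bitmap with one bit per vector.  find_last_bit() returns the
 * bitmap size (256) when no bit is set, which is why the code above
 * compares against 256 rather than a negative sentinel.
 */
static inline int __maybe_unused highest_pir_vector(const unsigned long *pir)
{
	int vec = find_last_bit(pir, 256);

	return vec == 256 ? -1 : vec;	/* -1: nothing pending */
}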
3437
3438 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
3439 unsigned long exit_qual)
3440 {
3441 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3442 unsigned int nr = vcpu->arch.exception.nr;
3443 u32 intr_info = nr | INTR_INFO_VALID_MASK;
3444
3445 if (vcpu->arch.exception.has_error_code) {
3446 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
3447 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
3448 }
3449
3450 if (kvm_exception_is_soft(nr))
3451 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3452 else
3453 intr_info |= INTR_TYPE_HARD_EXCEPTION;
3454
3455 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
3456 vmx_get_nmi_mask(vcpu))
3457 intr_info |= INTR_INFO_UNBLOCK_NMI;
3458
3459 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
3460 }
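/*
 * Illustrative sketch (hypothetical helper, not used by KVM): how an
 * interruption-info word like the one assembled above is laid out --
 * vector in bits 0-7, type in bits 8-10, deliver-error-code in bit 11,
 * NMI-unblocking in bit 12, valid in bit 31.
 */
static inline u32 __maybe_unused make_hard_exception_info(u8 vector, bool has_ec)
{
	u32 info = vector | INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK;

	if (has_ec)
		info |= INTR_INFO_DELIVER_CODE_MASK;	/* e.g. #PF, #GP */
	return info;
}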
3461
3462 static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
3463 {
3464 struct vcpu_vmx *vmx = to_vmx(vcpu);
3465 unsigned long exit_qual;
3466 bool block_nested_events =
3467 vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
3468 struct kvm_lapic *apic = vcpu->arch.apic;
3469
3470 if (lapic_in_kernel(vcpu) &&
3471 test_bit(KVM_APIC_INIT, &apic->pending_events)) {
3472 if (block_nested_events)
3473 return -EBUSY;
3474 nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0);
3475 return 0;
3476 }
3477
3478 if (vcpu->arch.exception.pending &&
3479 nested_vmx_check_exception(vcpu, &exit_qual)) {
3480 if (block_nested_events)
3481 return -EBUSY;
3482 nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
3483 return 0;
3484 }
3485
3486 if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
3487 vmx->nested.preemption_timer_expired) {
3488 if (block_nested_events)
3489 return -EBUSY;
3490 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
3491 return 0;
3492 }
3493
3494 if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
3495 if (block_nested_events)
3496 return -EBUSY;
3497 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
3498 NMI_VECTOR | INTR_TYPE_NMI_INTR |
3499 INTR_INFO_VALID_MASK, 0);
3500 /*
3501 * The NMI-triggered VM exit counts as injection:
3502 * clear this one and block further NMIs.
3503 */
3504 vcpu->arch.nmi_pending = 0;
3505 vmx_set_nmi_mask(vcpu, true);
3506 return 0;
3507 }
3508
3509 if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
3510 nested_exit_on_intr(vcpu)) {
3511 if (block_nested_events)
3512 return -EBUSY;
3513 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
3514 return 0;
3515 }
3516
3517 vmx_complete_nested_posted_interrupt(vcpu);
3518 return 0;
3519 }
3520
3521 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
3522 {
3523 ktime_t remaining =
3524 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
3525 u64 value;
3526
3527 if (ktime_to_ns(remaining) <= 0)
3528 return 0;
3529
3530 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
3531 do_div(value, 1000000);
3532 return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
3533 }
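/*
 * Illustrative sketch (hypothetical helper, not used by KVM): the
 * conversion above is ns -> TSC cycles -> emulated timer units, i.e.
 *   value = (ns * tsc_khz / 10^6) >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE
 * so the emulated timer ticks once per 2^5 = 32 TSC cycles.
 */
static inline u64 __maybe_unused ns_to_preemption_timer_units(u64 ns, u64 tsc_khz)
{
	u64 ticks = ns * tsc_khz;

	do_div(ticks, 1000000);	/* kHz * ns / 10^6 = TSC cycles */
	return ticks >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
}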
3534
3535 static bool is_vmcs12_ext_field(unsigned long field)
3536 {
3537 switch (field) {
3538 case GUEST_ES_SELECTOR:
3539 case GUEST_CS_SELECTOR:
3540 case GUEST_SS_SELECTOR:
3541 case GUEST_DS_SELECTOR:
3542 case GUEST_FS_SELECTOR:
3543 case GUEST_GS_SELECTOR:
3544 case GUEST_LDTR_SELECTOR:
3545 case GUEST_TR_SELECTOR:
3546 case GUEST_ES_LIMIT:
3547 case GUEST_CS_LIMIT:
3548 case GUEST_SS_LIMIT:
3549 case GUEST_DS_LIMIT:
3550 case GUEST_FS_LIMIT:
3551 case GUEST_GS_LIMIT:
3552 case GUEST_LDTR_LIMIT:
3553 case GUEST_TR_LIMIT:
3554 case GUEST_GDTR_LIMIT:
3555 case GUEST_IDTR_LIMIT:
3556 case GUEST_ES_AR_BYTES:
3557 case GUEST_DS_AR_BYTES:
3558 case GUEST_FS_AR_BYTES:
3559 case GUEST_GS_AR_BYTES:
3560 case GUEST_LDTR_AR_BYTES:
3561 case GUEST_TR_AR_BYTES:
3562 case GUEST_ES_BASE:
3563 case GUEST_CS_BASE:
3564 case GUEST_SS_BASE:
3565 case GUEST_DS_BASE:
3566 case GUEST_FS_BASE:
3567 case GUEST_GS_BASE:
3568 case GUEST_LDTR_BASE:
3569 case GUEST_TR_BASE:
3570 case GUEST_GDTR_BASE:
3571 case GUEST_IDTR_BASE:
3572 case GUEST_PENDING_DBG_EXCEPTIONS:
3573 case GUEST_BNDCFGS:
3574 return true;
3575 default:
3576 break;
3577 }
3578
3579 return false;
3580 }
3581
3582 static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
3583 struct vmcs12 *vmcs12)
3584 {
3585 struct vcpu_vmx *vmx = to_vmx(vcpu);
3586
3587 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
3588 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
3589 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
3590 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
3591 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
3592 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
3593 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
3594 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
3595 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
3596 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
3597 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
3598 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
3599 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
3600 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
3601 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
3602 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
3603 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
3604 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
3605 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
3606 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
3607 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
3608 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
3609 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
3610 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
3611 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
3612 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
3613 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
3614 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
3615 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
3616 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
3617 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
3618 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
3619 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
3620 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
3621 vmcs12->guest_pending_dbg_exceptions =
3622 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
3623 if (kvm_mpx_supported())
3624 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
3625
3626 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false;
3627 }
3628
3629 static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
3630 struct vmcs12 *vmcs12)
3631 {
3632 struct vcpu_vmx *vmx = to_vmx(vcpu);
3633 int cpu;
3634
3635 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare)
3636 return;
3637
3638
3639 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01);
3640
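	/*
	 * Pin to the current pCPU and temporarily make vmcs02 the loaded
	 * VMCS so the vmcs_read*()s in sync_vmcs02_to_vmcs12_rare() read
	 * from vmcs02 rather than vmcs01.
	 */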
3641 cpu = get_cpu();
3642 vmx->loaded_vmcs = &vmx->nested.vmcs02;
3643 vmx_vcpu_load(&vmx->vcpu, cpu);
3644
3645 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
3646
3647 vmx->loaded_vmcs = &vmx->vmcs01;
3648 vmx_vcpu_load(&vmx->vcpu, cpu);
3649 put_cpu();
3650 }
3651
3652 /*
3653 * Update the guest state fields of vmcs12 to reflect changes that
3654 * occurred while L2 was running. (The "IA-32e mode guest" bit of the
3655 * VM-entry controls is also updated, since this is really a guest
3656 * state bit.)
3657 */
3658 static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3659 {
3660 struct vcpu_vmx *vmx = to_vmx(vcpu);
3661
3662 if (vmx->nested.hv_evmcs)
3663 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
3664
3665 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = !vmx->nested.hv_evmcs;
3666
3667 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
3668 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
3669
3670 vmcs12->guest_rsp = kvm_rsp_read(vcpu);
3671 vmcs12->guest_rip = kvm_rip_read(vcpu);
3672 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
3673
3674 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
3675 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
3676
3677 vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
3678 vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
3679 vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
3680
3681 vmcs12->guest_interruptibility_info =
3682 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
3683
3684 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
3685 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
3686 else
3687 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
3688
3689 if (nested_cpu_has_preemption_timer(vmcs12) &&
3690 vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
3691 vmcs12->vmx_preemption_timer_value =
3692 vmx_get_preemption_timer_value(vcpu);
3693
3694 /*
3695 * In some cases (usually, nested EPT), L2 is allowed to change its
3696 * own CR3 without exiting. If it has changed it, we must keep it.
3697 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
3698 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
3699 *
3700 * Additionally, restore L2's PDPTR to vmcs12.
3701 */
3702 if (enable_ept) {
3703 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
3704 if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
3705 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
3706 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
3707 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
3708 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
3709 }
3710 }
3711
3712 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
3713
3714 if (nested_cpu_has_vid(vmcs12))
3715 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
3716
3717 vmcs12->vm_entry_controls =
3718 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
3719 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
3720
3721 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS)
3722 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
3723
3724 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
3725 vmcs12->guest_ia32_efer = vcpu->arch.efer;
3726 }
3727
3728 /*
3729 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
3730 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
3731 * and this function updates it to reflect the changes to the guest state while
3732 * L2 was running (and perhaps made some exits which were handled directly by L0
3733 * without going back to L1), and to reflect the exit reason.
3734 * Note that we do not have to copy here all VMCS fields, just those that
3735 * could have changed by the L2 guest or the exit - i.e., the guest-state and
3736 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
3737 * which already writes to vmcs12 directly.
3738 */
3739 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
3740 u32 exit_reason, u32 exit_intr_info,
3741 unsigned long exit_qualification)
3742 {
3743 /* update exit information fields: */
3744 vmcs12->vm_exit_reason = exit_reason;
3745 vmcs12->exit_qualification = exit_qualification;
3746 vmcs12->vm_exit_intr_info = exit_intr_info;
3747
3748 vmcs12->idt_vectoring_info_field = 0;
3749 vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
3750 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
3751
3752 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
3753 vmcs12->launch_state = 1;
3754
3755 /* vm_entry_intr_info_field is cleared on exit. Emulate this
3756 * instead of reading the real value. */
3757 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
3758
3759 /*
3760	 * Transfer the event that L0 or L1 may have wanted to inject into
3761 * L2 to IDT_VECTORING_INFO_FIELD.
3762 */
3763 vmcs12_save_pending_event(vcpu, vmcs12);
3764
3765 /*
3766 * According to spec, there's no need to store the guest's
3767 * MSRs if the exit is due to a VM-entry failure that occurs
3768 * during or after loading the guest state. Since this exit
3769 * does not fall in that category, we need to save the MSRs.
3770 */
3771 if (nested_vmx_store_msr(vcpu,
3772 vmcs12->vm_exit_msr_store_addr,
3773 vmcs12->vm_exit_msr_store_count))
3774 nested_vmx_abort(vcpu,
3775 VMX_ABORT_SAVE_GUEST_MSR_FAIL);
3776 }
3777
3778 /*
3779 * Drop what we picked up for L2 via vmx_complete_interrupts. It is
3780 * preserved above and would only end up incorrectly in L1.
3781 */
3782 vcpu->arch.nmi_injected = false;
3783 kvm_clear_exception_queue(vcpu);
3784 kvm_clear_interrupt_queue(vcpu);
3785 }
3786
3787 /*
3788	 * Part of what we need to do when the nested L2 guest exits and we want to
3789	 * run its L1 parent is to reset L1's guest state to the host state specified
3790 * in vmcs12.
3791 * This function is to be called not only on normal nested exit, but also on
3792 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
3793 * Failures During or After Loading Guest State").
3794 * This function should be called when the active VMCS is L1's (vmcs01).
3795 */
3796 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
3797 struct vmcs12 *vmcs12)
3798 {
3799 struct kvm_segment seg;
3800 u32 entry_failure_code;
3801
3802 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
3803 vcpu->arch.efer = vmcs12->host_ia32_efer;
3804 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
3805 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
3806 else
3807 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
3808 vmx_set_efer(vcpu, vcpu->arch.efer);
3809
3810 kvm_rsp_write(vcpu, vmcs12->host_rsp);
3811 kvm_rip_write(vcpu, vmcs12->host_rip);
3812 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
3813 vmx_set_interrupt_shadow(vcpu, 0);
3814
3815 /*
3816 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
3817 * actually changed, because vmx_set_cr0 refers to efer set above.
3818 *
3819 * CR0_GUEST_HOST_MASK is already set in the original vmcs01
3820 * (KVM doesn't change it);
3821 */
3822 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
3823 vmx_set_cr0(vcpu, vmcs12->host_cr0);
3824
3825 /* Same as above - no reason to call set_cr4_guest_host_mask(). */
3826 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
3827 vmx_set_cr4(vcpu, vmcs12->host_cr4);
3828
3829 nested_ept_uninit_mmu_context(vcpu);
3830
3831 /*
3832 * Only PDPTE load can fail as the value of cr3 was checked on entry and
3833 * couldn't have changed.
3834 */
3835 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
3836 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
3837
3838 if (!enable_ept)
3839 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
3840
3841 /*
3842	 * If vmcs01 doesn't use VPID, the CPU flushes the TLB on every
3843	 * VMEntry/VMExit.  Thus, there is no need to flush it here.
3844	 *
3845	 * If vmcs12 doesn't use VPID, L1 expects the TLB to be
3846	 * flushed on every VMEntry/VMExit.
3847 *
3848 * Otherwise, we can preserve TLB entries as long as we are
3849 * able to tag L1 TLB entries differently than L2 TLB entries.
3850 *
3851 * If vmcs12 uses EPT, we need to execute this flush on EPTP01
3852 * and therefore we request the TLB flush to happen only after VMCS EPTP
3853 * has been set by KVM_REQ_LOAD_CR3.
3854 */
3855 if (enable_vpid &&
3856 (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) {
3857 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
3858 }
3859
3860 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
3861 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
3862 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
3863 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
3864 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
3865 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
3866 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
3867
3868 /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
3869 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
3870 vmcs_write64(GUEST_BNDCFGS, 0);
3871
3872 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
3873 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
3874 vcpu->arch.pat = vmcs12->host_ia32_pat;
3875 }
3876 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
3877 vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
3878 vmcs12->host_ia32_perf_global_ctrl);
3879
3880 /* Set L1 segment info according to Intel SDM
3881 27.5.2 Loading Host Segment and Descriptor-Table Registers */
3882 seg = (struct kvm_segment) {
3883 .base = 0,
3884 .limit = 0xFFFFFFFF,
3885 .selector = vmcs12->host_cs_selector,
3886 .type = 11,
3887 .present = 1,
3888 .s = 1,
3889 .g = 1
3890 };
3891 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
3892 seg.l = 1;
3893 else
3894 seg.db = 1;
3895 vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
3896 seg = (struct kvm_segment) {
3897 .base = 0,
3898 .limit = 0xFFFFFFFF,
3899 .type = 3,
3900 .present = 1,
3901 .s = 1,
3902 .db = 1,
3903 .g = 1
3904 };
3905 seg.selector = vmcs12->host_ds_selector;
3906 vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
3907 seg.selector = vmcs12->host_es_selector;
3908 vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
3909 seg.selector = vmcs12->host_ss_selector;
3910 vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
3911 seg.selector = vmcs12->host_fs_selector;
3912 seg.base = vmcs12->host_fs_base;
3913 vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
3914 seg.selector = vmcs12->host_gs_selector;
3915 seg.base = vmcs12->host_gs_base;
3916 vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
3917 seg = (struct kvm_segment) {
3918 .base = vmcs12->host_tr_base,
3919 .limit = 0x67,
3920 .selector = vmcs12->host_tr_selector,
3921 .type = 11,
3922 .present = 1
3923 };
3924 vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
3925
3926 kvm_set_dr(vcpu, 7, 0x400);
3927 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
3928
3929 if (cpu_has_vmx_msr_bitmap())
3930 vmx_update_msr_bitmap(vcpu);
3931
3932 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
3933 vmcs12->vm_exit_msr_load_count))
3934 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
3935 }
3936
3937 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
3938 {
3939 struct shared_msr_entry *efer_msr;
3940 unsigned int i;
3941
3942 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
3943 return vmcs_read64(GUEST_IA32_EFER);
3944
3945 if (cpu_has_load_ia32_efer())
3946 return host_efer;
3947
3948 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
3949 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
3950 return vmx->msr_autoload.guest.val[i].value;
3951 }
3952
3953 efer_msr = find_msr_entry(vmx, MSR_EFER);
3954 if (efer_msr)
3955 return efer_msr->data;
3956
3957 return host_efer;
3958 }
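/*
 * Illustrative note, paraphrasing the lookup above: it falls through, in
 * order: vmcs01's GUEST_IA32_EFER (when the entry control loads EFER), the
 * host value (when hardware loads EFER natively), the VM-entry MSR
 * autoload list, KVM's shared-MSR cache, and finally host_efer.
 */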
3959
3960 static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
3961 {
3962 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3963 struct vcpu_vmx *vmx = to_vmx(vcpu);
3964 struct vmx_msr_entry g, h;
3965 gpa_t gpa;
3966 u32 i, j;
3967
3968 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
3969
3970 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
3971 /*
3972 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
3973 * as vmcs01.GUEST_DR7 contains a userspace defined value
3974 * and vcpu->arch.dr7 is not squirreled away before the
3975 * nested VMENTER (not worth adding a variable in nested_vmx).
3976 */
3977 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
3978 kvm_set_dr(vcpu, 7, DR7_FIXED_1);
3979 else
3980 WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
3981 }
3982
3983 /*
3984 * Note that calling vmx_set_{efer,cr0,cr4} is important as they
3985 * handle a variety of side effects to KVM's software model.
3986 */
3987 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
3988
3989 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
3990 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
3991
3992 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
3993 vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
3994
3995 nested_ept_uninit_mmu_context(vcpu);
3996 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
3997 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
3998
3999 /*
4000 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
4001 * from vmcs01 (if necessary). The PDPTRs are not loaded on
4002	 * VMFail; like everything else, we just need to ensure our
4003 * software model is up-to-date.
4004 */
4005 if (enable_ept)
4006 ept_save_pdptrs(vcpu);
4007
4008 kvm_mmu_reset_context(vcpu);
4009
4010 if (cpu_has_vmx_msr_bitmap())
4011 vmx_update_msr_bitmap(vcpu);
4012
4013 /*
4014 * This nasty bit of open coding is a compromise between blindly
4015 * loading L1's MSRs using the exit load lists (incorrect emulation
4016 * of VMFail), leaving the nested VM's MSRs in the software model
4017 * (incorrect behavior) and snapshotting the modified MSRs (too
4018	 * expensive since the lists are unbounded by hardware). For each
4019 * MSR that was (prematurely) loaded from the nested VMEntry load
4020 * list, reload it from the exit load list if it exists and differs
4021 * from the guest value. The intent is to stuff host state as
4022 * silently as possible, not to fully process the exit load list.
4023 */
4024 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
4025 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
4026 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
4027 pr_debug_ratelimited(
4028 "%s read MSR index failed (%u, 0x%08llx)\n",
4029 __func__, i, gpa);
4030 goto vmabort;
4031 }
4032
4033 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
4034 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
4035 if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
4036 pr_debug_ratelimited(
4037 "%s read MSR failed (%u, 0x%08llx)\n",
4038 __func__, j, gpa);
4039 goto vmabort;
4040 }
4041 if (h.index != g.index)
4042 continue;
4043 if (h.value == g.value)
4044 break;
4045
4046 if (nested_vmx_load_msr_check(vcpu, &h)) {
4047 pr_debug_ratelimited(
4048 "%s check failed (%u, 0x%x, 0x%x)\n",
4049 __func__, j, h.index, h.reserved);
4050 goto vmabort;
4051 }
4052
4053 if (kvm_set_msr(vcpu, h.index, h.value)) {
4054 pr_debug_ratelimited(
4055 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
4056 __func__, j, h.index, h.value);
4057 goto vmabort;
4058 }
4059 }
4060 }
4061
4062 return;
4063
4064 vmabort:
4065 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
4066 }
4067
4068 /*
4069 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
4070 * and modify vmcs12 to make it see what it would expect to see there if
4071 * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
4072 */
4073 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
4074 u32 exit_intr_info, unsigned long exit_qualification)
4075 {
4076 struct vcpu_vmx *vmx = to_vmx(vcpu);
4077 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4078
4079 /* trying to cancel vmlaunch/vmresume is a bug */
4080 WARN_ON_ONCE(vmx->nested.nested_run_pending);
4081
4082 leave_guest_mode(vcpu);
4083
4084 if (nested_cpu_has_preemption_timer(vmcs12))
4085 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
4086
4087 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
4088 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
4089
4090 if (likely(!vmx->fail)) {
4091 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
4092
4093 if (exit_reason != -1)
4094 prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
4095 exit_qualification);
4096
4097 /*
4098	 * Must happen outside of sync_vmcs02_to_vmcs12() as it will
4099	 * also be used to capture the vmcs12 cache as part of
4100	 * capturing nVMX state for snapshot (migration).
4101	 *
4102	 * Otherwise, this flush will dirty guest memory at a
4103	 * point where it is already assumed by user-space to be
4104	 * immutable.
4105 */
4106 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
4107 } else {
4108 /*
4109 * The only expected VM-instruction error is "VM entry with
4110 * invalid control field(s)." Anything else indicates a
4111 * problem with L0. And we should never get here with a
4112 * VMFail of any type if early consistency checks are enabled.
4113 */
4114 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
4115 VMXERR_ENTRY_INVALID_CONTROL_FIELD);
4116 WARN_ON_ONCE(nested_early_check);
4117 }
4118
4119 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
4120
4121 /* Update any VMCS fields that might have changed while L2 ran */
4122 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
4123 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
4124 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
4125
4126 if (kvm_has_tsc_control)
4127 decache_tsc_multiplier(vmx);
4128
4129 if (vmx->nested.change_vmcs01_virtual_apic_mode) {
4130 vmx->nested.change_vmcs01_virtual_apic_mode = false;
4131 vmx_set_virtual_apic_mode(vcpu);
4132 } else if (!nested_cpu_has_ept(vmcs12) &&
4133 nested_cpu_has2(vmcs12,
4134 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
4135 vmx_flush_tlb(vcpu, true);
4136 }
4137
4138 /* Unpin physical memory we referred to in vmcs02 */
4139 if (vmx->nested.apic_access_page) {
4140 kvm_release_page_dirty(vmx->nested.apic_access_page);
4141 vmx->nested.apic_access_page = NULL;
4142 }
4143 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
4144 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
4145 vmx->nested.pi_desc = NULL;
4146
4147 /*
4148	 * While L2 was running, the mmu_notifier may have forced a reload of
4149	 * the page's hpa for the L2 vmcs. Reload it for L1 before entering L1.
4150 */
4151 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
4152
4153 if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs))
4154 vmx->nested.need_vmcs12_to_shadow_sync = true;
4155
4156 /* in case we halted in L2 */
4157 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4158
4159 if (likely(!vmx->fail)) {
4160 /*
4161 * TODO: SDM says that with acknowledge interrupt on
4162 * exit, bit 31 of the VM-exit interrupt information
4163 * (valid interrupt) is always set to 1 on
4164 * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't
4165 * need kvm_cpu_has_interrupt(). See the commit
4166 * message for details.
4167 */
4168 if (nested_exit_intr_ack_set(vcpu) &&
4169 exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
4170 kvm_cpu_has_interrupt(vcpu)) {
4171 int irq = kvm_cpu_get_interrupt(vcpu);
4172 WARN_ON(irq < 0);
4173 vmcs12->vm_exit_intr_info = irq |
4174 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
4175 }
4176
4177 if (exit_reason != -1)
4178 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
4179 vmcs12->exit_qualification,
4180 vmcs12->idt_vectoring_info_field,
4181 vmcs12->vm_exit_intr_info,
4182 vmcs12->vm_exit_intr_error_code,
4183 KVM_ISA_VMX);
4184
4185 load_vmcs12_host_state(vcpu, vmcs12);
4186
4187 return;
4188 }
4189
4190 /*
4191 * After an early L2 VM-entry failure, we're now back
4192 * in L1 which thinks it just finished a VMLAUNCH or
4193 * VMRESUME instruction, so we need to set the failure
4194 * flag and the VM-instruction error field of the VMCS
4195 * accordingly, and skip the emulated instruction.
4196 */
4197 (void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
4198
4199 /*
4200 * Restore L1's host state to KVM's software model. We're here
4201 * because a consistency check was caught by hardware, which
4202 * means some amount of guest state has been propagated to KVM's
4203 * model and needs to be unwound to the host's state.
4204 */
4205 nested_vmx_restore_host_state(vcpu);
4206
4207 vmx->fail = 0;
4208 }
4209
4210 /*
4211 * Decode the memory-address operand of a vmx instruction, as recorded on an
4212 * exit caused by such an instruction (run by a guest hypervisor).
4213 * On success, returns 0. When the operand is invalid, returns 1 and throws
4214 * #UD or #GP.
4215 */
4216 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
4217 u32 vmx_instruction_info, bool wr, int len, gva_t *ret)
4218 {
4219 gva_t off;
4220 bool exn;
4221 struct kvm_segment s;
4222
4223 /*
4224 * According to Vol. 3B, "Information for VM Exits Due to Instruction
4225 * Execution", on an exit, vmx_instruction_info holds most of the
4226 * addressing components of the operand. Only the displacement part
4227 * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
4228 * For how an actual address is calculated from all these components,
4229 * refer to Vol. 1, "Operand Addressing".
4230 */
4231 int scaling = vmx_instruction_info & 3;
4232 int addr_size = (vmx_instruction_info >> 7) & 7;
4233 bool is_reg = vmx_instruction_info & (1u << 10);
4234 int seg_reg = (vmx_instruction_info >> 15) & 7;
4235 int index_reg = (vmx_instruction_info >> 18) & 0xf;
4236 bool index_is_valid = !(vmx_instruction_info & (1u << 22));
4237 int base_reg = (vmx_instruction_info >> 23) & 0xf;
4238 bool base_is_valid = !(vmx_instruction_info & (1u << 27));
4239
4240 if (is_reg) {
4241 kvm_queue_exception(vcpu, UD_VECTOR);
4242 return 1;
4243 }
4244
4245 /* Addr = segment_base + offset */
4246 /* offset = base + [index * scale] + displacement */
4247 off = exit_qualification; /* holds the displacement */
4248 if (addr_size == 1)
4249 off = (gva_t)sign_extend64(off, 31);
4250 else if (addr_size == 0)
4251 off = (gva_t)sign_extend64(off, 15);
4252 if (base_is_valid)
4253 off += kvm_register_read(vcpu, base_reg);
4254 if (index_is_valid)
4255		off += kvm_register_read(vcpu, index_reg) << scaling;
4256 vmx_get_segment(vcpu, &s, seg_reg);
4257
4258 /*
4259 * The effective address, i.e. @off, of a memory operand is truncated
4260 * based on the address size of the instruction. Note that this is
4261 * the *effective address*, i.e. the address prior to accounting for
4262 * the segment's base.
4263 */
4264 if (addr_size == 1) /* 32 bit */
4265 off &= 0xffffffff;
4266 else if (addr_size == 0) /* 16 bit */
4267 off &= 0xffff;
4268
4269 /* Checks for #GP/#SS exceptions. */
4270 exn = false;
4271 if (is_long_mode(vcpu)) {
4272 /*
4273 * The virtual/linear address is never truncated in 64-bit
4274 * mode, e.g. a 32-bit address size can yield a 64-bit virtual
4275 * address when using FS/GS with a non-zero base.
4276 */
4277 if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS)
4278 *ret = s.base + off;
4279 else
4280 *ret = off;
4281
4282 /* Long mode: #GP(0)/#SS(0) if the memory address is in a
4283 * non-canonical form. This is the only check on the memory
4284 * destination for long mode!
4285 */
4286 exn = is_noncanonical_address(*ret, vcpu);
4287 } else {
4288 /*
4289 * When not in long mode, the virtual/linear address is
4290 * unconditionally truncated to 32 bits regardless of the
4291 * address size.
4292 */
4293 *ret = (s.base + off) & 0xffffffff;
4294
4295 /* Protected mode: apply checks for segment validity in the
4296 * following order:
4297 * - segment type check (#GP(0) may be thrown)
4298 * - usability check (#GP(0)/#SS(0))
4299 * - limit check (#GP(0)/#SS(0))
4300 */
4301 if (wr)
4302 /* #GP(0) if the destination operand is located in a
4303 * read-only data segment or any code segment.
4304 */
4305 exn = ((s.type & 0xa) == 0 || (s.type & 8));
4306 else
4307 /* #GP(0) if the source operand is located in an
4308 * execute-only code segment
4309 */
4310 exn = ((s.type & 0xa) == 8);
4311 if (exn) {
4312 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
4313 return 1;
4314 }
4315 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
4316 */
4317 exn = (s.unusable != 0);
4318
4319 /*
4320 * Protected mode: #GP(0)/#SS(0) if the memory operand is
4321 * outside the segment limit. All CPUs that support VMX ignore
4322 * limit checks for flat segments, i.e. segments with base==0,
4323 * limit==0xffffffff and of type expand-up data or code.
4324 */
4325 if (!(s.base == 0 && s.limit == 0xffffffff &&
4326 ((s.type & 8) || !(s.type & 4))))
4327 exn = exn || ((u64)off + len - 1 > s.limit);
4328 }
4329 if (exn) {
4330 kvm_queue_exception_e(vcpu,
4331 seg_reg == VCPU_SREG_SS ?
4332 SS_VECTOR : GP_VECTOR,
4333 0);
4334 return 1;
4335 }
4336
4337 return 0;
4338 }
4339
4340 static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
4341 {
4342 gva_t gva;
4343 struct x86_exception e;
4344
4345 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
4346 vmcs_read32(VMX_INSTRUCTION_INFO), false,
4347 sizeof(*vmpointer), &gva))
4348 return 1;
4349
4350 if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
4351 kvm_inject_page_fault(vcpu, &e);
4352 return 1;
4353 }
4354
4355 return 0;
4356 }
4357
4358 /*
4359 * Allocate a shadow VMCS and associate it with the currently loaded
4360 * VMCS, unless such a shadow VMCS already exists. The newly allocated
4361 * VMCS is also VMCLEARed, so that it is ready for use.
4362 */
4363 static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
4364 {
4365 struct vcpu_vmx *vmx = to_vmx(vcpu);
4366 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
4367
4368 /*
4369 * We should allocate a shadow vmcs for vmcs01 only when L1
4370 * executes VMXON and free it when L1 executes VMXOFF.
4371 * As it is invalid to execute VMXON twice, we shouldn't reach
4372	 * here when vmcs01 already has an allocated shadow vmcs.
4373 */
4374 WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);
4375
4376 if (!loaded_vmcs->shadow_vmcs) {
4377 loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
4378 if (loaded_vmcs->shadow_vmcs)
4379 vmcs_clear(loaded_vmcs->shadow_vmcs);
4380 }
4381 return loaded_vmcs->shadow_vmcs;
4382 }
4383
4384 static int enter_vmx_operation(struct kvm_vcpu *vcpu)
4385 {
4386 struct vcpu_vmx *vmx = to_vmx(vcpu);
4387 int r;
4388
4389 r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
4390 if (r < 0)
4391 goto out_vmcs02;
4392
4393 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
4394 if (!vmx->nested.cached_vmcs12)
4395 goto out_cached_vmcs12;
4396
4397 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
4398 if (!vmx->nested.cached_shadow_vmcs12)
4399 goto out_cached_shadow_vmcs12;
4400
4401 if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
4402 goto out_shadow_vmcs;
4403
4404 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
4405 HRTIMER_MODE_REL_PINNED);
4406 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
4407
4408 vmx->nested.vpid02 = allocate_vpid();
4409
4410 vmx->nested.vmcs02_initialized = false;
4411 vmx->nested.vmxon = true;
4412
4413 if (pt_mode == PT_MODE_HOST_GUEST) {
4414 vmx->pt_desc.guest.ctl = 0;
4415 pt_update_intercept_for_msr(vmx);
4416 }
4417
4418 return 0;
4419
4420 out_shadow_vmcs:
4421 kfree(vmx->nested.cached_shadow_vmcs12);
4422
4423 out_cached_shadow_vmcs12:
4424 kfree(vmx->nested.cached_vmcs12);
4425
4426 out_cached_vmcs12:
4427 free_loaded_vmcs(&vmx->nested.vmcs02);
4428
4429 out_vmcs02:
4430 return -ENOMEM;
4431 }
4432
4433 /*
4434 * Emulate the VMXON instruction.
4435 * Currently, we just remember that VMX is active, and do not save or even
4436 * inspect the argument to VMXON (the so-called "VMXON pointer") because we
4437 * do not currently need to store anything in that guest-allocated memory
4438	 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
4439 * argument is different from the VMXON pointer (which the spec says they do).
4440 */
4441 static int handle_vmon(struct kvm_vcpu *vcpu)
4442 {
4443 int ret;
4444 gpa_t vmptr;
4445 uint32_t revision;
4446 struct vcpu_vmx *vmx = to_vmx(vcpu);
4447 const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
4448 | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
4449
4450 /*
4451 * The Intel VMX Instruction Reference lists a bunch of bits that are
4452 * prerequisite to running VMXON, most notably cr4.VMXE must be set to
4453 * 1 (see vmx_set_cr4() for when we allow the guest to set this).
4454 * Otherwise, we should fail with #UD. But most faulting conditions
4455 * have already been checked by hardware, prior to the VM-exit for
4456 * VMXON. We do test guest cr4.VMXE because processor CR4 always has
4457 * that bit set to 1 in non-root mode.
4458 */
4459 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
4460 kvm_queue_exception(vcpu, UD_VECTOR);
4461 return 1;
4462 }
4463
4464 /* CPL=0 must be checked manually. */
4465 if (vmx_get_cpl(vcpu)) {
4466 kvm_inject_gp(vcpu, 0);
4467 return 1;
4468 }
4469
4470 if (vmx->nested.vmxon)
4471 return nested_vmx_failValid(vcpu,
4472 VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
4473
4474 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
4475 != VMXON_NEEDED_FEATURES) {
4476 kvm_inject_gp(vcpu, 0);
4477 return 1;
4478 }
4479
4480 if (nested_vmx_get_vmptr(vcpu, &vmptr))
4481 return 1;
4482
4483 /*
4484 * SDM 3: 24.11.5
4485 * The first 4 bytes of VMXON region contain the supported
4486 * VMCS revision identifier
4487 *
4488	 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case,
4489	 * which would replace the physical address width with 32
4490 */
4491 if (!page_address_valid(vcpu, vmptr))
4492 return nested_vmx_failInvalid(vcpu);
4493
4494 if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
4495 revision != VMCS12_REVISION)
4496 return nested_vmx_failInvalid(vcpu);
4497
4498 vmx->nested.vmxon_ptr = vmptr;
4499 ret = enter_vmx_operation(vcpu);
4500 if (ret)
4501 return ret;
4502
4503 return nested_vmx_succeed(vcpu);
4504 }
4505
4506 static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
4507 {
4508 struct vcpu_vmx *vmx = to_vmx(vcpu);
4509
4510 if (vmx->nested.current_vmptr == -1ull)
4511 return;
4512
4513 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
4514
4515 if (enable_shadow_vmcs) {
4516 /* copy to memory all shadowed fields in case
4517 they were modified */
4518 copy_shadow_to_vmcs12(vmx);
4519 vmx_disable_shadow_vmcs(vmx);
4520 }
4521 vmx->nested.posted_intr_nv = -1;
4522
4523 /* Flush VMCS12 to guest memory */
4524 kvm_vcpu_write_guest_page(vcpu,
4525 vmx->nested.current_vmptr >> PAGE_SHIFT,
4526 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
4527
4528 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
4529
4530 vmx->nested.current_vmptr = -1ull;
4531 }
4532
4533 /* Emulate the VMXOFF instruction */
4534 static int handle_vmoff(struct kvm_vcpu *vcpu)
4535 {
4536 if (!nested_vmx_check_permission(vcpu))
4537 return 1;
4538
4539 free_nested(vcpu);
4540
4541	/* Process a latched INIT during the time the CPU was in VMX operation */
4542 kvm_make_request(KVM_REQ_EVENT, vcpu);
4543
4544 return nested_vmx_succeed(vcpu);
4545 }
4546
4547 /* Emulate the VMCLEAR instruction */
4548 static int handle_vmclear(struct kvm_vcpu *vcpu)
4549 {
4550 struct vcpu_vmx *vmx = to_vmx(vcpu);
4551 u32 zero = 0;
4552 gpa_t vmptr;
4553 u64 evmcs_gpa;
4554
4555 if (!nested_vmx_check_permission(vcpu))
4556 return 1;
4557
4558 if (nested_vmx_get_vmptr(vcpu, &vmptr))
4559 return 1;
4560
4561 if (!page_address_valid(vcpu, vmptr))
4562 return nested_vmx_failValid(vcpu,
4563 VMXERR_VMCLEAR_INVALID_ADDRESS);
4564
4565 if (vmptr == vmx->nested.vmxon_ptr)
4566 return nested_vmx_failValid(vcpu,
4567 VMXERR_VMCLEAR_VMXON_POINTER);
4568
4569 /*
4570 * When Enlightened VMEntry is enabled on the calling CPU we treat
4571	 * memory area pointed to by vmptr as an Enlightened VMCS (as there's no good
4572 * way to distinguish it from VMCS12) and we must not corrupt it by
4573 * writing to the non-existent 'launch_state' field. The area doesn't
4574 * have to be the currently active EVMCS on the calling CPU and there's
4575 * nothing KVM has to do to transition it from 'active' to 'non-active'
4576 * state. It is possible that the area will stay mapped as
4577 * vmx->nested.hv_evmcs but this shouldn't be a problem.
4578 */
4579 if (likely(!vmx->nested.enlightened_vmcs_enabled ||
4580 !nested_enlightened_vmentry(vcpu, &evmcs_gpa))) {
4581 if (vmptr == vmx->nested.current_vmptr)
4582 nested_release_vmcs12(vcpu);
4583
4584 kvm_vcpu_write_guest(vcpu,
4585 vmptr + offsetof(struct vmcs12,
4586 launch_state),
4587 &zero, sizeof(zero));
4588 }
4589
4590 return nested_vmx_succeed(vcpu);
4591 }
4592
4593 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
4594
4595 /* Emulate the VMLAUNCH instruction */
4596 static int handle_vmlaunch(struct kvm_vcpu *vcpu)
4597 {
4598 return nested_vmx_run(vcpu, true);
4599 }
4600
4601 /* Emulate the VMRESUME instruction */
4602 static int handle_vmresume(struct kvm_vcpu *vcpu)
4603 {
4604
4605 return nested_vmx_run(vcpu, false);
4606 }
4607
4608 static int handle_vmread(struct kvm_vcpu *vcpu)
4609 {
4610 unsigned long field;
4611 u64 field_value;
4612 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4613 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4614 int len;
4615 gva_t gva = 0;
4616 struct vmcs12 *vmcs12;
4617 struct x86_exception e;
4618 short offset;
4619
4620 if (!nested_vmx_check_permission(vcpu))
4621 return 1;
4622
4623 if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
4624 return nested_vmx_failInvalid(vcpu);
4625
4626 if (!is_guest_mode(vcpu))
4627 vmcs12 = get_vmcs12(vcpu);
4628 else {
4629 /*
4630		 * When vmcs->vmcs_link_pointer is -1ull, any VMREAD
4631		 * of a shadowed field sets the ALU flags for VMfailInvalid.
4632 */
4633 if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
4634 return nested_vmx_failInvalid(vcpu);
4635 vmcs12 = get_shadow_vmcs12(vcpu);
4636 }
4637
4638 /* Decode instruction info and find the field to read */
4639 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
4640
4641 offset = vmcs_field_to_offset(field);
4642 if (offset < 0)
4643 return nested_vmx_failValid(vcpu,
4644 VMXERR_UNSUPPORTED_VMCS_COMPONENT);
4645
4646 if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
4647 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4648
4649 /* Read the field, zero-extended to a u64 field_value */
4650 field_value = vmcs12_read_any(vmcs12, field, offset);
4651
4652 /*
4653 * Now copy part of this value to register or memory, as requested.
4654 * Note that the number of bits actually copied is 32 or 64 depending
4655 * on the guest's mode (32 or 64 bit), not on the given field's length.
4656 */
4657 if (vmx_instruction_info & (1u << 10)) {
4658 kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
4659 field_value);
4660 } else {
4661 len = is_64_bit_mode(vcpu) ? 8 : 4;
4662 if (get_vmx_mem_address(vcpu, exit_qualification,
4663 vmx_instruction_info, true, len, &gva))
4664 return 1;
4665 /* _system ok, nested_vmx_check_permission has verified cpl=0 */
4666 if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e))
4667 kvm_inject_page_fault(vcpu, &e);
4668 }
4669
4670 return nested_vmx_succeed(vcpu);
4671 }
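/*
 * Illustrative sketch (hypothetical helper, not used by KVM): the
 * VMX_INSTRUCTION_INFO bitfields consumed by handle_vmread() above and
 * handle_vmwrite() below -- bit 10 selects register vs. memory operand,
 * bits 3-6 name the operand register, bits 28-31 name the register
 * holding the VMCS field encoding.
 */
static inline u32 __maybe_unused vmread_field_reg(u32 info)
{
	return (info >> 28) & 0xf;	/* register naming the VMCS field */
}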
4672
4673 static bool is_shadow_field_rw(unsigned long field)
4674 {
4675 switch (field) {
4676 #define SHADOW_FIELD_RW(x, y) case x:
4677 #include "vmcs_shadow_fields.h"
4678 return true;
4679 default:
4680 break;
4681 }
4682 return false;
4683 }
4684
4685 static bool is_shadow_field_ro(unsigned long field)
4686 {
4687 switch (field) {
4688 #define SHADOW_FIELD_RO(x, y) case x:
4689 #include "vmcs_shadow_fields.h"
4690 return true;
4691 default:
4692 break;
4693 }
4694 return false;
4695 }
4696
4697 static int handle_vmwrite(struct kvm_vcpu *vcpu)
4698 {
4699 unsigned long field;
4700 int len;
4701 gva_t gva;
4702 struct vcpu_vmx *vmx = to_vmx(vcpu);
4703 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4704 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4705
4706 /* The value to write might be 32 or 64 bits, depending on L1's long
4707 * mode, and eventually we need to write that into a field of several
4708 * possible lengths. The code below first zero-extends the value to 64
4709 	 * bits (field_value), and then copies only the appropriate number of
4710 * bits into the vmcs12 field.
4711 */
4712 u64 field_value = 0;
4713 struct x86_exception e;
4714 struct vmcs12 *vmcs12;
4715 short offset;
4716
4717 if (!nested_vmx_check_permission(vcpu))
4718 return 1;
4719
4720 if (vmx->nested.current_vmptr == -1ull)
4721 return nested_vmx_failInvalid(vcpu);
4722
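	/*
	 * Fetch the source operand first: per the instruction info's Mem/Reg
	 * flag (bit 10), it is either a GPR (bits 6:3) or a memory operand
	 * that must be read through the guest's page tables.
	 */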
4723 if (vmx_instruction_info & (1u << 10))
4724 field_value = kvm_register_readl(vcpu,
4725 (((vmx_instruction_info) >> 3) & 0xf));
4726 else {
4727 len = is_64_bit_mode(vcpu) ? 8 : 4;
4728 if (get_vmx_mem_address(vcpu, exit_qualification,
4729 vmx_instruction_info, false, len, &gva))
4730 return 1;
4731 if (kvm_read_guest_virt(vcpu, gva, &field_value, len, &e)) {
4732 kvm_inject_page_fault(vcpu, &e);
4733 return 1;
4734 }
4735 }
4736 
4738 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
4739 /*
4740 * If the vCPU supports "VMWRITE to any supported field in the
4741 * VMCS," then the "read-only" fields are actually read/write.
4742 */
4743 if (vmcs_field_readonly(field) &&
4744 !nested_cpu_has_vmwrite_any_field(vcpu))
4745 return nested_vmx_failValid(vcpu,
4746 VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
4747
4748 if (!is_guest_mode(vcpu)) {
4749 vmcs12 = get_vmcs12(vcpu);
4750
4751 /*
4752 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
4753 		 * vmcs12, else we may clobber a field or consume a stale value.
4754 */
4755 if (!is_shadow_field_rw(field))
4756 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4757 } else {
4758 /*
4759 		 * When vmcs12->vmcs_link_pointer is -1ull, any VMWRITE of a
4760 		 * shadowed field sets the ALU flags for VMfailInvalid.
4761 */
4762 if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
4763 return nested_vmx_failInvalid(vcpu);
4764 vmcs12 = get_shadow_vmcs12(vcpu);
4765 }
4766
4767 offset = vmcs_field_to_offset(field);
4768 if (offset < 0)
4769 return nested_vmx_failValid(vcpu,
4770 VMXERR_UNSUPPORTED_VMCS_COMPONENT);
4771
4772 /*
4773 * Some Intel CPUs intentionally drop the reserved bits of the AR byte
4774 * fields on VMWRITE. Emulate this behavior to ensure consistent KVM
4775 * behavior regardless of the underlying hardware, e.g. if an AR_BYTE
4776 * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD
4777 * from L1 will return a different value than VMREAD from L2 (L1 sees
4778 * the stripped down value, L2 sees the full value as stored by KVM).
4779 */
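	/* 0x1f0ff keeps type/S/DPL/P (bits 7:0) and AVL/L/D/G/unusable (bits 16:12). */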
4780 if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES)
4781 field_value &= 0x1f0ff;
4782
4783 vmcs12_write_any(vmcs12, field, offset, field_value);
4784
4785 /*
4786 * Do not track vmcs12 dirty-state if in guest-mode as we actually
4787 * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated
4788 * by L1 without a vmexit are always updated in the vmcs02, i.e. don't
4789 * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path.
4790 */
4791 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) {
4792 /*
4793 * L1 can read these fields without exiting, ensure the
4794 * shadow VMCS is up-to-date.
4795 */
4796 if (enable_shadow_vmcs && is_shadow_field_ro(field)) {
4797 preempt_disable();
4798 vmcs_load(vmx->vmcs01.shadow_vmcs);
4799
4800 __vmcs_writel(field, field_value);
4801
4802 vmcs_clear(vmx->vmcs01.shadow_vmcs);
4803 vmcs_load(vmx->loaded_vmcs->vmcs);
4804 preempt_enable();
4805 }
4806 vmx->nested.dirty_vmcs12 = true;
4807 }
4808
4809 return nested_vmx_succeed(vcpu);
4810 }
4811
4812 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
4813 {
4814 vmx->nested.current_vmptr = vmptr;
4815 if (enable_shadow_vmcs) {
4816 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
4817 vmcs_write64(VMCS_LINK_POINTER,
4818 __pa(vmx->vmcs01.shadow_vmcs));
4819 vmx->nested.need_vmcs12_to_shadow_sync = true;
4820 }
4821 vmx->nested.dirty_vmcs12 = true;
4822 }
4823
4824 /* Emulate the VMPTRLD instruction */
4825 static int handle_vmptrld(struct kvm_vcpu *vcpu)
4826 {
4827 struct vcpu_vmx *vmx = to_vmx(vcpu);
4828 gpa_t vmptr;
4829
4830 if (!nested_vmx_check_permission(vcpu))
4831 return 1;
4832
4833 if (nested_vmx_get_vmptr(vcpu, &vmptr))
4834 return 1;
4835
4836 if (!page_address_valid(vcpu, vmptr))
4837 return nested_vmx_failValid(vcpu,
4838 VMXERR_VMPTRLD_INVALID_ADDRESS);
4839
4840 if (vmptr == vmx->nested.vmxon_ptr)
4841 return nested_vmx_failValid(vcpu,
4842 VMXERR_VMPTRLD_VMXON_POINTER);
4843
4844 /* Forbid normal VMPTRLD if Enlightened version was used */
4845 if (vmx->nested.hv_evmcs)
4846 return 1;
4847
4848 if (vmx->nested.current_vmptr != vmptr) {
4849 struct kvm_host_map map;
4850 struct vmcs12 *new_vmcs12;
4851
4852 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) {
4853 /*
4854 * Reads from an unbacked page return all 1s,
4855 * which means that the 32 bits located at the
4856 * given physical address won't match the required
4857 * VMCS12_REVISION identifier.
4858 */
4859 return nested_vmx_failValid(vcpu,
4860 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
4861 }
4862
4863 new_vmcs12 = map.hva;
4864
4865 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
4866 (new_vmcs12->hdr.shadow_vmcs &&
4867 !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
4868 kvm_vcpu_unmap(vcpu, &map, false);
4869 return nested_vmx_failValid(vcpu,
4870 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
4871 }
4872
4873 nested_release_vmcs12(vcpu);
4874
4875 /*
4876 * Load VMCS12 from guest memory since it is not already
4877 * cached.
4878 */
4879 memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
4880 kvm_vcpu_unmap(vcpu, &map, false);
4881
4882 set_current_vmptr(vmx, vmptr);
4883 }
4884
4885 return nested_vmx_succeed(vcpu);
4886 }
4887
4888 /* Emulate the VMPTRST instruction */
4889 static int handle_vmptrst(struct kvm_vcpu *vcpu)
4890 {
4891 unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
4892 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4893 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
4894 struct x86_exception e;
4895 gva_t gva;
4896
4897 if (!nested_vmx_check_permission(vcpu))
4898 return 1;
4899
4900 if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
4901 return 1;
4902
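	/* VMPTRST only takes a memory operand; store the 8-byte current-VMCS pointer. */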
4903 if (get_vmx_mem_address(vcpu, exit_qual, instr_info,
4904 true, sizeof(gpa_t), &gva))
4905 return 1;
4906 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
4907 	if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
4908 sizeof(gpa_t), &e)) {
4909 kvm_inject_page_fault(vcpu, &e);
4910 return 1;
4911 }
4912 return nested_vmx_succeed(vcpu);
4913 }
4914
4915 /* Emulate the INVEPT instruction */
4916 static int handle_invept(struct kvm_vcpu *vcpu)
4917 {
4918 struct vcpu_vmx *vmx = to_vmx(vcpu);
4919 u32 vmx_instruction_info, types;
4920 unsigned long type;
4921 gva_t gva;
4922 struct x86_exception e;
4923 struct {
4924 u64 eptp, gpa;
4925 } operand;
4926
4927 if (!(vmx->nested.msrs.secondary_ctls_high &
4928 SECONDARY_EXEC_ENABLE_EPT) ||
4929 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
4930 kvm_queue_exception(vcpu, UD_VECTOR);
4931 return 1;
4932 }
4933
4934 if (!nested_vmx_check_permission(vcpu))
4935 return 1;
4936
4937 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4938 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
4939
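	/*
	 * The shift lines ept_caps up so that bit N is set iff INVEPT type N
	 * is supported; "& 6" keeps types 1 (single-context) and 2 (global).
	 */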
4940 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
4941
4942 if (type >= 32 || !(types & (1 << type)))
4943 return nested_vmx_failValid(vcpu,
4944 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4945
4946 /* According to the Intel VMX instruction reference, the memory
4947 * operand is read even if it isn't needed (e.g., for type==global)
4948 */
4949 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
4950 vmx_instruction_info, false, sizeof(operand), &gva))
4951 return 1;
4952 if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
4953 kvm_inject_page_fault(vcpu, &e);
4954 return 1;
4955 }
4956
4957 switch (type) {
4958 case VMX_EPT_EXTENT_GLOBAL:
4959 case VMX_EPT_EXTENT_CONTEXT:
4960 /*
4961 * TODO: Sync the necessary shadow EPT roots here, rather than
4962 * at the next emulated VM-entry.
4963 */
4964 break;
4965 default:
4966 BUG_ON(1);
4967 break;
4968 }
4969
4970 return nested_vmx_succeed(vcpu);
4971 }
4972
4973 static int handle_invvpid(struct kvm_vcpu *vcpu)
4974 {
4975 struct vcpu_vmx *vmx = to_vmx(vcpu);
4976 u32 vmx_instruction_info;
4977 unsigned long type, types;
4978 gva_t gva;
4979 struct x86_exception e;
4980 struct {
4981 u64 vpid;
4982 u64 gla;
4983 } operand;
4984 u16 vpid02;
4985
4986 if (!(vmx->nested.msrs.secondary_ctls_high &
4987 SECONDARY_EXEC_ENABLE_VPID) ||
4988 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
4989 kvm_queue_exception(vcpu, UD_VECTOR);
4990 return 1;
4991 }
4992
4993 if (!nested_vmx_check_permission(vcpu))
4994 return 1;
4995
4996 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4997 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
4998
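	/*
	 * The supported-extent bits sit at bits 11:8 of vpid_caps, so after
	 * the shift bit N is set iff INVVPID type N is supported (0 =
	 * individual address ... 3 = single-context retaining globals).
	 */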
4999 types = (vmx->nested.msrs.vpid_caps &
5000 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
5001
5002 if (type >= 32 || !(types & (1 << type)))
5003 return nested_vmx_failValid(vcpu,
5004 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5005
5006 	/* According to the Intel VMX instruction reference, the memory
5007 	 * operand is read even if it isn't needed (e.g., for type==global)
5008 */
5009 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
5010 vmx_instruction_info, false, sizeof(operand), &gva))
5011 return 1;
5012 if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
5013 kvm_inject_page_fault(vcpu, &e);
5014 return 1;
5015 }
5016 if (operand.vpid >> 16)
5017 return nested_vmx_failValid(vcpu,
5018 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5019
5020 vpid02 = nested_get_vpid02(vcpu);
5021 switch (type) {
5022 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
5023 if (!operand.vpid ||
5024 is_noncanonical_address(operand.gla, vcpu))
5025 return nested_vmx_failValid(vcpu,
5026 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5027 if (cpu_has_vmx_invvpid_individual_addr()) {
5028 __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
5029 vpid02, operand.gla);
5030 } else
5031 __vmx_flush_tlb(vcpu, vpid02, false);
5032 break;
5033 case VMX_VPID_EXTENT_SINGLE_CONTEXT:
5034 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
5035 if (!operand.vpid)
5036 return nested_vmx_failValid(vcpu,
5037 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5038 __vmx_flush_tlb(vcpu, vpid02, false);
5039 break;
5040 case VMX_VPID_EXTENT_ALL_CONTEXT:
5041 __vmx_flush_tlb(vcpu, vpid02, false);
5042 break;
5043 default:
5044 WARN_ON_ONCE(1);
5045 return kvm_skip_emulated_instruction(vcpu);
5046 }
5047
5048 return nested_vmx_succeed(vcpu);
5049 }
5050
5051 static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
5052 struct vmcs12 *vmcs12)
5053 {
5054 u32 index = kvm_rcx_read(vcpu);
5055 u64 address;
5056 bool accessed_dirty;
5057 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
5058
5059 if (!nested_cpu_has_eptp_switching(vmcs12) ||
5060 !nested_cpu_has_ept(vmcs12))
5061 return 1;
5062
5063 if (index >= VMFUNC_EPTP_ENTRIES)
5064 return 1;
5065 
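	/* The EPTP list is one 4 KiB page of 512 8-byte EPTPs, indexed by ECX. */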
5067 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
5068 &address, index * 8, 8))
5069 return 1;
5070
5071 accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT);
5072
5073 /*
5074 * If the (L2) guest does a vmfunc to the currently
5075 * active ept pointer, we don't have to do anything else
5076 */
5077 if (vmcs12->ept_pointer != address) {
5078 if (!valid_ept_address(vcpu, address))
5079 return 1;
5080
5081 kvm_mmu_unload(vcpu);
5082 mmu->ept_ad = accessed_dirty;
5083 mmu->mmu_role.base.ad_disabled = !accessed_dirty;
5084 vmcs12->ept_pointer = address;
5085 /*
5086 * TODO: Check what's the correct approach in case
5087 * mmu reload fails. Currently, we just let the next
5088 * reload potentially fail
5089 */
5090 kvm_mmu_reload(vcpu);
5091 }
5092
5093 return 0;
5094 }
5095
5096 static int handle_vmfunc(struct kvm_vcpu *vcpu)
5097 {
5098 struct vcpu_vmx *vmx = to_vmx(vcpu);
5099 struct vmcs12 *vmcs12;
5100 u32 function = kvm_rax_read(vcpu);
5101
5102 /*
5103 * VMFUNC is only supported for nested guests, but we always enable the
5104 	 * secondary control for simplicity; for non-nested mode, fake that we
5105 	 * didn't enable it by injecting #UD.
5106 */
5107 if (!is_guest_mode(vcpu)) {
5108 kvm_queue_exception(vcpu, UD_VECTOR);
5109 return 1;
5110 }
5111
5112 vmcs12 = get_vmcs12(vcpu);
5113 if ((vmcs12->vm_function_control & (1 << function)) == 0)
5114 goto fail;
5115
5116 switch (function) {
5117 case 0:
5118 if (nested_vmx_eptp_switching(vcpu, vmcs12))
5119 goto fail;
5120 break;
5121 default:
5122 goto fail;
5123 }
5124 return kvm_skip_emulated_instruction(vcpu);
5125
5126 fail:
5127 nested_vmx_vmexit(vcpu, vmx->exit_reason,
5128 vmcs_read32(VM_EXIT_INTR_INFO),
5129 vmcs_readl(EXIT_QUALIFICATION));
5130 return 1;
5131 }
5132
5133
5134 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
5135 struct vmcs12 *vmcs12)
5136 {
5137 unsigned long exit_qualification;
5138 gpa_t bitmap, last_bitmap;
5139 unsigned int port;
5140 int size;
5141 u8 b;
5142
5143 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
5144 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
5145
5146 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5147
5148 port = exit_qualification >> 16;
5149 size = (exit_qualification & 7) + 1;
5150
5151 last_bitmap = (gpa_t)-1;
5152 b = -1;
5153
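	/*
	 * Bitmap A covers ports 0x0000-0x7fff, bitmap B covers 0x8000-0xffff,
	 * one bit per port. Walk the access one port at a time so a
	 * multi-byte access exits if any covered port's bit is set.
	 */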
5154 while (size > 0) {
5155 if (port < 0x8000)
5156 bitmap = vmcs12->io_bitmap_a;
5157 else if (port < 0x10000)
5158 bitmap = vmcs12->io_bitmap_b;
5159 else
5160 return true;
5161 bitmap += (port & 0x7fff) / 8;
5162
5163 if (last_bitmap != bitmap)
5164 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
5165 return true;
5166 if (b & (1 << (port & 7)))
5167 return true;
5168
5169 port++;
5170 size--;
5171 last_bitmap = bitmap;
5172 }
5173
5174 return false;
5175 }
5176
5177 /*
5178  * Return true if we should exit from L2 to L1 to handle an MSR access,
5179 * rather than handle it ourselves in L0. I.e., check whether L1 expressed
5180 * disinterest in the current event (read or write a specific MSR) by using an
5181 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
5182 */
5183 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
5184 struct vmcs12 *vmcs12, u32 exit_reason)
5185 {
5186 u32 msr_index = kvm_rcx_read(vcpu);
5187 gpa_t bitmap;
5188
5189 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
5190 return true;
5191
5192 /*
5193 * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
5194 * for the four combinations of read/write and low/high MSR numbers.
5195 * First we need to figure out which of the four to use:
5196 */
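	/*
	 * Byte offsets into the 4 KiB bitmap page:
	 *   0x000: reads,  MSRs 0x00000000 - 0x00001fff
	 *   0x400: reads,  MSRs 0xc0000000 - 0xc0001fff
	 *   0x800: writes, MSRs 0x00000000 - 0x00001fff
	 *   0xc00: writes, MSRs 0xc0000000 - 0xc0001fff
	 */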
5197 bitmap = vmcs12->msr_bitmap;
5198 if (exit_reason == EXIT_REASON_MSR_WRITE)
5199 bitmap += 2048;
5200 if (msr_index >= 0xc0000000) {
5201 msr_index -= 0xc0000000;
5202 bitmap += 1024;
5203 }
5204
5205 /* Then read the msr_index'th bit from this bitmap: */
5206 if (msr_index < 1024*8) {
5207 unsigned char b;
5208 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
5209 return true;
5210 return 1 & (b >> (msr_index & 7));
5211 } else
5212 return true; /* let L1 handle the wrong parameter */
5213 }
5214
5215 /*
5216  * Return true if we should exit from L2 to L1 to handle a CR access exit,
5217 * rather than handle it ourselves in L0. I.e., check if L1 wanted to
5218 * intercept (via guest_host_mask etc.) the current event.
5219 */
5220 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
5221 struct vmcs12 *vmcs12)
5222 {
5223 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5224 int cr = exit_qualification & 15;
5225 int reg;
5226 unsigned long val;
5227
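	/*
	 * Exit qualification for CR accesses: bits 3:0 = CR number, bits 5:4 =
	 * access type (0 = MOV to CR, 1 = MOV from CR, 2 = CLTS, 3 = LMSW),
	 * bits 11:8 = GPR operand, bits 31:16 = LMSW source data.
	 */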
5228 switch ((exit_qualification >> 4) & 3) {
5229 case 0: /* mov to cr */
5230 reg = (exit_qualification >> 8) & 15;
5231 val = kvm_register_readl(vcpu, reg);
5232 switch (cr) {
5233 case 0:
5234 if (vmcs12->cr0_guest_host_mask &
5235 (val ^ vmcs12->cr0_read_shadow))
5236 return true;
5237 break;
5238 case 3:
5239 if ((vmcs12->cr3_target_count >= 1 &&
5240 vmcs12->cr3_target_value0 == val) ||
5241 (vmcs12->cr3_target_count >= 2 &&
5242 vmcs12->cr3_target_value1 == val) ||
5243 (vmcs12->cr3_target_count >= 3 &&
5244 vmcs12->cr3_target_value2 == val) ||
5245 (vmcs12->cr3_target_count >= 4 &&
5246 vmcs12->cr3_target_value3 == val))
5247 return false;
5248 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
5249 return true;
5250 break;
5251 case 4:
5252 if (vmcs12->cr4_guest_host_mask &
5253 (vmcs12->cr4_read_shadow ^ val))
5254 return true;
5255 break;
5256 case 8:
5257 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
5258 return true;
5259 break;
5260 }
5261 break;
5262 case 2: /* clts */
5263 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
5264 (vmcs12->cr0_read_shadow & X86_CR0_TS))
5265 return true;
5266 break;
5267 case 1: /* mov from cr */
5268 switch (cr) {
5269 case 3:
5270 if (vmcs12->cpu_based_vm_exec_control &
5271 CPU_BASED_CR3_STORE_EXITING)
5272 return true;
5273 break;
5274 case 8:
5275 if (vmcs12->cpu_based_vm_exec_control &
5276 CPU_BASED_CR8_STORE_EXITING)
5277 return true;
5278 break;
5279 }
5280 break;
5281 case 3: /* lmsw */
5282 /*
5283 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
5284 * cr0. Other attempted changes are ignored, with no exit.
5285 */
5286 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
5287 if (vmcs12->cr0_guest_host_mask & 0xe &
5288 (val ^ vmcs12->cr0_read_shadow))
5289 return true;
5290 if ((vmcs12->cr0_guest_host_mask & 0x1) &&
5291 !(vmcs12->cr0_read_shadow & 0x1) &&
5292 (val & 0x1))
5293 return true;
5294 break;
5295 }
5296 return false;
5297 }
5298
5299 static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
5300 struct vmcs12 *vmcs12, gpa_t bitmap)
5301 {
5302 u32 vmx_instruction_info;
5303 unsigned long field;
5304 u8 b;
5305
5306 if (!nested_cpu_has_shadow_vmcs(vmcs12))
5307 return true;
5308
5309 /* Decode instruction info and find the field to access */
5310 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5311 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
5312
5313 /* Out-of-range fields always cause a VM exit from L2 to L1 */
5314 if (field >> 15)
5315 return true;
5316
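	/* Each bitmap is a 4 KiB page, one bit per field encoding 0 - 0x7fff. */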
5317 if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
5318 return true;
5319
5320 return 1 & (b >> (field & 7));
5321 }
5322
5323 /*
5324  * Return true if we should exit from L2 to L1 to handle an exit, or false if we
5325 * should handle it ourselves in L0 (and then continue L2). Only call this
5326 * when in is_guest_mode (L2).
5327 */
5328 bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
5329 {
5330 u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
5331 struct vcpu_vmx *vmx = to_vmx(vcpu);
5332 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5333
5334 if (vmx->nested.nested_run_pending)
5335 return false;
5336
5337 if (unlikely(vmx->fail)) {
5338 trace_kvm_nested_vmenter_failed(
5339 "hardware VM-instruction error: ",
5340 vmcs_read32(VM_INSTRUCTION_ERROR));
5341 return true;
5342 }
5343
5344 /*
5345 * The host physical addresses of some pages of guest memory
5346 * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
5347 * Page). The CPU may write to these pages via their host
5348 * physical address while L2 is running, bypassing any
5349 * address-translation-based dirty tracking (e.g. EPT write
5350 * protection).
5351 *
5352 * Mark them dirty on every exit from L2 to prevent them from
5353 * getting out of sync with dirty tracking.
5354 */
5355 nested_mark_vmcs12_pages_dirty(vcpu);
5356
5357 trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
5358 vmcs_readl(EXIT_QUALIFICATION),
5359 vmx->idt_vectoring_info,
5360 intr_info,
5361 vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
5362 KVM_ISA_VMX);
5363
5364 switch (exit_reason) {
5365 case EXIT_REASON_EXCEPTION_NMI:
5366 if (is_nmi(intr_info))
5367 return false;
5368 else if (is_page_fault(intr_info))
5369 return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept;
5370 else if (is_debug(intr_info) &&
5371 vcpu->guest_debug &
5372 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
5373 return false;
5374 else if (is_breakpoint(intr_info) &&
5375 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
5376 return false;
5377 return vmcs12->exception_bitmap &
5378 (1u << (intr_info & INTR_INFO_VECTOR_MASK));
5379 case EXIT_REASON_EXTERNAL_INTERRUPT:
5380 return false;
5381 case EXIT_REASON_TRIPLE_FAULT:
5382 return true;
5383 case EXIT_REASON_PENDING_INTERRUPT:
5384 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
5385 case EXIT_REASON_NMI_WINDOW:
5386 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
5387 case EXIT_REASON_TASK_SWITCH:
5388 return true;
5389 case EXIT_REASON_CPUID:
5390 return true;
5391 case EXIT_REASON_HLT:
5392 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
5393 case EXIT_REASON_INVD:
5394 return true;
5395 case EXIT_REASON_INVLPG:
5396 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
5397 case EXIT_REASON_RDPMC:
5398 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
5399 case EXIT_REASON_RDRAND:
5400 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
5401 case EXIT_REASON_RDSEED:
5402 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
5403 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
5404 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
5405 case EXIT_REASON_VMREAD:
5406 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
5407 vmcs12->vmread_bitmap);
5408 case EXIT_REASON_VMWRITE:
5409 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
5410 vmcs12->vmwrite_bitmap);
5411 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
5412 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
5413 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
5414 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
5415 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
5416 /*
5417 * VMX instructions trap unconditionally. This allows L1 to
5418 * emulate them for its L2 guest, i.e., allows 3-level nesting!
5419 */
5420 return true;
5421 case EXIT_REASON_CR_ACCESS:
5422 return nested_vmx_exit_handled_cr(vcpu, vmcs12);
5423 case EXIT_REASON_DR_ACCESS:
5424 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
5425 case EXIT_REASON_IO_INSTRUCTION:
5426 return nested_vmx_exit_handled_io(vcpu, vmcs12);
5427 case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
5428 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
5429 case EXIT_REASON_MSR_READ:
5430 case EXIT_REASON_MSR_WRITE:
5431 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
5432 case EXIT_REASON_INVALID_STATE:
5433 return true;
5434 case EXIT_REASON_MWAIT_INSTRUCTION:
5435 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
5436 case EXIT_REASON_MONITOR_TRAP_FLAG:
5437 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
5438 case EXIT_REASON_MONITOR_INSTRUCTION:
5439 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
5440 case EXIT_REASON_PAUSE_INSTRUCTION:
5441 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
5442 nested_cpu_has2(vmcs12,
5443 SECONDARY_EXEC_PAUSE_LOOP_EXITING);
5444 case EXIT_REASON_MCE_DURING_VMENTRY:
5445 return false;
5446 case EXIT_REASON_TPR_BELOW_THRESHOLD:
5447 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
5448 case EXIT_REASON_APIC_ACCESS:
5449 case EXIT_REASON_APIC_WRITE:
5450 case EXIT_REASON_EOI_INDUCED:
5451 /*
5452 * The controls for "virtualize APIC accesses," "APIC-
5453 * register virtualization," and "virtual-interrupt
5454 * delivery" only come from vmcs12.
5455 */
5456 return true;
5457 case EXIT_REASON_EPT_VIOLATION:
5458 /*
5459 * L0 always deals with the EPT violation. If nested EPT is
5460 * used, and the nested mmu code discovers that the address is
5461 * missing in the guest EPT table (EPT12), the EPT violation
5462 * will be injected with nested_ept_inject_page_fault()
5463 */
5464 return false;
5465 case EXIT_REASON_EPT_MISCONFIG:
5466 /*
5467 	 * L2 never directly uses L1's EPT, but rather L0's own EPT
5468 	 * table (shadow on EPT) or a merged EPT table that L0 built
5469 	 * (EPT on EPT). So any problems with the structure of the
5470 	 * table are L0's fault.
5471 */
5472 return false;
5473 case EXIT_REASON_INVPCID:
5474 return
5475 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
5476 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
5477 case EXIT_REASON_WBINVD:
5478 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
5479 case EXIT_REASON_XSETBV:
5480 return true;
5481 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
5482 /*
5483 * This should never happen, since it is not possible to
5484 * set XSS to a non-zero value---neither in L1 nor in L2.
5485 		 * If it were, XSS would have to be checked against
5486 * the XSS exit bitmap in vmcs12.
5487 */
5488 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
5489 case EXIT_REASON_PREEMPTION_TIMER:
5490 return false;
5491 case EXIT_REASON_PML_FULL:
5492 /* We emulate PML support to L1. */
5493 return false;
5494 case EXIT_REASON_VMFUNC:
5495 /* VM functions are emulated through L2->L0 vmexits. */
5496 return false;
5497 case EXIT_REASON_ENCLS:
5498 /* SGX is never exposed to L1 */
5499 return false;
5500 case EXIT_REASON_UMWAIT:
5501 case EXIT_REASON_TPAUSE:
5502 return nested_cpu_has2(vmcs12,
5503 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE);
5504 default:
5505 return true;
5506 }
5507 }
5508
5509
5510 static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
5511 struct kvm_nested_state __user *user_kvm_nested_state,
5512 u32 user_data_size)
5513 {
5514 struct vcpu_vmx *vmx;
5515 struct vmcs12 *vmcs12;
5516 struct kvm_nested_state kvm_state = {
5517 .flags = 0,
5518 .format = KVM_STATE_NESTED_FORMAT_VMX,
5519 .size = sizeof(kvm_state),
5520 .hdr.vmx.vmxon_pa = -1ull,
5521 .hdr.vmx.vmcs12_pa = -1ull,
5522 };
5523 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
5524 &user_kvm_nested_state->data.vmx[0];
5525
5526 if (!vcpu)
5527 return kvm_state.size + sizeof(*user_vmx_nested_state);
5528
5529 vmx = to_vmx(vcpu);
5530 vmcs12 = get_vmcs12(vcpu);
5531
5532 if (nested_vmx_allowed(vcpu) &&
5533 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
5534 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
5535 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;
5536
5537 if (vmx_has_valid_vmcs12(vcpu)) {
5538 kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
5539
5540 if (vmx->nested.hv_evmcs)
5541 kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
5542
5543 if (is_guest_mode(vcpu) &&
5544 nested_cpu_has_shadow_vmcs(vmcs12) &&
5545 vmcs12->vmcs_link_pointer != -1ull)
5546 kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12);
5547 }
5548
5549 if (vmx->nested.smm.vmxon)
5550 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
5551
5552 if (vmx->nested.smm.guest_mode)
5553 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
5554
5555 if (is_guest_mode(vcpu)) {
5556 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
5557
5558 if (vmx->nested.nested_run_pending)
5559 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
5560 }
5561 }
5562
5563 if (user_data_size < kvm_state.size)
5564 goto out;
5565
5566 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
5567 return -EFAULT;
5568
5569 if (!vmx_has_valid_vmcs12(vcpu))
5570 goto out;
5571
5572 /*
5573 * When running L2, the authoritative vmcs12 state is in the
5574 * vmcs02. When running L1, the authoritative vmcs12 state is
5575 * in the shadow or enlightened vmcs linked to vmcs01, unless
5576 * need_vmcs12_to_shadow_sync is set, in which case, the authoritative
5577 * vmcs12 state is in the vmcs12 already.
5578 */
5579 if (is_guest_mode(vcpu)) {
5580 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
5581 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
5582 } else if (!vmx->nested.need_vmcs12_to_shadow_sync) {
5583 if (vmx->nested.hv_evmcs)
5584 copy_enlightened_to_vmcs12(vmx);
5585 else if (enable_shadow_vmcs)
5586 copy_shadow_to_vmcs12(vmx);
5587 }
5588
5589 BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
5590 BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE);
5591
5592 /*
5593 * Copy over the full allocated size of vmcs12 rather than just the size
5594 * of the struct.
5595 */
5596 if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE))
5597 return -EFAULT;
5598
5599 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
5600 vmcs12->vmcs_link_pointer != -1ull) {
5601 if (copy_to_user(user_vmx_nested_state->shadow_vmcs12,
5602 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
5603 return -EFAULT;
5604 }
5605
5606 out:
5607 return kvm_state.size;
5608 }
5609
5610 /*
5611 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
5612 */
5613 void vmx_leave_nested(struct kvm_vcpu *vcpu)
5614 {
5615 if (is_guest_mode(vcpu)) {
5616 to_vmx(vcpu)->nested.nested_run_pending = 0;
5617 nested_vmx_vmexit(vcpu, -1, 0, 0);
5618 }
5619 free_nested(vcpu);
5620 }
5621
5622 static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
5623 struct kvm_nested_state __user *user_kvm_nested_state,
5624 struct kvm_nested_state *kvm_state)
5625 {
5626 struct vcpu_vmx *vmx = to_vmx(vcpu);
5627 struct vmcs12 *vmcs12;
5628 u32 exit_qual;
5629 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
5630 &user_kvm_nested_state->data.vmx[0];
5631 int ret;
5632
5633 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX)
5634 return -EINVAL;
5635
5636 if (kvm_state->hdr.vmx.vmxon_pa == -1ull) {
5637 if (kvm_state->hdr.vmx.smm.flags)
5638 return -EINVAL;
5639
5640 if (kvm_state->hdr.vmx.vmcs12_pa != -1ull)
5641 return -EINVAL;
5642
5643 /*
5644 * KVM_STATE_NESTED_EVMCS used to signal that KVM should
5645 * enable eVMCS capability on vCPU. However, since then
5646 		 * code was changed such that the flag signals that vmcs12
5647 		 * should be copied into the eVMCS in guest memory.
5648 		 *
5649 		 * To preserve backwards compatibility, allow userspace
5650 		 * to set this flag even when there is no VMXON region.
5651 */
5652 if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
5653 return -EINVAL;
5654 } else {
5655 if (!nested_vmx_allowed(vcpu))
5656 return -EINVAL;
5657
5658 if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
5659 return -EINVAL;
5660 }
5661
5662 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
5663 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
5664 return -EINVAL;
5665
5666 if (kvm_state->hdr.vmx.smm.flags &
5667 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
5668 return -EINVAL;
5669
5670 /*
5671 * SMM temporarily disables VMX, so we cannot be in guest mode,
5672 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
5673 * must be zero.
5674 */
5675 if (is_smm(vcpu) ?
5676 (kvm_state->flags &
5677 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING))
5678 : kvm_state->hdr.vmx.smm.flags)
5679 return -EINVAL;
5680
5681 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
5682 !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
5683 return -EINVAL;
5684
5685 if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) &&
5686 (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled))
5687 return -EINVAL;
5688
5689 vmx_leave_nested(vcpu);
5690
5691 if (kvm_state->hdr.vmx.vmxon_pa == -1ull)
5692 return 0;
5693
5694 vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
5695 ret = enter_vmx_operation(vcpu);
5696 if (ret)
5697 return ret;
5698
5699 /* Empty 'VMXON' state is permitted */
5700 if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
5701 return 0;
5702
5703 if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) {
5704 if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
5705 !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa))
5706 return -EINVAL;
5707
5708 set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa);
5709 } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
5710 /*
5711 * Sync eVMCS upon entry as we may not have
5712 * HV_X64_MSR_VP_ASSIST_PAGE set up yet.
5713 */
5714 vmx->nested.need_vmcs12_to_shadow_sync = true;
5715 } else {
5716 return -EINVAL;
5717 }
5718
5719 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
5720 vmx->nested.smm.vmxon = true;
5721 vmx->nested.vmxon = false;
5722
5723 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
5724 vmx->nested.smm.guest_mode = true;
5725 }
5726
5727 vmcs12 = get_vmcs12(vcpu);
5728 if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12)))
5729 return -EFAULT;
5730
5731 if (vmcs12->hdr.revision_id != VMCS12_REVISION)
5732 return -EINVAL;
5733
5734 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
5735 return 0;
5736
5737 vmx->nested.nested_run_pending =
5738 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
5739
5740 ret = -EINVAL;
5741 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
5742 vmcs12->vmcs_link_pointer != -1ull) {
5743 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
5744
5745 if (kvm_state->size <
5746 sizeof(*kvm_state) +
5747 sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12))
5748 goto error_guest_mode;
5749
5750 if (copy_from_user(shadow_vmcs12,
5751 user_vmx_nested_state->shadow_vmcs12,
5752 sizeof(*shadow_vmcs12))) {
5753 ret = -EFAULT;
5754 goto error_guest_mode;
5755 }
5756
5757 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
5758 !shadow_vmcs12->hdr.shadow_vmcs)
5759 goto error_guest_mode;
5760 }
5761
5762 if (nested_vmx_check_controls(vcpu, vmcs12) ||
5763 nested_vmx_check_host_state(vcpu, vmcs12) ||
5764 nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
5765 goto error_guest_mode;
5766
5767 vmx->nested.dirty_vmcs12 = true;
5768 ret = nested_vmx_enter_non_root_mode(vcpu, false);
5769 if (ret)
5770 goto error_guest_mode;
5771
5772 return 0;
5773
5774 error_guest_mode:
5775 vmx->nested.nested_run_pending = 0;
5776 return ret;
5777 }
5778
5779 void nested_vmx_vcpu_setup(void)
5780 {
5781 if (enable_shadow_vmcs) {
5782 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
5783 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
5784 }
5785 }
5786
5787 /*
5788 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
5789 * returned for the various VMX controls MSRs when nested VMX is enabled.
5790 * The same values should also be used to verify that vmcs12 control fields are
5791 * valid during nested entry from L1 to L2.
5792 * Each of these control msrs has a low and high 32-bit half: A low bit is on
5793 * if the corresponding bit in the (32-bit) control field *must* be on, and a
5794 * bit in the high half is on if the corresponding bit in the control field
5795 * may be on. See also vmx_control_verify().
5796 */
5797 void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
5798 bool apicv)
5799 {
5800 /*
5801 * Note that as a general rule, the high half of the MSRs (bits in
5802 * the control fields which may be 1) should be initialized by the
5803 * intersection of the underlying hardware's MSR (i.e., features which
5804 * can be supported) and the list of features we want to expose -
5805 * because they are known to be properly supported in our code.
5806 * Also, usually, the low half of the MSRs (bits which must be 1) can
5807 * be set to 0, meaning that L1 may turn off any of these bits. The
5808 * reason is that if one of these bits is necessary, it will appear
5809 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
5810 * fields of vmcs01 and vmcs02, will turn these bits off - and
5811 * nested_vmx_exit_reflected() will not pass related exits to L1.
5812 * These rules have exceptions below.
5813 */
5814
5815 /* pin-based controls */
5816 rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
5817 msrs->pinbased_ctls_low,
5818 msrs->pinbased_ctls_high);
5819 msrs->pinbased_ctls_low |=
5820 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
5821 msrs->pinbased_ctls_high &=
5822 PIN_BASED_EXT_INTR_MASK |
5823 PIN_BASED_NMI_EXITING |
5824 PIN_BASED_VIRTUAL_NMIS |
5825 (apicv ? PIN_BASED_POSTED_INTR : 0);
5826 msrs->pinbased_ctls_high |=
5827 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
5828 PIN_BASED_VMX_PREEMPTION_TIMER;
5829
5830 /* exit controls */
5831 rdmsr(MSR_IA32_VMX_EXIT_CTLS,
5832 msrs->exit_ctls_low,
5833 msrs->exit_ctls_high);
5834 msrs->exit_ctls_low =
5835 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
5836
5837 msrs->exit_ctls_high &=
5838 #ifdef CONFIG_X86_64
5839 VM_EXIT_HOST_ADDR_SPACE_SIZE |
5840 #endif
5841 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
5842 msrs->exit_ctls_high |=
5843 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
5844 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
5845 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
5846
5847 /* We support free control of debug control saving. */
5848 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
5849
5850 /* entry controls */
5851 rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
5852 msrs->entry_ctls_low,
5853 msrs->entry_ctls_high);
5854 msrs->entry_ctls_low =
5855 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
5856 msrs->entry_ctls_high &=
5857 #ifdef CONFIG_X86_64
5858 VM_ENTRY_IA32E_MODE |
5859 #endif
5860 VM_ENTRY_LOAD_IA32_PAT;
5861 msrs->entry_ctls_high |=
5862 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
5863
5864 /* We support free control of debug control loading. */
5865 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
5866
5867 /* cpu-based controls */
5868 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
5869 msrs->procbased_ctls_low,
5870 msrs->procbased_ctls_high);
5871 msrs->procbased_ctls_low =
5872 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
5873 msrs->procbased_ctls_high &=
5874 CPU_BASED_VIRTUAL_INTR_PENDING |
5875 CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
5876 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
5877 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
5878 CPU_BASED_CR3_STORE_EXITING |
5879 #ifdef CONFIG_X86_64
5880 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
5881 #endif
5882 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
5883 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
5884 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
5885 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
5886 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
5887 /*
5888 * We can allow some features even when not supported by the
5889 * hardware. For example, L1 can specify an MSR bitmap - and we
5890 * can use it to avoid exits to L1 - even when L0 runs L2
5891 * without MSR bitmaps.
5892 */
5893 msrs->procbased_ctls_high |=
5894 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
5895 CPU_BASED_USE_MSR_BITMAPS;
5896
5897 /* We support free control of CR3 access interception. */
5898 msrs->procbased_ctls_low &=
5899 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
5900
5901 /*
5902 * secondary cpu-based controls. Do not include those that
5903 	 * depend on CPUID bits; they are added later by vmx_cpuid_update.
5904 */
5905 if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
5906 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
5907 msrs->secondary_ctls_low,
5908 msrs->secondary_ctls_high);
5909
5910 msrs->secondary_ctls_low = 0;
5911 msrs->secondary_ctls_high &=
5912 SECONDARY_EXEC_DESC |
5913 SECONDARY_EXEC_RDTSCP |
5914 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
5915 SECONDARY_EXEC_WBINVD_EXITING |
5916 SECONDARY_EXEC_APIC_REGISTER_VIRT |
5917 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
5918 SECONDARY_EXEC_RDRAND_EXITING |
5919 SECONDARY_EXEC_ENABLE_INVPCID |
5920 SECONDARY_EXEC_RDSEED_EXITING |
5921 SECONDARY_EXEC_XSAVES;
5922
5923 /*
5924 * We can emulate "VMCS shadowing," even if the hardware
5925 * doesn't support it.
5926 */
5927 msrs->secondary_ctls_high |=
5928 SECONDARY_EXEC_SHADOW_VMCS;
5929
5930 if (enable_ept) {
5931 /* nested EPT: emulate EPT also to L1 */
5932 msrs->secondary_ctls_high |=
5933 SECONDARY_EXEC_ENABLE_EPT;
5934 msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
5935 VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
5936 if (cpu_has_vmx_ept_execute_only())
5937 msrs->ept_caps |=
5938 VMX_EPT_EXECUTE_ONLY_BIT;
5939 msrs->ept_caps &= ept_caps;
5940 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
5941 VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
5942 VMX_EPT_1GB_PAGE_BIT;
5943 if (enable_ept_ad_bits) {
5944 msrs->secondary_ctls_high |=
5945 SECONDARY_EXEC_ENABLE_PML;
5946 msrs->ept_caps |= VMX_EPT_AD_BIT;
5947 }
5948 }
5949
5950 if (cpu_has_vmx_vmfunc()) {
5951 msrs->secondary_ctls_high |=
5952 SECONDARY_EXEC_ENABLE_VMFUNC;
5953 /*
5954 * Advertise EPTP switching unconditionally
5955 * since we emulate it
5956 */
5957 if (enable_ept)
5958 msrs->vmfunc_controls =
5959 VMX_VMFUNC_EPTP_SWITCHING;
5960 }
5961
5962 /*
5963 * Old versions of KVM use the single-context version without
5964 * checking for support, so declare that it is supported even
5965 * though it is treated as global context. The alternative is
5966 * not failing the single-context invvpid, and it is worse.
5967 */
5968 if (enable_vpid) {
5969 msrs->secondary_ctls_high |=
5970 SECONDARY_EXEC_ENABLE_VPID;
5971 msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
5972 VMX_VPID_EXTENT_SUPPORTED_MASK;
5973 }
5974
5975 if (enable_unrestricted_guest)
5976 msrs->secondary_ctls_high |=
5977 SECONDARY_EXEC_UNRESTRICTED_GUEST;
5978
5979 if (flexpriority_enabled)
5980 msrs->secondary_ctls_high |=
5981 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
5982
5983 /* miscellaneous data */
5984 rdmsr(MSR_IA32_VMX_MISC,
5985 msrs->misc_low,
5986 msrs->misc_high);
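	/*
	 * MISC bits 4:0 advertise the preemption-timer rate as a TSC
	 * right-shift; the emulated timer advertises a fixed shift of 5
	 * (VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE).
	 */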
5987 msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
5988 msrs->misc_low |=
5989 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
5990 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
5991 VMX_MISC_ACTIVITY_HLT;
5992 msrs->misc_high = 0;
5993
5994 /*
5995 * This MSR reports some information about VMX support. We
5996 * should return information about the VMX we emulate for the
5997 * guest, and the VMCS structure we give it - not about the
5998 * VMX support of the underlying hardware.
5999 */
6000 msrs->basic =
6001 VMCS12_REVISION |
6002 VMX_BASIC_TRUE_CTLS |
6003 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
6004 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
6005
6006 if (cpu_has_vmx_basic_inout())
6007 msrs->basic |= VMX_BASIC_INOUT;
6008
6009 /*
6010 * These MSRs specify bits which the guest must keep fixed on
6011 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
6012 * We picked the standard core2 setting.
6013 */
6014 #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
6015 #define VMXON_CR4_ALWAYSON X86_CR4_VMXE
6016 msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
6017 msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;
6018
6019 /* These MSRs specify bits which the guest must keep fixed off. */
6020 rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
6021 rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
6022
6023 /* highest index: VMX_PREEMPTION_TIMER_VALUE */
6024 msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
6025 }
6026
6027 void nested_vmx_hardware_unsetup(void)
6028 {
6029 int i;
6030
6031 if (enable_shadow_vmcs) {
6032 for (i = 0; i < VMX_BITMAP_NR; i++)
6033 free_page((unsigned long)vmx_bitmap[i]);
6034 }
6035 }
6036
6037 __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
6038 {
6039 int i;
6040
6041 if (!cpu_has_vmx_shadow_vmcs())
6042 enable_shadow_vmcs = 0;
6043 if (enable_shadow_vmcs) {
6044 for (i = 0; i < VMX_BITMAP_NR; i++) {
6045 /*
6046 * The vmx_bitmap is not tied to a VM and so should
6047 * not be charged to a memcg.
6048 */
6049 vmx_bitmap[i] = (unsigned long *)
6050 __get_free_page(GFP_KERNEL);
6051 if (!vmx_bitmap[i]) {
6052 nested_vmx_hardware_unsetup();
6053 return -ENOMEM;
6054 }
6055 }
6056
6057 init_vmcs_shadow_fields();
6058 }
6059
6060 	exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear;
6061 	exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch;
6062 	exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld;
6063 	exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst;
6064 	exit_handlers[EXIT_REASON_VMREAD] = handle_vmread;
6065 	exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume;
6066 	exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite;
6067 	exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff;
6068 	exit_handlers[EXIT_REASON_VMON] = handle_vmon;
6069 	exit_handlers[EXIT_REASON_INVEPT] = handle_invept;
6070 	exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid;
6071 	exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc;
6072
6073 kvm_x86_ops->check_nested_events = vmx_check_nested_events;
6074 kvm_x86_ops->get_nested_state = vmx_get_nested_state;
6075 kvm_x86_ops->set_nested_state = vmx_set_nested_state;
6076 	kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages;
6077 kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs;
6078 kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version;
6079
6080 return 0;
6081 }
6082