1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * This module enables machines with Intel VT-x extensions to run virtual
6  * machines without emulation or binary translation.
7  *
8  * Copyright (C) 2006 Qumranet, Inc.
9  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10  *
11  * Authors:
12  *   Avi Kivity   <avi@qumranet.com>
13  *   Yaniv Kamay  <yaniv@qumranet.com>
14  */
15 
16 #include <linux/highmem.h>
17 #include <linux/hrtimer.h>
18 #include <linux/kernel.h>
19 #include <linux/kvm_host.h>
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/mod_devicetable.h>
23 #include <linux/mm.h>
24 #include <linux/objtool.h>
25 #include <linux/sched.h>
26 #include <linux/sched/smt.h>
27 #include <linux/slab.h>
28 #include <linux/tboot.h>
29 #include <linux/trace_events.h>
30 #include <linux/entry-kvm.h>
31 
32 #include <asm/apic.h>
33 #include <asm/asm.h>
34 #include <asm/cpu.h>
35 #include <asm/cpu_device_id.h>
36 #include <asm/debugreg.h>
37 #include <asm/desc.h>
38 #include <asm/fpu/internal.h>
39 #include <asm/idtentry.h>
40 #include <asm/io.h>
41 #include <asm/irq_remapping.h>
42 #include <asm/kexec.h>
43 #include <asm/perf_event.h>
44 #include <asm/mce.h>
45 #include <asm/mmu_context.h>
46 #include <asm/mshyperv.h>
47 #include <asm/mwait.h>
48 #include <asm/spec-ctrl.h>
49 #include <asm/virtext.h>
50 #include <asm/vmx.h>
51 
52 #include "capabilities.h"
53 #include "cpuid.h"
54 #include "evmcs.h"
55 #include "irq.h"
56 #include "kvm_cache_regs.h"
57 #include "lapic.h"
58 #include "mmu.h"
59 #include "nested.h"
60 #include "pmu.h"
61 #include "trace.h"
62 #include "vmcs.h"
63 #include "vmcs12.h"
64 #include "vmx.h"
65 #include "x86.h"
66 
67 MODULE_AUTHOR("Qumranet");
68 MODULE_LICENSE("GPL");
69 
70 #ifdef MODULE
71 static const struct x86_cpu_id vmx_cpu_id[] = {
72 	X86_MATCH_FEATURE(X86_FEATURE_VMX, NULL),
73 	{}
74 };
75 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
76 #endif
77 
78 bool __read_mostly enable_vpid = 1;
79 module_param_named(vpid, enable_vpid, bool, 0444);
80 
81 static bool __read_mostly enable_vnmi = 1;
82 module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);
83 
84 bool __read_mostly flexpriority_enabled = 1;
85 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
86 
87 bool __read_mostly enable_ept = 1;
88 module_param_named(ept, enable_ept, bool, S_IRUGO);
89 
90 bool __read_mostly enable_unrestricted_guest = 1;
91 module_param_named(unrestricted_guest,
92 			enable_unrestricted_guest, bool, S_IRUGO);
93 
94 bool __read_mostly enable_ept_ad_bits = 1;
95 module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
96 
97 static bool __read_mostly emulate_invalid_guest_state = true;
98 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
99 
100 static bool __read_mostly fasteoi = 1;
101 module_param(fasteoi, bool, S_IRUGO);
102 
103 bool __read_mostly enable_apicv = 1;
104 module_param(enable_apicv, bool, S_IRUGO);
105 
106 /*
107  * If nested=1, nested virtualization is supported, i.e. guests may use
108  * VMX and act as hypervisors for their own guests. If nested=0, guests
109  * may not use VMX instructions.
110  */
111 static bool __read_mostly nested = 1;
112 module_param(nested, bool, S_IRUGO);
113 
114 bool __read_mostly enable_pml = 1;
115 module_param_named(pml, enable_pml, bool, S_IRUGO);
116 
117 static bool __read_mostly dump_invalid_vmcs = 0;
118 module_param(dump_invalid_vmcs, bool, 0644);
119 
120 #define MSR_BITMAP_MODE_X2APIC		1
121 #define MSR_BITMAP_MODE_X2APIC_APICV	2
122 
123 #define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL
124 
125 /* Guest_tsc -> host_tsc conversion requires 64-bit division.  */
126 static int __read_mostly cpu_preemption_timer_multi;
127 static bool __read_mostly enable_preemption_timer = 1;
128 #ifdef CONFIG_X86_64
129 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
130 #endif
131 
132 extern bool __read_mostly allow_smaller_maxphyaddr;
133 module_param(allow_smaller_maxphyaddr, bool, S_IRUGO);
134 
135 #define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
136 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
137 #define KVM_VM_CR0_ALWAYS_ON				\
138 	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | 	\
139 	 X86_CR0_WP | X86_CR0_PG | X86_CR0_PE)
140 
141 #define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
142 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
143 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
144 
145 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
146 
147 #define MSR_IA32_RTIT_STATUS_MASK (~(RTIT_STATUS_FILTEREN | \
148 	RTIT_STATUS_CONTEXTEN | RTIT_STATUS_TRIGGEREN | \
149 	RTIT_STATUS_ERROR | RTIT_STATUS_STOPPED | \
150 	RTIT_STATUS_BYTECNT))
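/*
 * All other IA32_RTIT_STATUS bits are reserved; the mask is used elsewhere
 * in this file to reject (with #GP) guest writes that set a reserved bit.
 */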
151 
152 /*
153  * List of MSRs that can be directly passed to the guest.
154  * In addition to these x2apic and PT MSRs are handled specially.
155  */
156 static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
157 	MSR_IA32_SPEC_CTRL,
158 	MSR_IA32_PRED_CMD,
159 	MSR_IA32_TSC,
160 #ifdef CONFIG_X86_64
161 	MSR_FS_BASE,
162 	MSR_GS_BASE,
163 	MSR_KERNEL_GS_BASE,
164 #endif
165 	MSR_IA32_SYSENTER_CS,
166 	MSR_IA32_SYSENTER_ESP,
167 	MSR_IA32_SYSENTER_EIP,
168 	MSR_CORE_C1_RES,
169 	MSR_CORE_C3_RESIDENCY,
170 	MSR_CORE_C6_RESIDENCY,
171 	MSR_CORE_C7_RESIDENCY,
172 };
173 
174 /*
175  * These 2 parameters are used to configure the controls for Pause-Loop Exiting:
176  * ple_gap:    upper bound on the amount of time between two successive
177  *             executions of PAUSE in a loop. Also indicates whether PLE is
178  *             enabled; testing shows this time is usually smaller than 128 cycles.
179  * ple_window: upper bound on the amount of time a guest is allowed to execute
180  *             in a PAUSE loop. Tests indicate that most spinlocks are held for
181  *             less than 2^12 cycles.
182  * Time is measured on a counter that runs at the same rate as the TSC;
183  * see SDM volume 3b, sections 21.6.13 & 22.1.3.
184  */
185 static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;
186 module_param(ple_gap, uint, 0444);
187 
188 static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
189 module_param(ple_window, uint, 0444);
190 
191 /* Default doubles per-vcpu window every exit. */
192 static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
193 module_param(ple_window_grow, uint, 0444);
194 
195 /* Default resets per-vcpu window every exit to ple_window. */
196 static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
197 module_param(ple_window_shrink, uint, 0444);
198 
199 /* Default is to compute the maximum so we can never overflow. */
200 static unsigned int ple_window_max        = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
201 module_param(ple_window_max, uint, 0444);
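/*
 * An illustrative sequence with the defaults above: on repeated PLE exits
 * the window goes 4096 -> 8192 -> 16384 -> ... (grow = 2 doubles it) until
 * it is clamped at ple_window_max; shrink = 0 resets it to ple_window.
 */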
202 
203 /* Default is SYSTEM mode (0), set to 1 for host-guest mode */
204 int __read_mostly pt_mode = PT_MODE_SYSTEM;
205 module_param(pt_mode, int, S_IRUGO);
206 
207 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
208 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
209 static DEFINE_MUTEX(vmx_l1d_flush_mutex);
210 
211 /* Storage for pre-module-init parameter parsing */
212 static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
213 
214 static const struct {
215 	const char *option;
216 	bool for_parse;
217 } vmentry_l1d_param[] = {
218 	[VMENTER_L1D_FLUSH_AUTO]	 = {"auto", true},
219 	[VMENTER_L1D_FLUSH_NEVER]	 = {"never", true},
220 	[VMENTER_L1D_FLUSH_COND]	 = {"cond", true},
221 	[VMENTER_L1D_FLUSH_ALWAYS]	 = {"always", true},
222 	[VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
223 	[VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
224 };
225 
226 #define L1D_CACHE_ORDER 4
227 static void *vmx_l1d_flush_pages;
228 
229 /* Control for disabling CPU Fill buffer clear */
230 static bool __read_mostly vmx_fb_clear_ctrl_available;
231 
232 static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
233 {
234 	struct page *page;
235 	unsigned int i;
236 
237 	if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
238 		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
239 		return 0;
240 	}
241 
242 	if (!enable_ept) {
243 		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
244 		return 0;
245 	}
246 
247 	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
248 		u64 msr;
249 
250 		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
251 		if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
252 			l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
253 			return 0;
254 		}
255 	}
256 
257 	/* If set to auto, use the default L1TF mitigation method */
258 	if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
259 		switch (l1tf_mitigation) {
260 		case L1TF_MITIGATION_OFF:
261 			l1tf = VMENTER_L1D_FLUSH_NEVER;
262 			break;
263 		case L1TF_MITIGATION_FLUSH_NOWARN:
264 		case L1TF_MITIGATION_FLUSH:
265 		case L1TF_MITIGATION_FLUSH_NOSMT:
266 			l1tf = VMENTER_L1D_FLUSH_COND;
267 			break;
268 		case L1TF_MITIGATION_FULL:
269 		case L1TF_MITIGATION_FULL_FORCE:
270 			l1tf = VMENTER_L1D_FLUSH_ALWAYS;
271 			break;
272 		}
273 	} else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
274 		l1tf = VMENTER_L1D_FLUSH_ALWAYS;
275 	}
276 
277 	if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
278 	    !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
279 		/*
280 		 * This allocation for vmx_l1d_flush_pages is not tied to a VM
281 		 * lifetime and so should not be charged to a memcg.
282 		 */
283 		page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
284 		if (!page)
285 			return -ENOMEM;
286 		vmx_l1d_flush_pages = page_address(page);
287 
288 		/*
289 		 * Initialize each page with a different pattern in
290 		 * order to protect against KSM in the nested
291 		 * virtualization case.
292 		 */
293 		for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
294 			memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
295 			       PAGE_SIZE);
296 		}
297 	}
298 
299 	l1tf_vmx_mitigation = l1tf;
300 
301 	if (l1tf != VMENTER_L1D_FLUSH_NEVER)
302 		static_branch_enable(&vmx_l1d_should_flush);
303 	else
304 		static_branch_disable(&vmx_l1d_should_flush);
305 
306 	if (l1tf == VMENTER_L1D_FLUSH_COND)
307 		static_branch_enable(&vmx_l1d_flush_cond);
308 	else
309 		static_branch_disable(&vmx_l1d_flush_cond);
310 	return 0;
311 }
312 
313 static int vmentry_l1d_flush_parse(const char *s)
314 {
315 	unsigned int i;
316 
317 	if (s) {
318 		for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
319 			if (vmentry_l1d_param[i].for_parse &&
320 			    sysfs_streq(s, vmentry_l1d_param[i].option))
321 				return i;
322 		}
323 	}
324 	return -EINVAL;
325 }
326 
327 static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
328 {
329 	int l1tf, ret;
330 
331 	l1tf = vmentry_l1d_flush_parse(s);
332 	if (l1tf < 0)
333 		return l1tf;
334 
335 	if (!boot_cpu_has(X86_BUG_L1TF))
336 		return 0;
337 
338 	/*
339 	 * Has vmx_init() run already? If not then this is the pre init
340 	 * parameter parsing. In that case just store the value and let
341 	 * vmx_init() do the proper setup after enable_ept has been
342 	 * established.
343 	 */
344 	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
345 		vmentry_l1d_flush_param = l1tf;
346 		return 0;
347 	}
348 
349 	mutex_lock(&vmx_l1d_flush_mutex);
350 	ret = vmx_setup_l1d_flush(l1tf);
351 	mutex_unlock(&vmx_l1d_flush_mutex);
352 	return ret;
353 }
354 
355 static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
356 {
357 	if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
358 		return sprintf(s, "???\n");
359 
360 	return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
361 }
362 
363 static void vmx_setup_fb_clear_ctrl(void)
364 {
365 	u64 msr;
366 
367 	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES) &&
368 	    !boot_cpu_has_bug(X86_BUG_MDS) &&
369 	    !boot_cpu_has_bug(X86_BUG_TAA)) {
370 		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
371 		if (msr & ARCH_CAP_FB_CLEAR_CTRL)
372 			vmx_fb_clear_ctrl_available = true;
373 	}
374 }
375 
376 static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx)
377 {
378 	u64 msr;
379 
380 	if (!vmx->disable_fb_clear)
381 		return;
382 
383 	msr = __rdmsr(MSR_IA32_MCU_OPT_CTRL);
384 	msr |= FB_CLEAR_DIS;
385 	native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
386 	/* Cache the MSR value to avoid reading it later */
387 	vmx->msr_ia32_mcu_opt_ctrl = msr;
388 }
389 
390 static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
391 {
392 	if (!vmx->disable_fb_clear)
393 		return;
394 
395 	vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS;
396 	native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl);
397 }
398 
399 static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
400 {
401 	vmx->disable_fb_clear = vmx_fb_clear_ctrl_available;
402 
403 	/*
404 	 * If guest will not execute VERW, there is no need to set FB_CLEAR_DIS
405 	 * at VMEntry. Skip the MSR read/write when a guest has no use case to
406 	 * execute VERW.
407 	 */
408 	if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) ||
409 	   ((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) &&
410 	    (vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) &&
411 	    (vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) &&
412 	    (vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) &&
413 	    (vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO)))
414 		vmx->disable_fb_clear = false;
415 }
416 
417 static const struct kernel_param_ops vmentry_l1d_flush_ops = {
418 	.set = vmentry_l1d_flush_set,
419 	.get = vmentry_l1d_flush_get,
420 };
421 module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
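/*
 * Permissions 0644 make the mitigation switchable at runtime, e.g. via
 * /sys/module/kvm_intel/parameters/vmentry_l1d_flush; the set() callback
 * above revalidates and reconfigures the flush state on each write.
 */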
422 
423 static u32 vmx_segment_access_rights(struct kvm_segment *var);
424 static __always_inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
425 							  u32 msr, int type);
426 
427 void vmx_vmexit(void);
428 
429 #define vmx_insn_failed(fmt...)		\
430 do {					\
431 	WARN_ONCE(1, fmt);		\
432 	pr_warn_ratelimited(fmt);	\
433 } while (0)
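/*
 * WARN_ONCE() captures a backtrace for the first failed VMX instruction;
 * the rate-limited warning still logs later failures without flooding dmesg.
 */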
434 
435 asmlinkage void vmread_error(unsigned long field, bool fault)
436 {
437 	if (fault)
438 		kvm_spurious_fault();
439 	else
440 		vmx_insn_failed("kvm: vmread failed: field=%lx\n", field);
441 }
442 
443 noinline void vmwrite_error(unsigned long field, unsigned long value)
444 {
445 	vmx_insn_failed("kvm: vmwrite failed: field=%lx val=%lx err=%d\n",
446 			field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
447 }
448 
449 noinline void vmclear_error(struct vmcs *vmcs, u64 phys_addr)
450 {
451 	vmx_insn_failed("kvm: vmclear failed: %p/%llx\n", vmcs, phys_addr);
452 }
453 
454 noinline void vmptrld_error(struct vmcs *vmcs, u64 phys_addr)
455 {
456 	vmx_insn_failed("kvm: vmptrld failed: %p/%llx\n", vmcs, phys_addr);
457 }
458 
459 noinline void invvpid_error(unsigned long ext, u16 vpid, gva_t gva)
460 {
461 	vmx_insn_failed("kvm: invvpid failed: ext=0x%lx vpid=%u gva=0x%lx\n",
462 			ext, vpid, gva);
463 }
464 
465 noinline void invept_error(unsigned long ext, u64 eptp, gpa_t gpa)
466 {
467 	vmx_insn_failed("kvm: invept failed: ext=0x%lx eptp=%llx gpa=0x%llx\n",
468 			ext, eptp, gpa);
469 }
470 
471 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
472 DEFINE_PER_CPU(struct vmcs *, current_vmcs);
473 /*
474  * We maintain a per-CPU linked list of VMCSs loaded on that CPU. This is needed
475  * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
476  */
477 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
478 
479 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
480 static DEFINE_SPINLOCK(vmx_vpid_lock);
481 
482 struct vmcs_config vmcs_config;
483 struct vmx_capability vmx_capability;
484 
485 #define VMX_SEGMENT_FIELD(seg)					\
486 	[VCPU_SREG_##seg] = {                                   \
487 		.selector = GUEST_##seg##_SELECTOR,		\
488 		.base = GUEST_##seg##_BASE,		   	\
489 		.limit = GUEST_##seg##_LIMIT,		   	\
490 		.ar_bytes = GUEST_##seg##_AR_BYTES,	   	\
491 	}
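/*
 * For example, VMX_SEGMENT_FIELD(CS) expands to:
 *
 *	[VCPU_SREG_CS] = {
 *		.selector = GUEST_CS_SELECTOR,
 *		.base = GUEST_CS_BASE,
 *		.limit = GUEST_CS_LIMIT,
 *		.ar_bytes = GUEST_CS_AR_BYTES,
 *	}
 */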
492 
493 static const struct kvm_vmx_segment_field {
494 	unsigned selector;
495 	unsigned base;
496 	unsigned limit;
497 	unsigned ar_bytes;
498 } kvm_vmx_segment_fields[] = {
499 	VMX_SEGMENT_FIELD(CS),
500 	VMX_SEGMENT_FIELD(DS),
501 	VMX_SEGMENT_FIELD(ES),
502 	VMX_SEGMENT_FIELD(FS),
503 	VMX_SEGMENT_FIELD(GS),
504 	VMX_SEGMENT_FIELD(SS),
505 	VMX_SEGMENT_FIELD(TR),
506 	VMX_SEGMENT_FIELD(LDTR),
507 };
508 
509 static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
510 {
511 	vmx->segment_cache.bitmask = 0;
512 }
513 
514 static unsigned long host_idt_base;
515 
516 /*
517  * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm
518  * will emulate SYSCALL in legacy mode if the vendor string in guest
519  * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!". To
520  * support this emulation, IA32_STAR must always be included in
521  * vmx_uret_msrs_list[], even in i386 builds.
522  */
523 static const u32 vmx_uret_msrs_list[] = {
524 #ifdef CONFIG_X86_64
525 	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
526 #endif
527 	MSR_EFER, MSR_TSC_AUX, MSR_STAR,
528 	MSR_IA32_TSX_CTRL,
529 };
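/*
 * "uret" (user-return) MSRs are switched to their guest values before
 * VM-entry and are only restored to the host values when the CPU returns
 * to userspace, via the kvm_set_user_return_msr() machinery used below.
 */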
530 
531 #if IS_ENABLED(CONFIG_HYPERV)
532 static bool __read_mostly enlightened_vmcs = true;
533 module_param(enlightened_vmcs, bool, 0444);
534 
535 /* check_ept_pointer_match() must be called under the protection of ept_pointer_lock. */
536 static void check_ept_pointer_match(struct kvm *kvm)
537 {
538 	struct kvm_vcpu *vcpu;
539 	u64 tmp_eptp = INVALID_PAGE;
540 	int i;
541 
542 	kvm_for_each_vcpu(i, vcpu, kvm) {
543 		if (!VALID_PAGE(tmp_eptp)) {
544 			tmp_eptp = to_vmx(vcpu)->ept_pointer;
545 		} else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) {
546 			to_kvm_vmx(kvm)->ept_pointers_match
547 				= EPT_POINTERS_MISMATCH;
548 			return;
549 		}
550 	}
551 
552 	to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
553 }
554 
555 static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
556 		void *data)
557 {
558 	struct kvm_tlb_range *range = data;
559 
560 	return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
561 			range->pages);
562 }
563 
564 static inline int __hv_remote_flush_tlb_with_range(struct kvm *kvm,
565 		struct kvm_vcpu *vcpu, struct kvm_tlb_range *range)
566 {
567 	u64 ept_pointer = to_vmx(vcpu)->ept_pointer;
568 
569 	/*
570 	 * FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs address
571 	 * of the base of EPT PML4 table, strip off EPT configuration
572 	 * information.
573 	 */
574 	if (range)
575 		return hyperv_flush_guest_mapping_range(ept_pointer & PAGE_MASK,
576 				kvm_fill_hv_flush_list_func, (void *)range);
577 	else
578 		return hyperv_flush_guest_mapping(ept_pointer & PAGE_MASK);
579 }
580 
581 static int hv_remote_flush_tlb_with_range(struct kvm *kvm,
582 		struct kvm_tlb_range *range)
583 {
584 	struct kvm_vcpu *vcpu;
585 	int ret = 0, i;
586 
587 	spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
588 
589 	if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK)
590 		check_ept_pointer_match(kvm);
591 
592 	if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
593 		kvm_for_each_vcpu(i, vcpu, kvm) {
594 			/* If the ept_pointer is invalid, bypass the flush request. */
595 			if (VALID_PAGE(to_vmx(vcpu)->ept_pointer))
596 				ret |= __hv_remote_flush_tlb_with_range(
597 					kvm, vcpu, range);
598 		}
599 	} else {
600 		ret = __hv_remote_flush_tlb_with_range(kvm,
601 				kvm_get_vcpu(kvm, 0), range);
602 	}
603 
604 	spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
605 	return ret;
606 }
607 static int hv_remote_flush_tlb(struct kvm *kvm)
608 {
609 	return hv_remote_flush_tlb_with_range(kvm, NULL);
610 }
611 
612 static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
613 {
614 	struct hv_enlightened_vmcs *evmcs;
615 	struct hv_partition_assist_pg **p_hv_pa_pg =
616 			&vcpu->kvm->arch.hyperv.hv_pa_pg;
617 	/*
618 	 * Synthetic VM-Exit is not enabled in the current code, so all
619 	 * eVMCSs in a single VM share the same assist page.
620 	 */
621 	if (!*p_hv_pa_pg)
622 		*p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL);
623 
624 	if (!*p_hv_pa_pg)
625 		return -ENOMEM;
626 
627 	evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs;
628 
629 	evmcs->partition_assist_page =
630 		__pa(*p_hv_pa_pg);
631 	evmcs->hv_vm_id = (unsigned long)vcpu->kvm;
632 	evmcs->hv_enlightenments_control.nested_flush_hypercall = 1;
633 
634 	return 0;
635 }
636 
637 #endif /* IS_ENABLED(CONFIG_HYPERV) */
638 
639 /*
640  * Comment format: document - errata name - stepping - processor name.
641  * Taken from
642  * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
643  */
644 static u32 vmx_preemption_cpu_tfms[] = {
645 /* 323344.pdf - BA86   - D0 - Xeon 7500 Series */
646 0x000206E6,
647 /* 323056.pdf - AAX65  - C2 - Xeon L3406 */
648 /* 322814.pdf - AAT59  - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
649 /* 322911.pdf - AAU65  - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
650 0x00020652,
651 /* 322911.pdf - AAU65  - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
652 0x00020655,
653 /* 322373.pdf - AAO95  - B1 - Xeon 3400 Series */
654 /* 322166.pdf - AAN92  - B1 - i7-800 and i5-700 Desktop */
655 /*
656  * 320767.pdf - AAP86  - B1 -
657  * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
658  */
659 0x000106E5,
660 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */
661 0x000106A0,
662 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */
663 0x000106A1,
664 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
665 0x000106A4,
666  /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
667  /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
668  /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
669 0x000106A5,
670  /* Xeon E3-1220 V2 */
671 0x000306A8,
672 };
673 
674 static inline bool cpu_has_broken_vmx_preemption_timer(void)
675 {
676 	u32 eax = cpuid_eax(0x00000001), i;
677 
678 	/* Clear the reserved bits */
679 	eax &= ~(0x3U << 14 | 0xfU << 28);
680 	for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
681 		if (eax == vmx_preemption_cpu_tfms[i])
682 			return true;
683 
684 	return false;
685 }
686 
687 static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
688 {
689 	return flexpriority_enabled && lapic_in_kernel(vcpu);
690 }
691 
692 static inline bool report_flexpriority(void)
693 {
694 	return flexpriority_enabled;
695 }
696 
697 static int possible_passthrough_msr_slot(u32 msr)
698 {
699 	u32 i;
700 
701 	for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++)
702 		if (vmx_possible_passthrough_msrs[i] == msr)
703 			return i;
704 
705 	return -ENOENT;
706 }
707 
708 static bool is_valid_passthrough_msr(u32 msr)
709 {
710 	bool r;
711 
712 	switch (msr) {
713 	case 0x800 ... 0x8ff:
714 		/* x2APIC MSRs. These are handled in vmx_update_msr_bitmap_x2apic() */
715 		return true;
716 	case MSR_IA32_RTIT_STATUS:
717 	case MSR_IA32_RTIT_OUTPUT_BASE:
718 	case MSR_IA32_RTIT_OUTPUT_MASK:
719 	case MSR_IA32_RTIT_CR3_MATCH:
720 	case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
721 		/* PT MSRs. These are handled in pt_update_intercept_for_msr() */
722 		return true;
723 	}
724 
725 	r = possible_passthrough_msr_slot(msr) != -ENOENT;
726 
727 	WARN(!r, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);
728 
729 	return r;
730 }
731 
732 static inline int __vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
733 {
734 	int i;
735 
736 	for (i = 0; i < vmx->nr_uret_msrs; ++i)
737 		if (vmx_uret_msrs_list[vmx->guest_uret_msrs[i].slot] == msr)
738 			return i;
739 	return -1;
740 }
741 
742 struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
743 {
744 	int i;
745 
746 	i = __vmx_find_uret_msr(vmx, msr);
747 	if (i >= 0)
748 		return &vmx->guest_uret_msrs[i];
749 	return NULL;
750 }
751 
752 static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
753 				  struct vmx_uret_msr *msr, u64 data)
754 {
755 	int ret = 0;
756 
757 	u64 old_msr_data = msr->data;
758 	msr->data = data;
759 	if (msr - vmx->guest_uret_msrs < vmx->nr_active_uret_msrs) {
760 		preempt_disable();
761 		ret = kvm_set_user_return_msr(msr->slot, msr->data, msr->mask);
762 		preempt_enable();
763 		if (ret)
764 			msr->data = old_msr_data;
765 	}
766 	return ret;
767 }
768 
769 #ifdef CONFIG_KEXEC_CORE
770 static void crash_vmclear_local_loaded_vmcss(void)
771 {
772 	int cpu = raw_smp_processor_id();
773 	struct loaded_vmcs *v;
774 
775 	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
776 			    loaded_vmcss_on_cpu_link)
777 		vmcs_clear(v->vmcs);
778 }
779 #endif /* CONFIG_KEXEC_CORE */
780 
781 static void __loaded_vmcs_clear(void *arg)
782 {
783 	struct loaded_vmcs *loaded_vmcs = arg;
784 	int cpu = raw_smp_processor_id();
785 
786 	if (loaded_vmcs->cpu != cpu)
787 		return; /* vcpu migration can race with cpu offline */
788 	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
789 		per_cpu(current_vmcs, cpu) = NULL;
790 
791 	vmcs_clear(loaded_vmcs->vmcs);
792 	if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
793 		vmcs_clear(loaded_vmcs->shadow_vmcs);
794 
795 	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
796 
797 	/*
798 	 * Ensure all writes to loaded_vmcs, including deleting it from its
799 	 * current percpu list, complete before setting loaded_vmcs->vcpu to
800 	 * -1, otherwise a different cpu can see vcpu == -1 first and add
801 	 * loaded_vmcs to its percpu list before it's deleted from this cpu's
802 	 * list. Pairs with the smp_rmb() in vmx_vcpu_load_vmcs().
803 	 */
804 	smp_wmb();
805 
806 	loaded_vmcs->cpu = -1;
807 	loaded_vmcs->launched = 0;
808 }
809 
810 void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
811 {
812 	int cpu = loaded_vmcs->cpu;
813 
814 	if (cpu != -1)
815 		smp_call_function_single(cpu,
816 			 __loaded_vmcs_clear, loaded_vmcs, 1);
817 }
818 
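/*
 * Test whether the (seg, field) pair is already cached, and mark it cached
 * either way; segment_cache.bitmask tracks one bit per pair.
 */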
819 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
820 				       unsigned field)
821 {
822 	bool ret;
823 	u32 mask = 1 << (seg * SEG_FIELD_NR + field);
824 
825 	if (!kvm_register_is_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS)) {
826 		kvm_register_mark_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS);
827 		vmx->segment_cache.bitmask = 0;
828 	}
829 	ret = vmx->segment_cache.bitmask & mask;
830 	vmx->segment_cache.bitmask |= mask;
831 	return ret;
832 }
833 
834 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
835 {
836 	u16 *p = &vmx->segment_cache.seg[seg].selector;
837 
838 	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
839 		*p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
840 	return *p;
841 }
842 
843 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
844 {
845 	ulong *p = &vmx->segment_cache.seg[seg].base;
846 
847 	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
848 		*p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
849 	return *p;
850 }
851 
852 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
853 {
854 	u32 *p = &vmx->segment_cache.seg[seg].limit;
855 
856 	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
857 		*p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
858 	return *p;
859 }
860 
861 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
862 {
863 	u32 *p = &vmx->segment_cache.seg[seg].ar;
864 
865 	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
866 		*p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
867 	return *p;
868 }
869 
870 void update_exception_bitmap(struct kvm_vcpu *vcpu)
871 {
872 	u32 eb;
873 
874 	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
875 	     (1u << DB_VECTOR) | (1u << AC_VECTOR);
876 	/*
877 	 * Guest access to VMware backdoor ports could legitimately
878 	 * trigger #GP because of TSS I/O permission bitmap.
879 	 * We intercept those #GP and allow access to them anyway
880 	 * as VMware does.
881 	 */
882 	if (enable_vmware_backdoor)
883 		eb |= (1u << GP_VECTOR);
884 	if ((vcpu->guest_debug &
885 	     (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
886 	    (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
887 		eb |= 1u << BP_VECTOR;
888 	if (to_vmx(vcpu)->rmode.vm86_active)
889 		eb = ~0;
890 	if (!vmx_need_pf_intercept(vcpu))
891 		eb &= ~(1u << PF_VECTOR);
892 
893 	/* When we are running a nested L2 guest and L1 specified for it a
894 	 * certain exception bitmap, we must trap the same exceptions and pass
895 	 * them to L1. When running L2, we will only handle the exceptions
896 	 * specified above if L1 did not want them.
897 	 */
898 	if (is_guest_mode(vcpu))
899 		eb |= get_vmcs12(vcpu)->exception_bitmap;
900 	else {
901 		/*
902 		 * If EPT is enabled, #PF is only trapped if MAXPHYADDR is mismatched
903 		 * between guest and host.  In that case we only care about present
904 		 * faults.  For vmcs02, however, PFEC_MASK and PFEC_MATCH are set in
905 		 * prepare_vmcs02_rare.
906 		 */
907 		bool selective_pf_trap = enable_ept && (eb & (1u << PF_VECTOR));
908 		int mask = selective_pf_trap ? PFERR_PRESENT_MASK : 0;
909 		vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, mask);
910 		vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, mask);
911 	}
912 
913 	vmcs_write32(EXCEPTION_BITMAP, eb);
914 }
915 
916 /*
917  * Check whether a write to the given MSR is intercepted by the currently loaded MSR bitmap.
918  */
919 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
920 {
921 	unsigned long *msr_bitmap;
922 	int f = sizeof(unsigned long);
923 
924 	if (!cpu_has_vmx_msr_bitmap())
925 		return true;
926 
927 	msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap;
928 
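	/*
	 * Per the SDM, the 4K MSR bitmap is split into four 1K regions:
	 * read-low (0x000), read-high (0x400), write-low (0x800) and
	 * write-high (0xc00), each covering 0x2000 MSRs.  Only the write
	 * halves are consulted here.
	 */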
929 	if (msr <= 0x1fff) {
930 		return !!test_bit(msr, msr_bitmap + 0x800 / f);
931 	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
932 		msr &= 0x1fff;
933 		return !!test_bit(msr, msr_bitmap + 0xc00 / f);
934 	}
935 
936 	return true;
937 }
938 
939 unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
940 {
941 	unsigned int flags = 0;
942 
943 	if (vmx->loaded_vmcs->launched)
944 		flags |= VMX_RUN_VMRESUME;
945 
946 	/*
947 	 * If writes to the SPEC_CTRL MSR aren't intercepted, the guest is free
948 	 * to change it directly without causing a vmexit.  In that case read
949 	 * it after vmexit and store it in vmx->spec_ctrl.
950 	 */
951 	if (unlikely(!msr_write_intercepted(&vmx->vcpu, MSR_IA32_SPEC_CTRL)))
952 		flags |= VMX_RUN_SAVE_SPEC_CTRL;
953 
954 	return flags;
955 }
956 
957 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
958 		unsigned long entry, unsigned long exit)
959 {
960 	vm_entry_controls_clearbit(vmx, entry);
961 	vm_exit_controls_clearbit(vmx, exit);
962 }
963 
964 int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr)
965 {
966 	unsigned int i;
967 
968 	for (i = 0; i < m->nr; ++i) {
969 		if (m->val[i].index == msr)
970 			return i;
971 	}
972 	return -ENOENT;
973 }
974 
975 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
976 {
977 	int i;
978 	struct msr_autoload *m = &vmx->msr_autoload;
979 
980 	switch (msr) {
981 	case MSR_EFER:
982 		if (cpu_has_load_ia32_efer()) {
983 			clear_atomic_switch_msr_special(vmx,
984 					VM_ENTRY_LOAD_IA32_EFER,
985 					VM_EXIT_LOAD_IA32_EFER);
986 			return;
987 		}
988 		break;
989 	case MSR_CORE_PERF_GLOBAL_CTRL:
990 		if (cpu_has_load_perf_global_ctrl()) {
991 			clear_atomic_switch_msr_special(vmx,
992 					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
993 					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
994 			return;
995 		}
996 		break;
997 	}
998 	i = vmx_find_loadstore_msr_slot(&m->guest, msr);
999 	if (i < 0)
1000 		goto skip_guest;
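	/* Remove the entry by copying the last array element into its slot. */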
1001 	--m->guest.nr;
1002 	m->guest.val[i] = m->guest.val[m->guest.nr];
1003 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
1004 
1005 skip_guest:
1006 	i = vmx_find_loadstore_msr_slot(&m->host, msr);
1007 	if (i < 0)
1008 		return;
1009 
1010 	--m->host.nr;
1011 	m->host.val[i] = m->host.val[m->host.nr];
1012 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
1013 }
1014 
1015 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
1016 		unsigned long entry, unsigned long exit,
1017 		unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
1018 		u64 guest_val, u64 host_val)
1019 {
1020 	vmcs_write64(guest_val_vmcs, guest_val);
1021 	if (host_val_vmcs != HOST_IA32_EFER)
1022 		vmcs_write64(host_val_vmcs, host_val);
1023 	vm_entry_controls_setbit(vmx, entry);
1024 	vm_exit_controls_setbit(vmx, exit);
1025 }
1026 
1027 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
1028 				  u64 guest_val, u64 host_val, bool entry_only)
1029 {
1030 	int i, j = 0;
1031 	struct msr_autoload *m = &vmx->msr_autoload;
1032 
1033 	switch (msr) {
1034 	case MSR_EFER:
1035 		if (cpu_has_load_ia32_efer()) {
1036 			add_atomic_switch_msr_special(vmx,
1037 					VM_ENTRY_LOAD_IA32_EFER,
1038 					VM_EXIT_LOAD_IA32_EFER,
1039 					GUEST_IA32_EFER,
1040 					HOST_IA32_EFER,
1041 					guest_val, host_val);
1042 			return;
1043 		}
1044 		break;
1045 	case MSR_CORE_PERF_GLOBAL_CTRL:
1046 		if (cpu_has_load_perf_global_ctrl()) {
1047 			add_atomic_switch_msr_special(vmx,
1048 					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1049 					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
1050 					GUEST_IA32_PERF_GLOBAL_CTRL,
1051 					HOST_IA32_PERF_GLOBAL_CTRL,
1052 					guest_val, host_val);
1053 			return;
1054 		}
1055 		break;
1056 	case MSR_IA32_PEBS_ENABLE:
1057 		/* PEBS needs a quiescent period after being disabled (to write
1058 		 * a record).  Disabling PEBS through VMX MSR swapping doesn't
1059 		 * provide that period, so a CPU could write host's record into
1060 		 * guest's memory.
1061 		 */
1062 		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
1063 	}
1064 
1065 	i = vmx_find_loadstore_msr_slot(&m->guest, msr);
1066 	if (!entry_only)
1067 		j = vmx_find_loadstore_msr_slot(&m->host, msr);
1068 
1069 	if ((i < 0 && m->guest.nr == MAX_NR_LOADSTORE_MSRS) ||
1070 	    (j < 0 &&  m->host.nr == MAX_NR_LOADSTORE_MSRS)) {
1071 		printk_once(KERN_WARNING "Not enough msr switch entries. "
1072 				"Can't add msr %x\n", msr);
1073 		return;
1074 	}
1075 	if (i < 0) {
1076 		i = m->guest.nr++;
1077 		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
1078 	}
1079 	m->guest.val[i].index = msr;
1080 	m->guest.val[i].value = guest_val;
1081 
1082 	if (entry_only)
1083 		return;
1084 
1085 	if (j < 0) {
1086 		j = m->host.nr++;
1087 		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
1088 	}
1089 	m->host.val[j].index = msr;
1090 	m->host.val[j].value = host_val;
1091 }
1092 
1093 static bool update_transition_efer(struct vcpu_vmx *vmx)
1094 {
1095 	u64 guest_efer = vmx->vcpu.arch.efer;
1096 	u64 ignore_bits = 0;
1097 	int i;
1098 
1099 	/* Shadow paging assumes NX to be available.  */
1100 	if (!enable_ept)
1101 		guest_efer |= EFER_NX;
1102 
1103 	/*
1104 	 * LMA and LME handled by hardware; SCE meaningless outside long mode.
1105 	 */
1106 	ignore_bits |= EFER_SCE;
1107 #ifdef CONFIG_X86_64
1108 	ignore_bits |= EFER_LMA | EFER_LME;
1109 	/* SCE is meaningful only in long mode on Intel */
1110 	if (guest_efer & EFER_LMA)
1111 		ignore_bits &= ~(u64)EFER_SCE;
1112 #endif
1113 
1114 	/*
1115 	 * On EPT, we can't emulate NX, so we must switch EFER atomically.
1116 	 * On CPUs that support "load IA32_EFER", always switch EFER
1117 	 * atomically, since it's faster than switching it manually.
1118 	 */
1119 	if (cpu_has_load_ia32_efer() ||
1120 	    (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
1121 		if (!(guest_efer & EFER_LMA))
1122 			guest_efer &= ~EFER_LME;
1123 		if (guest_efer != host_efer)
1124 			add_atomic_switch_msr(vmx, MSR_EFER,
1125 					      guest_efer, host_efer, false);
1126 		else
1127 			clear_atomic_switch_msr(vmx, MSR_EFER);
1128 		return false;
1129 	}
1130 
1131 	i = __vmx_find_uret_msr(vmx, MSR_EFER);
1132 	if (i < 0)
1133 		return false;
1134 
1135 	clear_atomic_switch_msr(vmx, MSR_EFER);
1136 
1137 	guest_efer &= ~ignore_bits;
1138 	guest_efer |= host_efer & ignore_bits;
1139 
1140 	vmx->guest_uret_msrs[i].data = guest_efer;
1141 	vmx->guest_uret_msrs[i].mask = ~ignore_bits;
1142 
1143 	return true;
1144 }
1145 
1146 #ifdef CONFIG_X86_32
1147 /*
1148  * On 32-bit kernels, VM exits still load the FS and GS bases from the
1149  * VMCS rather than the segment table.  KVM uses this helper to figure
1150  * out the current bases to poke them into the VMCS before entry.
1151  */
1152 static unsigned long segment_base(u16 selector)
1153 {
1154 	struct desc_struct *table;
1155 	unsigned long v;
1156 
1157 	if (!(selector & ~SEGMENT_RPL_MASK))
1158 		return 0;
1159 
1160 	table = get_current_gdt_ro();
1161 
1162 	if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
1163 		u16 ldt_selector = kvm_read_ldt();
1164 
1165 		if (!(ldt_selector & ~SEGMENT_RPL_MASK))
1166 			return 0;
1167 
1168 		table = (struct desc_struct *)segment_base(ldt_selector);
1169 	}
1170 	v = get_desc_base(&table[selector >> 3]);
1171 	return v;
1172 }
1173 #endif
1174 
1175 static inline bool pt_can_write_msr(struct vcpu_vmx *vmx)
1176 {
1177 	return vmx_pt_mode_is_host_guest() &&
1178 	       !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
1179 }
1180 
1181 static inline bool pt_output_base_valid(struct kvm_vcpu *vcpu, u64 base)
1182 {
1183 	/* The base must be 128-byte aligned and a legal physical address. */
1184 	return !kvm_vcpu_is_illegal_gpa(vcpu, base) && !(base & 0x7f);
1185 }
1186 
1187 static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_range)
1188 {
1189 	u32 i;
1190 
1191 	wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
1192 	wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
1193 	wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
1194 	wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
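	/*
	 * The A/B MSRs of each address range are adjacent, so range i's
	 * pair lives at MSR_IA32_RTIT_ADDR0_A + 2 * i and the following MSR.
	 */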
1195 	for (i = 0; i < addr_range; i++) {
1196 		wrmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
1197 		wrmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
1198 	}
1199 }
1200 
1201 static inline void pt_save_msr(struct pt_ctx *ctx, u32 addr_range)
1202 {
1203 	u32 i;
1204 
1205 	rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
1206 	rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
1207 	rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
1208 	rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
1209 	for (i = 0; i < addr_range; i++) {
1210 		rdmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
1211 		rdmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
1212 	}
1213 }
1214 
1215 static void pt_guest_enter(struct vcpu_vmx *vmx)
1216 {
1217 	if (vmx_pt_mode_is_system())
1218 		return;
1219 
1220 	/*
1221 	 * GUEST_IA32_RTIT_CTL is already set in the VMCS.
1222 	 * Save host state before VM entry.
1223 	 */
1224 	rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
1225 	if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
1226 		wrmsrl(MSR_IA32_RTIT_CTL, 0);
1227 		pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range);
1228 		pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range);
1229 	}
1230 }
1231 
1232 static void pt_guest_exit(struct vcpu_vmx *vmx)
1233 {
1234 	if (vmx_pt_mode_is_system())
1235 		return;
1236 
1237 	if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
1238 		pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range);
1239 		pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range);
1240 	}
1241 
1242 	/* Reload host state (IA32_RTIT_CTL will be cleared on VM exit). */
1243 	wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
1244 }
1245 
1246 void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
1247 			unsigned long fs_base, unsigned long gs_base)
1248 {
1249 	if (unlikely(fs_sel != host->fs_sel)) {
1250 		if (!(fs_sel & 7))
1251 			vmcs_write16(HOST_FS_SELECTOR, fs_sel);
1252 		else
1253 			vmcs_write16(HOST_FS_SELECTOR, 0);
1254 		host->fs_sel = fs_sel;
1255 	}
1256 	if (unlikely(gs_sel != host->gs_sel)) {
1257 		if (!(gs_sel & 7))
1258 			vmcs_write16(HOST_GS_SELECTOR, gs_sel);
1259 		else
1260 			vmcs_write16(HOST_GS_SELECTOR, 0);
1261 		host->gs_sel = gs_sel;
1262 	}
1263 	if (unlikely(fs_base != host->fs_base)) {
1264 		vmcs_writel(HOST_FS_BASE, fs_base);
1265 		host->fs_base = fs_base;
1266 	}
1267 	if (unlikely(gs_base != host->gs_base)) {
1268 		vmcs_writel(HOST_GS_BASE, gs_base);
1269 		host->gs_base = gs_base;
1270 	}
1271 }
1272 
1273 void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
1274 {
1275 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1276 	struct vmcs_host_state *host_state;
1277 #ifdef CONFIG_X86_64
1278 	int cpu = raw_smp_processor_id();
1279 #endif
1280 	unsigned long fs_base, gs_base;
1281 	u16 fs_sel, gs_sel;
1282 	int i;
1283 
1284 	vmx->req_immediate_exit = false;
1285 
1286 	/*
1287 	 * Note that guest MSRs to be saved/restored can also be changed
1288 	 * when guest state is loaded. This happens when guest transitions
1289 	 * to/from long-mode by setting MSR_EFER.LMA.
1290 	 */
1291 	if (!vmx->guest_uret_msrs_loaded) {
1292 		vmx->guest_uret_msrs_loaded = true;
1293 		for (i = 0; i < vmx->nr_active_uret_msrs; ++i)
1294 			kvm_set_user_return_msr(vmx->guest_uret_msrs[i].slot,
1295 						vmx->guest_uret_msrs[i].data,
1296 						vmx->guest_uret_msrs[i].mask);
1297 
1298 	}
1299 
1300 	if (vmx->nested.need_vmcs12_to_shadow_sync)
1301 		nested_sync_vmcs12_to_shadow(vcpu);
1302 
1303 	if (vmx->guest_state_loaded)
1304 		return;
1305 
1306 	host_state = &vmx->loaded_vmcs->host_state;
1307 
1308 	/*
1309 	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
1310 	 * allow segment selectors with cpl > 0 or ti == 1.
1311 	 */
1312 	host_state->ldt_sel = kvm_read_ldt();
1313 
1314 #ifdef CONFIG_X86_64
1315 	savesegment(ds, host_state->ds_sel);
1316 	savesegment(es, host_state->es_sel);
1317 
1318 	gs_base = cpu_kernelmode_gs_base(cpu);
1319 	if (likely(is_64bit_mm(current->mm))) {
1320 		current_save_fsgs();
1321 		fs_sel = current->thread.fsindex;
1322 		gs_sel = current->thread.gsindex;
1323 		fs_base = current->thread.fsbase;
1324 		vmx->msr_host_kernel_gs_base = current->thread.gsbase;
1325 	} else {
1326 		savesegment(fs, fs_sel);
1327 		savesegment(gs, gs_sel);
1328 		fs_base = read_msr(MSR_FS_BASE);
1329 		vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
1330 	}
1331 
1332 	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1333 #else
1334 	savesegment(fs, fs_sel);
1335 	savesegment(gs, gs_sel);
1336 	fs_base = segment_base(fs_sel);
1337 	gs_base = segment_base(gs_sel);
1338 #endif
1339 
1340 	vmx_set_host_fs_gs(host_state, fs_sel, gs_sel, fs_base, gs_base);
1341 	vmx->guest_state_loaded = true;
1342 }
1343 
1344 static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
1345 {
1346 	struct vmcs_host_state *host_state;
1347 
1348 	if (!vmx->guest_state_loaded)
1349 		return;
1350 
1351 	host_state = &vmx->loaded_vmcs->host_state;
1352 
1353 	++vmx->vcpu.stat.host_state_reload;
1354 
1355 #ifdef CONFIG_X86_64
1356 	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1357 #endif
1358 	if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
1359 		kvm_load_ldt(host_state->ldt_sel);
1360 #ifdef CONFIG_X86_64
1361 		load_gs_index(host_state->gs_sel);
1362 #else
1363 		loadsegment(gs, host_state->gs_sel);
1364 #endif
1365 	}
1366 	if (host_state->fs_sel & 7)
1367 		loadsegment(fs, host_state->fs_sel);
1368 #ifdef CONFIG_X86_64
1369 	if (unlikely(host_state->ds_sel | host_state->es_sel)) {
1370 		loadsegment(ds, host_state->ds_sel);
1371 		loadsegment(es, host_state->es_sel);
1372 	}
1373 #endif
1374 	invalidate_tss_limit();
1375 #ifdef CONFIG_X86_64
1376 	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
1377 #endif
1378 	load_fixmap_gdt(raw_smp_processor_id());
1379 	vmx->guest_state_loaded = false;
1380 	vmx->guest_uret_msrs_loaded = false;
1381 }
1382 
1383 #ifdef CONFIG_X86_64
1384 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
1385 {
1386 	preempt_disable();
1387 	if (vmx->guest_state_loaded)
1388 		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1389 	preempt_enable();
1390 	return vmx->msr_guest_kernel_gs_base;
1391 }
1392 
1393 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
1394 {
1395 	preempt_disable();
1396 	if (vmx->guest_state_loaded)
1397 		wrmsrl(MSR_KERNEL_GS_BASE, data);
1398 	preempt_enable();
1399 	vmx->msr_guest_kernel_gs_base = data;
1400 }
1401 #endif
1402 
1403 void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
1404 			struct loaded_vmcs *buddy)
1405 {
1406 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1407 	bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
1408 	struct vmcs *prev;
1409 
1410 	if (!already_loaded) {
1411 		loaded_vmcs_clear(vmx->loaded_vmcs);
1412 		local_irq_disable();
1413 
1414 		/*
1415 		 * Ensure loaded_vmcs->cpu is read before adding loaded_vmcs to
1416 		 * this cpu's percpu list, otherwise it may not yet be deleted
1417 		 * from its previous cpu's percpu list.  Pairs with the
1418 		 * smp_wmb() in __loaded_vmcs_clear().
1419 		 */
1420 		smp_rmb();
1421 
1422 		list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
1423 			 &per_cpu(loaded_vmcss_on_cpu, cpu));
1424 		local_irq_enable();
1425 	}
1426 
1427 	prev = per_cpu(current_vmcs, cpu);
1428 	if (prev != vmx->loaded_vmcs->vmcs) {
1429 		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
1430 		vmcs_load(vmx->loaded_vmcs->vmcs);
1431 
1432 		/*
1433 		 * No indirect branch prediction barrier needed when switching
1434 		 * the active VMCS within a vCPU, unless IBRS is advertised to
1435 		 * the vCPU.  To minimize the number of IBPBs executed, KVM
1436 		 * performs IBPB on nested VM-Exit (a single nested transition
1437 		 * may switch the active VMCS multiple times).
1438 		 */
1439 		if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
1440 			indirect_branch_prediction_barrier();
1441 	}
1442 
1443 	if (!already_loaded) {
1444 		void *gdt = get_current_gdt_ro();
1445 		unsigned long sysenter_esp;
1446 
1447 		/*
1448 		 * Flush all EPTP/VPID contexts, the new pCPU may have stale
1449 		 * TLB entries from its previous association with the vCPU.
1450 		 */
1451 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1452 
1453 		/*
1454 		 * Linux uses per-cpu TSS and GDT, so set these when switching
1455 		 * processors.  See 22.2.4.
1456 		 */
1457 		vmcs_writel(HOST_TR_BASE,
1458 			    (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
1459 		vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */
1460 
1461 		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
1462 		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
1463 
1464 		vmx->loaded_vmcs->cpu = cpu;
1465 	}
1466 
1467 	/* Setup TSC multiplier */
1468 	if (kvm_has_tsc_control &&
1469 	    vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
1470 		decache_tsc_multiplier(vmx);
1471 }
1472 
1473 /*
1474  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
1475  * vcpu mutex is already taken.
1476  */
1477 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1478 {
1479 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1480 
1481 	vmx_vcpu_load_vmcs(vcpu, cpu, NULL);
1482 
1483 	vmx_vcpu_pi_load(vcpu, cpu);
1484 
1485 	vmx->host_debugctlmsr = get_debugctlmsr();
1486 }
1487 
1488 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
1489 {
1490 	vmx_vcpu_pi_put(vcpu);
1491 
1492 	vmx_prepare_switch_to_host(to_vmx(vcpu));
1493 }
1494 
1495 static bool emulation_required(struct kvm_vcpu *vcpu)
1496 {
1497 	return emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu);
1498 }
1499 
1500 unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
1501 {
1502 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1503 	unsigned long rflags, save_rflags;
1504 
1505 	if (!kvm_register_is_available(vcpu, VCPU_EXREG_RFLAGS)) {
1506 		kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
1507 		rflags = vmcs_readl(GUEST_RFLAGS);
1508 		if (vmx->rmode.vm86_active) {
1509 			rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
1510 			save_rflags = vmx->rmode.save_rflags;
1511 			rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
1512 		}
1513 		vmx->rflags = rflags;
1514 	}
1515 	return vmx->rflags;
1516 }
1517 
1518 void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1519 {
1520 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1521 	unsigned long old_rflags;
1522 
1523 	if (is_unrestricted_guest(vcpu)) {
1524 		kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
1525 		vmx->rflags = rflags;
1526 		vmcs_writel(GUEST_RFLAGS, rflags);
1527 		return;
1528 	}
1529 
1530 	old_rflags = vmx_get_rflags(vcpu);
1531 	vmx->rflags = rflags;
1532 	if (vmx->rmode.vm86_active) {
1533 		vmx->rmode.save_rflags = rflags;
1534 		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1535 	}
1536 	vmcs_writel(GUEST_RFLAGS, rflags);
1537 
1538 	if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
1539 		vmx->emulation_required = emulation_required(vcpu);
1540 }
1541 
1542 u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
1543 {
1544 	u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1545 	int ret = 0;
1546 
1547 	if (interruptibility & GUEST_INTR_STATE_STI)
1548 		ret |= KVM_X86_SHADOW_INT_STI;
1549 	if (interruptibility & GUEST_INTR_STATE_MOV_SS)
1550 		ret |= KVM_X86_SHADOW_INT_MOV_SS;
1551 
1552 	return ret;
1553 }
1554 
1555 void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
1556 {
1557 	u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1558 	u32 interruptibility = interruptibility_old;
1559 
1560 	interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
1561 
1562 	if (mask & KVM_X86_SHADOW_INT_MOV_SS)
1563 		interruptibility |= GUEST_INTR_STATE_MOV_SS;
1564 	else if (mask & KVM_X86_SHADOW_INT_STI)
1565 		interruptibility |= GUEST_INTR_STATE_STI;
1566 
1567 	if (interruptibility != interruptibility_old)
1568 		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
1569 }
1570 
1571 static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
1572 {
1573 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1574 	unsigned long value;
1575 
1576 	/*
1577 	 * Any MSR write that attempts to change bits marked reserved will
1578 	 * cause a #GP fault.
1579 	 */
1580 	if (data & vmx->pt_desc.ctl_bitmask)
1581 		return 1;
1582 
1583 	/*
1584 	 * Any attempt to modify IA32_RTIT_CTL while TraceEn is set will
1585 	 * result in a #GP unless the same write also clears TraceEn.
1586 	 */
1587 	if ((vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) &&
1588 		((vmx->pt_desc.guest.ctl ^ data) & ~RTIT_CTL_TRACEEN))
1589 		return 1;
1590 
1591 	/*
1592 	 * A WRMSR to IA32_RTIT_CTL that sets TraceEn but clears ToPA
1593 	 * and FabricEn causes a #GP if
1594 	 * CPUID.(EAX=14H, ECX=0):ECX.SNGLRGNOUT[bit 2] = 0.
1595 	 */
1596 	if ((data & RTIT_CTL_TRACEEN) && !(data & RTIT_CTL_TOPA) &&
1597 		!(data & RTIT_CTL_FABRIC_EN) &&
1598 		!intel_pt_validate_cap(vmx->pt_desc.caps,
1599 					PT_CAP_single_range_output))
1600 		return 1;
1601 
1602 	/*
1603 	 * Check the MTCFreq, CycThresh and PSBFreq encodings: any MSR write
1604 	 * that uses an encoding marked reserved will cause a #GP fault.
1605 	 */
1606 	value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods);
1607 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) &&
1608 			!test_bit((data & RTIT_CTL_MTC_RANGE) >>
1609 			RTIT_CTL_MTC_RANGE_OFFSET, &value))
1610 		return 1;
1611 	value = intel_pt_validate_cap(vmx->pt_desc.caps,
1612 						PT_CAP_cycle_thresholds);
1613 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
1614 			!test_bit((data & RTIT_CTL_CYC_THRESH) >>
1615 			RTIT_CTL_CYC_THRESH_OFFSET, &value))
1616 		return 1;
1617 	value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_periods);
1618 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
1619 			!test_bit((data & RTIT_CTL_PSB_FREQ) >>
1620 			RTIT_CTL_PSB_FREQ_OFFSET, &value))
1621 		return 1;
1622 
1623 	/*
1624 	 * An ADDRx_CFG encoding that is reserved or greater than 2 will
1625 	 * cause a #GP fault.
1626 	 */
1627 	value = (data & RTIT_CTL_ADDR0) >> RTIT_CTL_ADDR0_OFFSET;
1628 	if ((value && (vmx->pt_desc.addr_range < 1)) || (value > 2))
1629 		return 1;
1630 	value = (data & RTIT_CTL_ADDR1) >> RTIT_CTL_ADDR1_OFFSET;
1631 	if ((value && (vmx->pt_desc.addr_range < 2)) || (value > 2))
1632 		return 1;
1633 	value = (data & RTIT_CTL_ADDR2) >> RTIT_CTL_ADDR2_OFFSET;
1634 	if ((value && (vmx->pt_desc.addr_range < 3)) || (value > 2))
1635 		return 1;
1636 	value = (data & RTIT_CTL_ADDR3) >> RTIT_CTL_ADDR3_OFFSET;
1637 	if ((value && (vmx->pt_desc.addr_range < 4)) || (value > 2))
1638 		return 1;
1639 
1640 	return 0;
1641 }
1642 
1643 static bool vmx_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int insn_len)
1644 {
1645 	return true;
1646 }
1647 
1648 static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
1649 {
1650 	unsigned long rip, orig_rip;
1651 
1652 	/*
1653 	 * Using VMCS.VM_EXIT_INSTRUCTION_LEN on EPT misconfig depends on
1654 	 * undefined behavior: Intel's SDM doesn't mandate the VMCS field be
1655 	 * set when EPT misconfig occurs.  In practice, real hardware updates
1656 	 * VM_EXIT_INSTRUCTION_LEN on EPT misconfig, but other hypervisors
1657 	 * (namely Hyper-V) don't set it due to it being undefined behavior,
1658 	 * i.e. we end up advancing IP with some random value.
1659 	 */
1660 	if (!static_cpu_has(X86_FEATURE_HYPERVISOR) ||
1661 	    to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) {
1662 		orig_rip = kvm_rip_read(vcpu);
1663 		rip = orig_rip + vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
1664 #ifdef CONFIG_X86_64
1665 		/*
1666 		 * We need to mask out the high 32 bits of RIP if not in 64-bit
1667 		 * mode, but just finding out that we are in 64-bit mode is
1668 		 * quite expensive.  Only do it if there was a carry.
1669 		 */
1670 		if (unlikely(((rip ^ orig_rip) >> 31) == 3) && !is_64_bit_mode(vcpu))
1671 			rip = (u32)rip;
1672 #endif
1673 		kvm_rip_write(vcpu, rip);
1674 	} else {
1675 		if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
1676 			return 0;
1677 	}
1678 
1679 	/* skipping an emulated instruction also counts */
1680 	vmx_set_interrupt_shadow(vcpu, 0);
1681 
1682 	return 1;
1683 }
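
/*
 * Editorial note with made-up numbers: the "carry" test above can be
 * sanity-checked by hand.  For a 32-bit guest at the top of its address
 * space, say orig_rip = 0xffffffff and a 4-byte instruction:
 *
 *	rip            = 0x1'00000003
 *	rip ^ orig_rip = 0x1'fffffffc
 *	(xor) >> 31    = 0x3
 *
 * Bits 31 and 32 both flipped, which only happens when the addition
 * carried out of the low 32 bits; (u32)rip = 0x00000003 then yields the
 * correct wrapped RIP for a guest that is not in 64-bit mode.
 */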
1684 
1685 /*
1686  * Recognizes a pending MTF VM-exit and records the nested state for later
1687  * delivery.
1688  */
1689 static void vmx_update_emulated_instruction(struct kvm_vcpu *vcpu)
1690 {
1691 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1692 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1693 
1694 	if (!is_guest_mode(vcpu))
1695 		return;
1696 
1697 	/*
1698 	 * Per the SDM, MTF takes priority over debug-trap exceptions other
1699 	 * than T-bit traps. As instruction emulation is completed (i.e. at the
1700 	 * instruction boundary), any #DB exception pending delivery must be a
1701 	 * debug-trap. Record the pending MTF state to be delivered in
1702 	 * vmx_check_nested_events().
1703 	 */
1704 	if (nested_cpu_has_mtf(vmcs12) &&
1705 	    (!vcpu->arch.exception.pending ||
1706 	     vcpu->arch.exception.nr == DB_VECTOR))
1707 		vmx->nested.mtf_pending = true;
1708 	else
1709 		vmx->nested.mtf_pending = false;
1710 }
1711 
1712 static int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu)
1713 {
1714 	vmx_update_emulated_instruction(vcpu);
1715 	return skip_emulated_instruction(vcpu);
1716 }
1717 
1718 static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
1719 {
1720 	/*
1721 	 * Ensure that we clear the HLT state in the VMCS.  We don't need to
1722 	 * explicitly skip the instruction because if the HLT state is set,
1723 	 * then the instruction is already executing and RIP has already been
1724 	 * advanced.
1725 	 */
1726 	if (kvm_hlt_in_guest(vcpu->kvm) &&
1727 			vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
1728 		vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
1729 }
1730 
1731 static void vmx_queue_exception(struct kvm_vcpu *vcpu)
1732 {
1733 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1734 	unsigned nr = vcpu->arch.exception.nr;
1735 	bool has_error_code = vcpu->arch.exception.has_error_code;
1736 	u32 error_code = vcpu->arch.exception.error_code;
1737 	u32 intr_info = nr | INTR_INFO_VALID_MASK;
1738 
1739 	kvm_deliver_exception_payload(vcpu);
1740 
1741 	if (has_error_code) {
1742 		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
1743 		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
1744 	}
1745 
1746 	if (vmx->rmode.vm86_active) {
1747 		int inc_eip = 0;
1748 		if (kvm_exception_is_soft(nr))
1749 			inc_eip = vcpu->arch.event_exit_inst_len;
1750 		kvm_inject_realmode_interrupt(vcpu, nr, inc_eip);
1751 		return;
1752 	}
1753 
1754 	WARN_ON_ONCE(vmx->emulation_required);
1755 
1756 	if (kvm_exception_is_soft(nr)) {
1757 		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
1758 			     vmx->vcpu.arch.event_exit_inst_len);
1759 		intr_info |= INTR_TYPE_SOFT_EXCEPTION;
1760 	} else
1761 		intr_info |= INTR_TYPE_HARD_EXCEPTION;
1762 
1763 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
1764 
1765 	vmx_clear_hlt(vcpu);
1766 }
1767 
1768 static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr)
1769 {
1770 	struct vmx_uret_msr tmp;
1771 	int from, to;
1772 
1773 	from = __vmx_find_uret_msr(vmx, msr);
1774 	if (from < 0)
1775 		return;
1776 	to = vmx->nr_active_uret_msrs++;
1777 
1778 	tmp = vmx->guest_uret_msrs[to];
1779 	vmx->guest_uret_msrs[to] = vmx->guest_uret_msrs[from];
1780 	vmx->guest_uret_msrs[from] = tmp;
1781 }
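
/*
 * Editorial illustration (hypothetical array contents): the swap above
 * compacts the active MSRs to the front of guest_uret_msrs[] so the exit
 * path only has to walk indices [0, nr_active_uret_msrs).  With entries
 * [A, B, C, D], nr_active_uret_msrs == 1 and a request for D (from = 3,
 * to = 1), the array becomes [A, D, C, B] with nr_active_uret_msrs == 2.
 */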
1782 
1783 /*
1784  * Set up the vmcs to automatically save and restore system
1785  * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
1786  * mode, as fiddling with msrs is very expensive.
1787  */
1788 static void setup_msrs(struct vcpu_vmx *vmx)
1789 {
1790 	vmx->guest_uret_msrs_loaded = false;
1791 	vmx->nr_active_uret_msrs = 0;
1792 #ifdef CONFIG_X86_64
1793 	/*
1794 	 * The SYSCALL MSRs are only needed on long mode guests, and only
1795 	 * when EFER.SCE is set.
1796 	 */
1797 	if (is_long_mode(&vmx->vcpu) && (vmx->vcpu.arch.efer & EFER_SCE)) {
1798 		vmx_setup_uret_msr(vmx, MSR_STAR);
1799 		vmx_setup_uret_msr(vmx, MSR_LSTAR);
1800 		vmx_setup_uret_msr(vmx, MSR_SYSCALL_MASK);
1801 	}
1802 #endif
1803 	if (update_transition_efer(vmx))
1804 		vmx_setup_uret_msr(vmx, MSR_EFER);
1805 
1806 	if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
1807 		vmx_setup_uret_msr(vmx, MSR_TSC_AUX);
1808 
1809 	vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL);
1810 
1811 	if (cpu_has_vmx_msr_bitmap())
1812 		vmx_update_msr_bitmap(&vmx->vcpu);
1813 }
1814 
1815 static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1816 {
1817 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1818 	u64 g_tsc_offset = 0;
1819 
1820 	/*
1821 	 * We're here if L1 chose not to trap WRMSR to TSC. According
1822 	 * to the spec, this should set L1's TSC; the offset that L1
1823 	 * set for L2 remains unchanged, and still needs to be added
1824 	 * to the newly set TSC to get L2's TSC.
1825 	 */
1826 	if (is_guest_mode(vcpu) &&
1827 	    (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING))
1828 		g_tsc_offset = vmcs12->tsc_offset;
1829 
1830 	trace_kvm_write_tsc_offset(vcpu->vcpu_id,
1831 				   vcpu->arch.tsc_offset - g_tsc_offset,
1832 				   offset);
1833 	vmcs_write64(TSC_OFFSET, offset + g_tsc_offset);
1834 	return offset + g_tsc_offset;
1835 }
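
/*
 * Editorial note with made-up numbers: if L1 programmed
 * vmcs12->tsc_offset = 1000 for L2 and a WRMSR from L1 computes a new L1
 * offset of 5000, the value written to the hardware TSC_OFFSET field
 * above is 5000 + 1000 = 6000, so L2 keeps observing its 1000-cycle bias
 * on top of L1's newly written TSC.
 */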
1836 
1837 /*
1838  * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
1839  * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
1840  * all guests if the "nested" module option is off, and can also be disabled
1841  * for a single guest by disabling its VMX cpuid bit.
1842  */
1843 bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
1844 {
1845 	return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
1846 }
1847 
1848 static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
1849 						 uint64_t val)
1850 {
1851 	uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits;
1852 
1853 	return !(val & ~valid_bits);
1854 }
1855 
1856 static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
1857 {
1858 	switch (msr->index) {
1859 	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
1860 		if (!nested)
1861 			return 1;
1862 		return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
1863 	case MSR_IA32_PERF_CAPABILITIES:
1864 		msr->data = vmx_get_perf_capabilities();
1865 		return 0;
1866 	default:
1867 		return KVM_MSR_RET_INVALID;
1868 	}
1869 }
1870 
1871 /*
1872  * Reads an msr value (of 'msr_index') into 'pdata'.
1873  * Returns 0 on success, non-0 otherwise.
1874  * Assumes vcpu_load() was already called.
1875  */
1876 static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1877 {
1878 	struct vcpu_vmx *vmx = to_vmx(vcpu);
1879 	struct vmx_uret_msr *msr;
1880 	u32 index;
1881 
1882 	switch (msr_info->index) {
1883 #ifdef CONFIG_X86_64
1884 	case MSR_FS_BASE:
1885 		msr_info->data = vmcs_readl(GUEST_FS_BASE);
1886 		break;
1887 	case MSR_GS_BASE:
1888 		msr_info->data = vmcs_readl(GUEST_GS_BASE);
1889 		break;
1890 	case MSR_KERNEL_GS_BASE:
1891 		msr_info->data = vmx_read_guest_kernel_gs_base(vmx);
1892 		break;
1893 #endif
1894 	case MSR_EFER:
1895 		return kvm_get_msr_common(vcpu, msr_info);
1896 	case MSR_IA32_TSX_CTRL:
1897 		if (!msr_info->host_initiated &&
1898 		    !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
1899 			return 1;
1900 		goto find_uret_msr;
1901 	case MSR_IA32_UMWAIT_CONTROL:
1902 		if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
1903 			return 1;
1904 
1905 		msr_info->data = vmx->msr_ia32_umwait_control;
1906 		break;
1907 	case MSR_IA32_SPEC_CTRL:
1908 		if (!msr_info->host_initiated &&
1909 		    !guest_has_spec_ctrl_msr(vcpu))
1910 			return 1;
1911 
1912 		msr_info->data = to_vmx(vcpu)->spec_ctrl;
1913 		break;
1914 	case MSR_IA32_SYSENTER_CS:
1915 		msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
1916 		break;
1917 	case MSR_IA32_SYSENTER_EIP:
1918 		msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
1919 		break;
1920 	case MSR_IA32_SYSENTER_ESP:
1921 		msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
1922 		break;
1923 	case MSR_IA32_BNDCFGS:
1924 		if (!kvm_mpx_supported() ||
1925 		    (!msr_info->host_initiated &&
1926 		     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
1927 			return 1;
1928 		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
1929 		break;
1930 	case MSR_IA32_MCG_EXT_CTL:
1931 		if (!msr_info->host_initiated &&
1932 		    !(vmx->msr_ia32_feature_control &
1933 		      FEAT_CTL_LMCE_ENABLED))
1934 			return 1;
1935 		msr_info->data = vcpu->arch.mcg_ext_ctl;
1936 		break;
1937 	case MSR_IA32_FEAT_CTL:
1938 		msr_info->data = vmx->msr_ia32_feature_control;
1939 		break;
1940 	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
1941 		if (!nested_vmx_allowed(vcpu))
1942 			return 1;
1943 		if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
1944 				    &msr_info->data))
1945 			return 1;
1946 		/*
1947 		 * Enlightened VMCS v1 doesn't have certain VMCS fields but
1948 		 * instead of just ignoring the features, different Hyper-V
1949 		 * versions are either trying to use them and fail or do some
1950 		 * sanity checking and refuse to boot. Filter all unsupported
1951 		 * features out.
1952 		 */
1953 		if (!msr_info->host_initiated &&
1954 		    vmx->nested.enlightened_vmcs_enabled)
1955 			nested_evmcs_filter_control_msr(msr_info->index,
1956 							&msr_info->data);
1957 		break;
1958 	case MSR_IA32_RTIT_CTL:
1959 		if (!vmx_pt_mode_is_host_guest())
1960 			return 1;
1961 		msr_info->data = vmx->pt_desc.guest.ctl;
1962 		break;
1963 	case MSR_IA32_RTIT_STATUS:
1964 		if (!vmx_pt_mode_is_host_guest())
1965 			return 1;
1966 		msr_info->data = vmx->pt_desc.guest.status;
1967 		break;
1968 	case MSR_IA32_RTIT_CR3_MATCH:
1969 		if (!vmx_pt_mode_is_host_guest() ||
1970 			!intel_pt_validate_cap(vmx->pt_desc.caps,
1971 						PT_CAP_cr3_filtering))
1972 			return 1;
1973 		msr_info->data = vmx->pt_desc.guest.cr3_match;
1974 		break;
1975 	case MSR_IA32_RTIT_OUTPUT_BASE:
1976 		if (!vmx_pt_mode_is_host_guest() ||
1977 			(!intel_pt_validate_cap(vmx->pt_desc.caps,
1978 					PT_CAP_topa_output) &&
1979 			 !intel_pt_validate_cap(vmx->pt_desc.caps,
1980 					PT_CAP_single_range_output)))
1981 			return 1;
1982 		msr_info->data = vmx->pt_desc.guest.output_base;
1983 		break;
1984 	case MSR_IA32_RTIT_OUTPUT_MASK:
1985 		if (!vmx_pt_mode_is_host_guest() ||
1986 			(!intel_pt_validate_cap(vmx->pt_desc.caps,
1987 					PT_CAP_topa_output) &&
1988 			 !intel_pt_validate_cap(vmx->pt_desc.caps,
1989 					PT_CAP_single_range_output)))
1990 			return 1;
1991 		msr_info->data = vmx->pt_desc.guest.output_mask;
1992 		break;
1993 	case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
1994 		index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
1995 		if (!vmx_pt_mode_is_host_guest() ||
1996 			(index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps,
1997 					PT_CAP_num_address_ranges)))
1998 			return 1;
1999 		if (index % 2)
2000 			msr_info->data = vmx->pt_desc.guest.addr_b[index / 2];
2001 		else
2002 			msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
2003 		break;
2004 	case MSR_TSC_AUX:
2005 		if (!msr_info->host_initiated &&
2006 		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
2007 			return 1;
2008 		goto find_uret_msr;
2009 	default:
2010 	find_uret_msr:
2011 		msr = vmx_find_uret_msr(vmx, msr_info->index);
2012 		if (msr) {
2013 			msr_info->data = msr->data;
2014 			break;
2015 		}
2016 		return kvm_get_msr_common(vcpu, msr_info);
2017 	}
2018 
2019 	return 0;
2020 }
2021 
2022 static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu,
2023 						    u64 data)
2024 {
2025 #ifdef CONFIG_X86_64
2026 	if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
2027 		return (u32)data;
2028 #endif
2029 	return (unsigned long)data;
2030 }
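
/*
 * Editorial note: for a guest without X86_FEATURE_LM, a (hypothetical)
 * write of 0x123456789abcdef0 to IA32_SYSENTER_EIP is truncated above to
 * 0x9abcdef0, matching the 32-bit architectural width of the MSR on
 * CPUs without long mode.
 */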
2031 
2032 /*
2033  * Writes msr value into the appropriate "register".
2034  * Returns 0 on success, non-0 otherwise.
2035  * Assumes vcpu_load() was already called.
2036  */
2037 static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2038 {
2039 	struct vcpu_vmx *vmx = to_vmx(vcpu);
2040 	struct vmx_uret_msr *msr;
2041 	int ret = 0;
2042 	u32 msr_index = msr_info->index;
2043 	u64 data = msr_info->data;
2044 	u32 index;
2045 
2046 	switch (msr_index) {
2047 	case MSR_EFER:
2048 		ret = kvm_set_msr_common(vcpu, msr_info);
2049 		break;
2050 #ifdef CONFIG_X86_64
2051 	case MSR_FS_BASE:
2052 		vmx_segment_cache_clear(vmx);
2053 		vmcs_writel(GUEST_FS_BASE, data);
2054 		break;
2055 	case MSR_GS_BASE:
2056 		vmx_segment_cache_clear(vmx);
2057 		vmcs_writel(GUEST_GS_BASE, data);
2058 		break;
2059 	case MSR_KERNEL_GS_BASE:
2060 		vmx_write_guest_kernel_gs_base(vmx, data);
2061 		break;
2062 #endif
2063 	case MSR_IA32_SYSENTER_CS:
2064 		if (is_guest_mode(vcpu))
2065 			get_vmcs12(vcpu)->guest_sysenter_cs = data;
2066 		vmcs_write32(GUEST_SYSENTER_CS, data);
2067 		break;
2068 	case MSR_IA32_SYSENTER_EIP:
2069 		if (is_guest_mode(vcpu)) {
2070 			data = nested_vmx_truncate_sysenter_addr(vcpu, data);
2071 			get_vmcs12(vcpu)->guest_sysenter_eip = data;
2072 		}
2073 		vmcs_writel(GUEST_SYSENTER_EIP, data);
2074 		break;
2075 	case MSR_IA32_SYSENTER_ESP:
2076 		if (is_guest_mode(vcpu)) {
2077 			data = nested_vmx_truncate_sysenter_addr(vcpu, data);
2078 			get_vmcs12(vcpu)->guest_sysenter_esp = data;
2079 		}
2080 		vmcs_writel(GUEST_SYSENTER_ESP, data);
2081 		break;
2082 	case MSR_IA32_DEBUGCTLMSR:
2083 		if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls &
2084 						VM_EXIT_SAVE_DEBUG_CONTROLS)
2085 			get_vmcs12(vcpu)->guest_ia32_debugctl = data;
2086 
2087 		ret = kvm_set_msr_common(vcpu, msr_info);
2088 		break;
2089 
2090 	case MSR_IA32_BNDCFGS:
2091 		if (!kvm_mpx_supported() ||
2092 		    (!msr_info->host_initiated &&
2093 		     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
2094 			return 1;
2095 		if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
2096 		    (data & MSR_IA32_BNDCFGS_RSVD))
2097 			return 1;
2098 		vmcs_write64(GUEST_BNDCFGS, data);
2099 		break;
2100 	case MSR_IA32_UMWAIT_CONTROL:
2101 		if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
2102 			return 1;
2103 
2104 		/* Reserved bit 1 and the upper bits [63:32] must be zero */
2105 		if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32)))
2106 			return 1;
2107 
2108 		vmx->msr_ia32_umwait_control = data;
2109 		break;
2110 	case MSR_IA32_SPEC_CTRL:
2111 		if (!msr_info->host_initiated &&
2112 		    !guest_has_spec_ctrl_msr(vcpu))
2113 			return 1;
2114 
2115 		if (kvm_spec_ctrl_test_value(data))
2116 			return 1;
2117 
2118 		vmx->spec_ctrl = data;
2119 		if (!data)
2120 			break;
2121 
2122 		/*
2123 		 * For non-nested:
2124 		 * When it's written (to non-zero) for the first time, pass
2125 		 * it through.
2126 		 *
2127 		 * For nested:
2128 		 * The handling of the MSR bitmap for L2 guests is done in
2129 		 * nested_vmx_prepare_msr_bitmap. We should not touch the
2130 		 * vmcs02.msr_bitmap here since it gets completely overwritten
2131 		 * in the merging. We update the vmcs01 here for L1 as well
2132 		 * since it will end up touching the MSR anyway now.
2133 		 */
2134 		vmx_disable_intercept_for_msr(vcpu,
2135 					      MSR_IA32_SPEC_CTRL,
2136 					      MSR_TYPE_RW);
2137 		break;
2138 	case MSR_IA32_TSX_CTRL:
2139 		if (!msr_info->host_initiated &&
2140 		    !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
2141 			return 1;
2142 		if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR))
2143 			return 1;
2144 		goto find_uret_msr;
2145 	case MSR_IA32_PRED_CMD:
2146 		if (!msr_info->host_initiated &&
2147 		    !guest_has_pred_cmd_msr(vcpu))
2148 			return 1;
2149 
2150 		if (data & ~PRED_CMD_IBPB)
2151 			return 1;
2152 		if (!boot_cpu_has(X86_FEATURE_IBPB))
2153 			return 1;
2154 		if (!data)
2155 			break;
2156 
2157 		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
2158 
2159 		/*
2160 		 * For non-nested:
2161 		 * When it's written (to non-zero) for the first time, pass
2162 		 * it through.
2163 		 *
2164 		 * For nested:
2165 		 * The handling of the MSR bitmap for L2 guests is done in
2166 		 * nested_vmx_prepare_msr_bitmap. We should not touch the
2167 		 * vmcs02.msr_bitmap here since it gets completely overwritten
2168 		 * in the merging.
2169 		 */
2170 		vmx_disable_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W);
2171 		break;
2172 	case MSR_IA32_CR_PAT:
2173 		if (!kvm_pat_valid(data))
2174 			return 1;
2175 
2176 		if (is_guest_mode(vcpu) &&
2177 		    get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
2178 			get_vmcs12(vcpu)->guest_ia32_pat = data;
2179 
2180 		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2181 			vmcs_write64(GUEST_IA32_PAT, data);
2182 			vcpu->arch.pat = data;
2183 			break;
2184 		}
2185 		ret = kvm_set_msr_common(vcpu, msr_info);
2186 		break;
2187 	case MSR_IA32_TSC_ADJUST:
2188 		ret = kvm_set_msr_common(vcpu, msr_info);
2189 		break;
2190 	case MSR_IA32_MCG_EXT_CTL:
2191 		if ((!msr_info->host_initiated &&
2192 		     !(to_vmx(vcpu)->msr_ia32_feature_control &
2193 		       FEAT_CTL_LMCE_ENABLED)) ||
2194 		    (data & ~MCG_EXT_CTL_LMCE_EN))
2195 			return 1;
2196 		vcpu->arch.mcg_ext_ctl = data;
2197 		break;
2198 	case MSR_IA32_FEAT_CTL:
2199 		if (!vmx_feature_control_msr_valid(vcpu, data) ||
2200 		    (to_vmx(vcpu)->msr_ia32_feature_control &
2201 		     FEAT_CTL_LOCKED && !msr_info->host_initiated))
2202 			return 1;
2203 		vmx->msr_ia32_feature_control = data;
2204 		if (msr_info->host_initiated && data == 0)
2205 			vmx_leave_nested(vcpu);
2206 		break;
2207 	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
2208 		if (!msr_info->host_initiated)
2209 			return 1; /* they are read-only */
2210 		if (!nested_vmx_allowed(vcpu))
2211 			return 1;
2212 		return vmx_set_vmx_msr(vcpu, msr_index, data);
2213 	case MSR_IA32_RTIT_CTL:
2214 		if (!vmx_pt_mode_is_host_guest() ||
2215 			vmx_rtit_ctl_check(vcpu, data) ||
2216 			vmx->nested.vmxon)
2217 			return 1;
2218 		vmcs_write64(GUEST_IA32_RTIT_CTL, data);
2219 		vmx->pt_desc.guest.ctl = data;
2220 		pt_update_intercept_for_msr(vcpu);
2221 		break;
2222 	case MSR_IA32_RTIT_STATUS:
2223 		if (!pt_can_write_msr(vmx))
2224 			return 1;
2225 		if (data & MSR_IA32_RTIT_STATUS_MASK)
2226 			return 1;
2227 		vmx->pt_desc.guest.status = data;
2228 		break;
2229 	case MSR_IA32_RTIT_CR3_MATCH:
2230 		if (!pt_can_write_msr(vmx))
2231 			return 1;
2232 		if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2233 					   PT_CAP_cr3_filtering))
2234 			return 1;
2235 		vmx->pt_desc.guest.cr3_match = data;
2236 		break;
2237 	case MSR_IA32_RTIT_OUTPUT_BASE:
2238 		if (!pt_can_write_msr(vmx))
2239 			return 1;
2240 		if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2241 					   PT_CAP_topa_output) &&
2242 		    !intel_pt_validate_cap(vmx->pt_desc.caps,
2243 					   PT_CAP_single_range_output))
2244 			return 1;
2245 		if (!pt_output_base_valid(vcpu, data))
2246 			return 1;
2247 		vmx->pt_desc.guest.output_base = data;
2248 		break;
2249 	case MSR_IA32_RTIT_OUTPUT_MASK:
2250 		if (!pt_can_write_msr(vmx))
2251 			return 1;
2252 		if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2253 					   PT_CAP_topa_output) &&
2254 		    !intel_pt_validate_cap(vmx->pt_desc.caps,
2255 					   PT_CAP_single_range_output))
2256 			return 1;
2257 		vmx->pt_desc.guest.output_mask = data;
2258 		break;
2259 	case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
2260 		if (!pt_can_write_msr(vmx))
2261 			return 1;
2262 		index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
2263 		if (index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps,
2264 						       PT_CAP_num_address_ranges))
2265 			return 1;
2266 		if (is_noncanonical_address(data, vcpu))
2267 			return 1;
2268 		if (index % 2)
2269 			vmx->pt_desc.guest.addr_b[index / 2] = data;
2270 		else
2271 			vmx->pt_desc.guest.addr_a[index / 2] = data;
2272 		break;
2273 	case MSR_TSC_AUX:
2274 		if (!msr_info->host_initiated &&
2275 		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
2276 			return 1;
2277 		/* Bits [63:32] are reserved and must be zero */
2278 		if ((data >> 32) != 0)
2279 			return 1;
2280 		goto find_uret_msr;
2281 
2282 	default:
2283 	find_uret_msr:
2284 		msr = vmx_find_uret_msr(vmx, msr_index);
2285 		if (msr)
2286 			ret = vmx_set_guest_uret_msr(vmx, msr, data);
2287 		else
2288 			ret = kvm_set_msr_common(vcpu, msr_info);
2289 	}
2290 
2291 	/* FB_CLEAR may have changed, also update the FB_CLEAR_DIS behavior */
2292 	if (msr_index == MSR_IA32_ARCH_CAPABILITIES)
2293 		vmx_update_fb_clear_dis(vcpu, vmx);
2294 
2295 	return ret;
2296 }
2297 
2298 static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
2299 {
2300 	unsigned long guest_owned_bits;
2301 
2302 	kvm_register_mark_available(vcpu, reg);
2303 
2304 	switch (reg) {
2305 	case VCPU_REGS_RSP:
2306 		vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
2307 		break;
2308 	case VCPU_REGS_RIP:
2309 		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
2310 		break;
2311 	case VCPU_EXREG_PDPTR:
2312 		if (enable_ept)
2313 			ept_save_pdptrs(vcpu);
2314 		break;
2315 	case VCPU_EXREG_CR0:
2316 		guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
2317 
2318 		vcpu->arch.cr0 &= ~guest_owned_bits;
2319 		vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & guest_owned_bits;
2320 		break;
2321 	case VCPU_EXREG_CR3:
2322 		if (is_unrestricted_guest(vcpu) ||
2323 		    (enable_ept && is_paging(vcpu)))
2324 			vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
2325 		break;
2326 	case VCPU_EXREG_CR4:
2327 		guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
2328 
2329 		vcpu->arch.cr4 &= ~guest_owned_bits;
2330 		vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits;
2331 		break;
2332 	default:
2333 		WARN_ON_ONCE(1);
2334 		break;
2335 	}
2336 }
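
/*
 * Editorial illustration (hypothetical helper, not a kernel API): the
 * CR0/CR4 cases above are instances of a simple merge -- guest-owned
 * bits are refreshed from the VMCS, everything else comes from the
 * cached value:
 */
static inline unsigned long merge_guest_owned_bits(unsigned long cached,
						   unsigned long vmcs_val,
						   unsigned long owned)
{
	/* e.g. cached CR0 plus the fresh guest-owned bits read via vmcs_readl() */
	return (cached & ~owned) | (vmcs_val & owned);
}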
2337 
2338 static __init int cpu_has_kvm_support(void)
2339 {
2340 	return cpu_has_vmx();
2341 }
2342 
2343 static __init int vmx_disabled_by_bios(void)
2344 {
2345 	return !boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
2346 	       !boot_cpu_has(X86_FEATURE_VMX);
2347 }
2348 
2349 static int kvm_cpu_vmxon(u64 vmxon_pointer)
2350 {
2351 	u64 msr;
2352 
2353 	cr4_set_bits(X86_CR4_VMXE);
2354 	intel_pt_handle_vmx(1);
2355 
2356 	asm_volatile_goto("1: vmxon %[vmxon_pointer]\n\t"
2357 			  _ASM_EXTABLE(1b, %l[fault])
2358 			  : : [vmxon_pointer] "m"(vmxon_pointer)
2359 			  : : fault);
2360 	return 0;
2361 
2362 fault:
2363 	WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n",
2364 		  rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);
2365 	intel_pt_handle_vmx(0);
2366 	cr4_clear_bits(X86_CR4_VMXE);
2367 
2368 	return -EFAULT;
2369 }
2370 
2371 static int hardware_enable(void)
2372 {
2373 	int cpu = raw_smp_processor_id();
2374 	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
2375 	int r;
2376 
2377 	if (cr4_read_shadow() & X86_CR4_VMXE)
2378 		return -EBUSY;
2379 
2380 	/*
2381 	 * This can happen if we hot-added a CPU but failed to allocate
2382 	 * the VP assist page for it.
2383 	 */
2384 	if (static_branch_unlikely(&enable_evmcs) &&
2385 	    !hv_get_vp_assist_page(cpu))
2386 		return -EFAULT;
2387 
2388 	r = kvm_cpu_vmxon(phys_addr);
2389 	if (r)
2390 		return r;
2391 
2392 	if (enable_ept)
2393 		ept_sync_global();
2394 
2395 	return 0;
2396 }
2397 
2398 static void vmclear_local_loaded_vmcss(void)
2399 {
2400 	int cpu = raw_smp_processor_id();
2401 	struct loaded_vmcs *v, *n;
2402 
2403 	list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
2404 				 loaded_vmcss_on_cpu_link)
2405 		__loaded_vmcs_clear(v);
2406 }
2407 
2408 
2409 /* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
2410  * tricks.
2411  */
2412 static void kvm_cpu_vmxoff(void)
2413 {
2414 	asm volatile (__ex("vmxoff"));
2415 
2416 	intel_pt_handle_vmx(0);
2417 	cr4_clear_bits(X86_CR4_VMXE);
2418 }
2419 
2420 static void hardware_disable(void)
2421 {
2422 	vmclear_local_loaded_vmcss();
2423 	kvm_cpu_vmxoff();
2424 }
2425 
2426 /*
2427  * There is no X86_FEATURE for SGX yet, so query CPUID directly instead
2428  * of going through cpu_has(), to ensure KVM traps ENCLS whenever it's
2429  * supported in hardware.  It does not matter whether
2430  * the host OS supports or has enabled SGX.
2431  */
2432 static bool cpu_has_sgx(void)
2433 {
2434 	return cpuid_eax(0) >= 0x12 && (cpuid_eax(0x12) & BIT(0));
2435 }
2436 
2437 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
2438 				      u32 msr, u32 *result)
2439 {
2440 	u32 vmx_msr_low, vmx_msr_high;
2441 	u32 ctl = ctl_min | ctl_opt;
2442 
2443 	rdmsr(msr, vmx_msr_low, vmx_msr_high);
2444 
2445 	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
2446 	ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */
2447 
2448 	/* Ensure minimum (required) set of control bits are supported. */
2449 	if (ctl_min & ~ctl)
2450 		return -EIO;
2451 
2452 	*result = ctl;
2453 	return 0;
2454 }
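
/*
 * Editorial walk-through with made-up MSR values: suppose ctl_min =
 * 0x0004, ctl_opt = 0x0300, and the capability MSR reads back
 * vmx_msr_low = 0x0016 (must-be-one) and vmx_msr_high = 0x0136
 * (may-be-one):
 *
 *	ctl  = 0x0004 | 0x0300 = 0x0304
 *	ctl &= 0x0136          = 0x0104   (optional bit 0x0200 dropped)
 *	ctl |= 0x0016          = 0x0116   (required-one bits forced on)
 *
 * ctl_min & ~ctl == 0, so the call succeeds with *result = 0x0116; had
 * the hardware instead cleared a ctl_min bit, the function would have
 * returned -EIO.
 */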
2455 
2456 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
2457 				    struct vmx_capability *vmx_cap)
2458 {
2459 	u32 vmx_msr_low, vmx_msr_high;
2460 	u32 min, opt, min2, opt2;
2461 	u32 _pin_based_exec_control = 0;
2462 	u32 _cpu_based_exec_control = 0;
2463 	u32 _cpu_based_2nd_exec_control = 0;
2464 	u32 _vmexit_control = 0;
2465 	u32 _vmentry_control = 0;
2466 
2467 	memset(vmcs_conf, 0, sizeof(*vmcs_conf));
2468 	min = CPU_BASED_HLT_EXITING |
2469 #ifdef CONFIG_X86_64
2470 	      CPU_BASED_CR8_LOAD_EXITING |
2471 	      CPU_BASED_CR8_STORE_EXITING |
2472 #endif
2473 	      CPU_BASED_CR3_LOAD_EXITING |
2474 	      CPU_BASED_CR3_STORE_EXITING |
2475 	      CPU_BASED_UNCOND_IO_EXITING |
2476 	      CPU_BASED_MOV_DR_EXITING |
2477 	      CPU_BASED_USE_TSC_OFFSETTING |
2478 	      CPU_BASED_MWAIT_EXITING |
2479 	      CPU_BASED_MONITOR_EXITING |
2480 	      CPU_BASED_INVLPG_EXITING |
2481 	      CPU_BASED_RDPMC_EXITING;
2482 
2483 	opt = CPU_BASED_TPR_SHADOW |
2484 	      CPU_BASED_USE_MSR_BITMAPS |
2485 	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
2486 	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
2487 				&_cpu_based_exec_control) < 0)
2488 		return -EIO;
2489 #ifdef CONFIG_X86_64
2490 	if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
2491 		_cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
2492 					   ~CPU_BASED_CR8_STORE_EXITING;
2493 #endif
2494 	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
2495 		min2 = 0;
2496 		opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2497 			SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2498 			SECONDARY_EXEC_WBINVD_EXITING |
2499 			SECONDARY_EXEC_ENABLE_VPID |
2500 			SECONDARY_EXEC_ENABLE_EPT |
2501 			SECONDARY_EXEC_UNRESTRICTED_GUEST |
2502 			SECONDARY_EXEC_PAUSE_LOOP_EXITING |
2503 			SECONDARY_EXEC_DESC |
2504 			SECONDARY_EXEC_ENABLE_RDTSCP |
2505 			SECONDARY_EXEC_ENABLE_INVPCID |
2506 			SECONDARY_EXEC_APIC_REGISTER_VIRT |
2507 			SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2508 			SECONDARY_EXEC_SHADOW_VMCS |
2509 			SECONDARY_EXEC_XSAVES |
2510 			SECONDARY_EXEC_RDSEED_EXITING |
2511 			SECONDARY_EXEC_RDRAND_EXITING |
2512 			SECONDARY_EXEC_ENABLE_PML |
2513 			SECONDARY_EXEC_TSC_SCALING |
2514 			SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
2515 			SECONDARY_EXEC_PT_USE_GPA |
2516 			SECONDARY_EXEC_PT_CONCEAL_VMX |
2517 			SECONDARY_EXEC_ENABLE_VMFUNC;
2518 		if (cpu_has_sgx())
2519 			opt2 |= SECONDARY_EXEC_ENCLS_EXITING;
2520 		if (adjust_vmx_controls(min2, opt2,
2521 					MSR_IA32_VMX_PROCBASED_CTLS2,
2522 					&_cpu_based_2nd_exec_control) < 0)
2523 			return -EIO;
2524 	}
2525 #ifndef CONFIG_X86_64
2526 	if (!(_cpu_based_2nd_exec_control &
2527 				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
2528 		_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
2529 #endif
2530 
2531 	if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
2532 		_cpu_based_2nd_exec_control &= ~(
2533 				SECONDARY_EXEC_APIC_REGISTER_VIRT |
2534 				SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2535 				SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
2536 
2537 	rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP,
2538 		&vmx_cap->ept, &vmx_cap->vpid);
2539 
2540 	if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
2541 		/* CR3 accesses and invlpg don't need to cause VM exits when
2542 		   EPT is enabled */
2543 		_cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
2544 					     CPU_BASED_CR3_STORE_EXITING |
2545 					     CPU_BASED_INVLPG_EXITING);
2546 	} else if (vmx_cap->ept) {
2547 		vmx_cap->ept = 0;
2548 		pr_warn_once("EPT CAP should not exist if not support "
2549 				"1-setting enable EPT VM-execution control\n");
2550 	}
2551 	if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) &&
2552 		vmx_cap->vpid) {
2553 		vmx_cap->vpid = 0;
2554 		pr_warn_once("VPID CAP should not exist if not support "
2555 				"1-setting enable VPID VM-execution control\n");
2556 	}
2557 
2558 	min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT;
2559 #ifdef CONFIG_X86_64
2560 	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
2561 #endif
2562 	opt = VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2563 	      VM_EXIT_LOAD_IA32_PAT |
2564 	      VM_EXIT_LOAD_IA32_EFER |
2565 	      VM_EXIT_CLEAR_BNDCFGS |
2566 	      VM_EXIT_PT_CONCEAL_PIP |
2567 	      VM_EXIT_CLEAR_IA32_RTIT_CTL;
2568 	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
2569 				&_vmexit_control) < 0)
2570 		return -EIO;
2571 
2572 	min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
2573 	opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
2574 		 PIN_BASED_VMX_PREEMPTION_TIMER;
2575 	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
2576 				&_pin_based_exec_control) < 0)
2577 		return -EIO;
2578 
2579 	if (cpu_has_broken_vmx_preemption_timer())
2580 		_pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
2581 	if (!(_cpu_based_2nd_exec_control &
2582 		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY))
2583 		_pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
2584 
2585 	min = VM_ENTRY_LOAD_DEBUG_CONTROLS;
2586 	opt = VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |
2587 	      VM_ENTRY_LOAD_IA32_PAT |
2588 	      VM_ENTRY_LOAD_IA32_EFER |
2589 	      VM_ENTRY_LOAD_BNDCFGS |
2590 	      VM_ENTRY_PT_CONCEAL_PIP |
2591 	      VM_ENTRY_LOAD_IA32_RTIT_CTL;
2592 	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
2593 				&_vmentry_control) < 0)
2594 		return -EIO;
2595 
2596 	/*
2597 	 * Some cpus support VM_{ENTRY,EXIT}_IA32_PERF_GLOBAL_CTRL but they
2598 	 * can't be used due to an erratum where VM Exit may incorrectly clear
2599 	 * IA32_PERF_GLOBAL_CTRL[34:32].  Work around the erratum by using the
2600 	 * MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL.
2601 	 */
2602 	if (boot_cpu_data.x86 == 0x6) {
2603 		switch (boot_cpu_data.x86_model) {
2604 		case 26: /* AAK155 */
2605 		case 30: /* AAP115 */
2606 		case 37: /* AAT100 */
2607 		case 44: /* BC86,AAY89,BD102 */
2608 		case 46: /* BA97 */
2609 			_vmentry_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
2610 			_vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
2611 			pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
2612 					"does not work properly. Using workaround\n");
2613 			break;
2614 		default:
2615 			break;
2616 		}
2617 	}
2618 
2619 
2620 	rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
2621 
2622 	/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
2623 	if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
2624 		return -EIO;
2625 
2626 #ifdef CONFIG_X86_64
2627 	/* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
2628 	if (vmx_msr_high & (1u<<16))
2629 		return -EIO;
2630 #endif
2631 
2632 	/* Require Write-Back (WB) memory type for VMCS accesses. */
2633 	if (((vmx_msr_high >> 18) & 15) != 6)
2634 		return -EIO;
2635 
2636 	vmcs_conf->size = vmx_msr_high & 0x1fff;
2637 	vmcs_conf->order = get_order(vmcs_conf->size);
2638 	vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
2639 
2640 	vmcs_conf->revision_id = vmx_msr_low;
2641 
2642 	vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
2643 	vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
2644 	vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
2645 	vmcs_conf->vmexit_ctrl         = _vmexit_control;
2646 	vmcs_conf->vmentry_ctrl        = _vmentry_control;
2647 
2648 #if IS_ENABLED(CONFIG_HYPERV)
2649 	if (enlightened_vmcs)
2650 		evmcs_sanitize_exec_ctrls(vmcs_conf);
2651 #endif
2652 
2653 	return 0;
2654 }
2655 
2656 struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
2657 {
2658 	int node = cpu_to_node(cpu);
2659 	struct page *pages;
2660 	struct vmcs *vmcs;
2661 
2662 	pages = __alloc_pages_node(node, flags, vmcs_config.order);
2663 	if (!pages)
2664 		return NULL;
2665 	vmcs = page_address(pages);
2666 	memset(vmcs, 0, vmcs_config.size);
2667 
2668 	/* KVM supports Enlightened VMCS v1 only */
2669 	if (static_branch_unlikely(&enable_evmcs))
2670 		vmcs->hdr.revision_id = KVM_EVMCS_VERSION;
2671 	else
2672 		vmcs->hdr.revision_id = vmcs_config.revision_id;
2673 
2674 	if (shadow)
2675 		vmcs->hdr.shadow_vmcs = 1;
2676 	return vmcs;
2677 }
2678 
2679 void free_vmcs(struct vmcs *vmcs)
2680 {
2681 	free_pages((unsigned long)vmcs, vmcs_config.order);
2682 }
2683 
2684 /*
2685  * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
2686  */
2687 void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
2688 {
2689 	if (!loaded_vmcs->vmcs)
2690 		return;
2691 	loaded_vmcs_clear(loaded_vmcs);
2692 	free_vmcs(loaded_vmcs->vmcs);
2693 	loaded_vmcs->vmcs = NULL;
2694 	if (loaded_vmcs->msr_bitmap)
2695 		free_page((unsigned long)loaded_vmcs->msr_bitmap);
2696 	WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
2697 }
2698 
2699 int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
2700 {
2701 	loaded_vmcs->vmcs = alloc_vmcs(false);
2702 	if (!loaded_vmcs->vmcs)
2703 		return -ENOMEM;
2704 
2705 	vmcs_clear(loaded_vmcs->vmcs);
2706 
2707 	loaded_vmcs->shadow_vmcs = NULL;
2708 	loaded_vmcs->hv_timer_soft_disabled = false;
2709 	loaded_vmcs->cpu = -1;
2710 	loaded_vmcs->launched = 0;
2711 
2712 	if (cpu_has_vmx_msr_bitmap()) {
2713 		loaded_vmcs->msr_bitmap = (unsigned long *)
2714 				__get_free_page(GFP_KERNEL_ACCOUNT);
2715 		if (!loaded_vmcs->msr_bitmap)
2716 			goto out_vmcs;
2717 		memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
2718 
2719 		if (IS_ENABLED(CONFIG_HYPERV) &&
2720 		    static_branch_unlikely(&enable_evmcs) &&
2721 		    (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
2722 			struct hv_enlightened_vmcs *evmcs =
2723 				(struct hv_enlightened_vmcs *)loaded_vmcs->vmcs;
2724 
2725 			evmcs->hv_enlightenments_control.msr_bitmap = 1;
2726 		}
2727 	}
2728 
2729 	memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));
2730 	memset(&loaded_vmcs->controls_shadow, 0,
2731 		sizeof(struct vmcs_controls_shadow));
2732 
2733 	return 0;
2734 
2735 out_vmcs:
2736 	free_loaded_vmcs(loaded_vmcs);
2737 	return -ENOMEM;
2738 }
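
/*
 * Editorial sketch of the expected caller pattern for the pair above
 * (hypothetical local code, error handling abbreviated):
 *
 *	struct loaded_vmcs lv;
 *
 *	if (alloc_loaded_vmcs(&lv))
 *		return -ENOMEM;
 *	...use lv.vmcs and lv.msr_bitmap...
 *	free_loaded_vmcs(&lv);	/- VMCLEARs on the last CPU, then frees
 */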
2739 
2740 static void free_kvm_area(void)
2741 {
2742 	int cpu;
2743 
2744 	for_each_possible_cpu(cpu) {
2745 		free_vmcs(per_cpu(vmxarea, cpu));
2746 		per_cpu(vmxarea, cpu) = NULL;
2747 	}
2748 }
2749 
2750 static __init int alloc_kvm_area(void)
2751 {
2752 	int cpu;
2753 
2754 	for_each_possible_cpu(cpu) {
2755 		struct vmcs *vmcs;
2756 
2757 		vmcs = alloc_vmcs_cpu(false, cpu, GFP_KERNEL);
2758 		if (!vmcs) {
2759 			free_kvm_area();
2760 			return -ENOMEM;
2761 		}
2762 
2763 		/*
2764 		 * When eVMCS is enabled, alloc_vmcs_cpu() sets
2765 		 * vmcs->revision_id to KVM_EVMCS_VERSION instead of
2766 		 * revision_id reported by MSR_IA32_VMX_BASIC.
2767 		 *
2768 		 * However, even though this is not explicitly documented in
2769 		 * the TLFS, the VMXArea passed as the VMXON argument should
2770 		 * still be marked with the revision_id reported by the
2771 		 * physical CPU.
2772 		 */
2773 		if (static_branch_unlikely(&enable_evmcs))
2774 			vmcs->hdr.revision_id = vmcs_config.revision_id;
2775 
2776 		per_cpu(vmxarea, cpu) = vmcs;
2777 	}
2778 	return 0;
2779 }
2780 
2781 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
2782 		struct kvm_segment *save)
2783 {
2784 	if (!emulate_invalid_guest_state) {
2785 		/*
2786 		 * CS and SS RPL should be equal during guest entry according
2787 		 * to the VMX spec, but in reality it is not always so. Since the
2788 		 * vcpu is in the middle of the transition from real mode to
2789 		 * protected mode it is safe to assume that RPL 0 is a good
2790 		 * default value.
2791 		 */
2792 		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
2793 			save->selector &= ~SEGMENT_RPL_MASK;
2794 		save->dpl = save->selector & SEGMENT_RPL_MASK;
2795 		save->s = 1;
2796 	}
2797 	vmx_set_segment(vcpu, save, seg);
2798 }
2799 
2800 static void enter_pmode(struct kvm_vcpu *vcpu)
2801 {
2802 	unsigned long flags;
2803 	struct vcpu_vmx *vmx = to_vmx(vcpu);
2804 
2805 	/*
2806 	 * Update the real mode segment cache. It may not be up to date if a
2807 	 * segment register was written while the vcpu was in guest mode.
2808 	 */
2809 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
2810 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
2811 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
2812 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
2813 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
2814 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
2815 
2816 	vmx->rmode.vm86_active = 0;
2817 
2818 	vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
2819 
2820 	flags = vmcs_readl(GUEST_RFLAGS);
2821 	flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
2822 	flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
2823 	vmcs_writel(GUEST_RFLAGS, flags);
2824 
2825 	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
2826 			(vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
2827 
2828 	update_exception_bitmap(vcpu);
2829 
2830 	fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
2831 	fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
2832 	fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
2833 	fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
2834 	fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
2835 	fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
2836 }
2837 
2838 static void fix_rmode_seg(int seg, struct kvm_segment *save)
2839 {
2840 	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
2841 	struct kvm_segment var = *save;
2842 
2843 	var.dpl = 0x3;
2844 	if (seg == VCPU_SREG_CS)
2845 		var.type = 0x3;
2846 
2847 	if (!emulate_invalid_guest_state) {
2848 		var.selector = var.base >> 4;
2849 		var.base = var.base & 0xffff0;
2850 		var.limit = 0xffff;
2851 		var.g = 0;
2852 		var.db = 0;
2853 		var.present = 1;
2854 		var.s = 1;
2855 		var.l = 0;
2856 		var.unusable = 0;
2857 		var.type = 0x3;
2858 		var.avl = 0;
2859 		if (save->base & 0xf)
2860 			printk_once(KERN_WARNING "kvm: segment base is not "
2861 					"paragraph aligned when entering "
2862 					"protected mode (seg=%d)", seg);
2863 	}
2864 
2865 	vmcs_write16(sf->selector, var.selector);
2866 	vmcs_writel(sf->base, var.base);
2867 	vmcs_write32(sf->limit, var.limit);
2868 	vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
2869 }
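
/*
 * Editorial note: the !emulate_invalid_guest_state path above recreates
 * classic real-mode segmentation, where linear = selector * 16.  For
 * example, base 0x000b8000 yields selector 0x000b8000 >> 4 = 0xb800 and
 * base 0x000b8000 & 0xffff0 = 0xb8000; a base that is not 16-byte
 * (paragraph) aligned cannot be expressed this way, hence the warning.
 */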
2870 
2871 static void enter_rmode(struct kvm_vcpu *vcpu)
2872 {
2873 	unsigned long flags;
2874 	struct vcpu_vmx *vmx = to_vmx(vcpu);
2875 	struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);
2876 
2877 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
2878 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
2879 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
2880 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
2881 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
2882 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
2883 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
2884 
2885 	vmx->rmode.vm86_active = 1;
2886 
2887 	/*
2888 	 * Very old userspace does not call KVM_SET_TSS_ADDR before entering
2889 	 * vcpu. Warn the user that an update is overdue.
2890 	 */
2891 	if (!kvm_vmx->tss_addr)
2892 		printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR needs to be "
2893 			     "called before entering vcpu\n");
2894 
2895 	vmx_segment_cache_clear(vmx);
2896 
2897 	vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr);
2898 	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
2899 	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
2900 
2901 	flags = vmcs_readl(GUEST_RFLAGS);
2902 	vmx->rmode.save_rflags = flags;
2903 
2904 	flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
2905 
2906 	vmcs_writel(GUEST_RFLAGS, flags);
2907 	vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
2908 	update_exception_bitmap(vcpu);
2909 
2910 	fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
2911 	fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
2912 	fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
2913 	fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
2914 	fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
2915 	fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
2916 
2917 	kvm_mmu_reset_context(vcpu);
2918 }
2919 
2920 int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
2921 {
2922 	struct vcpu_vmx *vmx = to_vmx(vcpu);
2923 	struct vmx_uret_msr *msr = vmx_find_uret_msr(vmx, MSR_EFER);
2924 
2925 	/* Nothing to do if hardware doesn't support EFER. */
2926 	if (!msr)
2927 		return 0;
2928 
2929 	vcpu->arch.efer = efer;
2930 	if (efer & EFER_LMA) {
2931 		vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
2932 		msr->data = efer;
2933 	} else {
2934 		vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
2935 
2936 		msr->data = efer & ~EFER_LME;
2937 	}
2938 	setup_msrs(vmx);
2939 	return 0;
2940 }
2941 
2942 #ifdef CONFIG_X86_64
2943 
2944 static void enter_lmode(struct kvm_vcpu *vcpu)
2945 {
2946 	u32 guest_tr_ar;
2947 
2948 	vmx_segment_cache_clear(to_vmx(vcpu));
2949 
2950 	guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
2951 	if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) {
2952 		pr_debug_ratelimited("%s: tss fixup for long mode.\n",
2953 				     __func__);
2954 		vmcs_write32(GUEST_TR_AR_BYTES,
2955 			     (guest_tr_ar & ~VMX_AR_TYPE_MASK)
2956 			     | VMX_AR_TYPE_BUSY_64_TSS);
2957 	}
2958 	vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
2959 }
2960 
2961 static void exit_lmode(struct kvm_vcpu *vcpu)
2962 {
2963 	vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
2964 	vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
2965 }
2966 
2967 #endif
2968 
2969 static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
2970 {
2971 	struct vcpu_vmx *vmx = to_vmx(vcpu);
2972 
2973 	/*
2974 	 * INVEPT must be issued when EPT is enabled, irrespective of VPID, as
2975 	 * the CPU is not required to invalidate guest-physical mappings on
2976 	 * VM-Entry, even if VPID is disabled.  Guest-physical mappings are
2977 	 * associated with the root EPT structure and not any particular VPID
2978 	 * (INVVPID also isn't required to invalidate guest-physical mappings).
2979 	 */
2980 	if (enable_ept) {
2981 		ept_sync_global();
2982 	} else if (enable_vpid) {
2983 		if (cpu_has_vmx_invvpid_global()) {
2984 			vpid_sync_vcpu_global();
2985 		} else {
2986 			vpid_sync_vcpu_single(vmx->vpid);
2987 			vpid_sync_vcpu_single(vmx->nested.vpid02);
2988 		}
2989 	}
2990 }
2991 
2992 static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
2993 {
2994 	struct kvm_mmu *mmu = vcpu->arch.mmu;
2995 	u64 root_hpa = mmu->root_hpa;
2996 
2997 	/* No flush required if the current context is invalid. */
2998 	if (!VALID_PAGE(root_hpa))
2999 		return;
3000 
3001 	if (enable_ept)
3002 		ept_sync_context(construct_eptp(vcpu, root_hpa,
3003 						mmu->shadow_root_level));
3004 	else if (!is_guest_mode(vcpu))
3005 		vpid_sync_context(to_vmx(vcpu)->vpid);
3006 	else
3007 		vpid_sync_context(nested_get_vpid02(vcpu));
3008 }
3009 
3010 static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
3011 {
3012 	/*
3013 	 * vpid_sync_vcpu_addr() is a nop if vmx->vpid==0, see the comment in
3014 	 * vmx_flush_tlb_guest() for an explanation of why this is ok.
3015 	 */
3016 	vpid_sync_vcpu_addr(to_vmx(vcpu)->vpid, addr);
3017 }
3018 
3019 static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
3020 {
3021 	/*
3022 	 * vpid_sync_context() is a nop if vmx->vpid==0, e.g. if enable_vpid==0
3023 	 * or a vpid couldn't be allocated for this vCPU.  VM-Enter and VM-Exit
3024 	 * are required to flush GVA->{G,H}PA mappings from the TLB if vpid is
3025 	 * disabled (VM-Enter with vpid enabled and vpid==0 is disallowed),
3026 	 * i.e. no explicit INVVPID is necessary.
3027 	 */
3028 	vpid_sync_context(to_vmx(vcpu)->vpid);
3029 }
3030 
3031 void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
3032 {
3033 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3034 
3035 	if (!kvm_register_is_dirty(vcpu, VCPU_EXREG_PDPTR))
3036 		return;
3037 
3038 	if (is_pae_paging(vcpu)) {
3039 		vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
3040 		vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
3041 		vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
3042 		vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
3043 	}
3044 }
3045 
3046 void ept_save_pdptrs(struct kvm_vcpu *vcpu)
3047 {
3048 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3049 
3050 	if (WARN_ON_ONCE(!is_pae_paging(vcpu)))
3051 		return;
3052 
3053 	mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
3054 	mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
3055 	mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
3056 	mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
3057 
3058 	kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
3059 }
3060 
3061 static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
3062 					unsigned long cr0,
3063 					struct kvm_vcpu *vcpu)
3064 {
3065 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3066 
3067 	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
3068 		vmx_cache_reg(vcpu, VCPU_EXREG_CR3);
3069 	if (!(cr0 & X86_CR0_PG)) {
3070 		/* From paging/starting to nonpaging */
3071 		exec_controls_setbit(vmx, CPU_BASED_CR3_LOAD_EXITING |
3072 					  CPU_BASED_CR3_STORE_EXITING);
3073 		vcpu->arch.cr0 = cr0;
3074 		vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
3075 	} else if (!is_paging(vcpu)) {
3076 		/* From nonpaging to paging */
3077 		exec_controls_clearbit(vmx, CPU_BASED_CR3_LOAD_EXITING |
3078 					    CPU_BASED_CR3_STORE_EXITING);
3079 		vcpu->arch.cr0 = cr0;
3080 		vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
3081 	}
3082 
3083 	if (!(cr0 & X86_CR0_WP))
3084 		*hw_cr0 &= ~X86_CR0_WP;
3085 }
3086 
3087 void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
3088 {
3089 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3090 	unsigned long hw_cr0;
3091 
3092 	hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF);
3093 	if (is_unrestricted_guest(vcpu))
3094 		hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
3095 	else {
3096 		hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
3097 
3098 		if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
3099 			enter_pmode(vcpu);
3100 
3101 		if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
3102 			enter_rmode(vcpu);
3103 	}
3104 
3105 #ifdef CONFIG_X86_64
3106 	if (vcpu->arch.efer & EFER_LME) {
3107 		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
3108 			enter_lmode(vcpu);
3109 		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
3110 			exit_lmode(vcpu);
3111 	}
3112 #endif
3113 
3114 	if (enable_ept && !is_unrestricted_guest(vcpu))
3115 		ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
3116 
3117 	vmcs_writel(CR0_READ_SHADOW, cr0);
3118 	vmcs_writel(GUEST_CR0, hw_cr0);
3119 	vcpu->arch.cr0 = cr0;
3120 	kvm_register_mark_available(vcpu, VCPU_EXREG_CR0);
3121 
3122 	/* depends on vcpu->arch.cr0 to be set to a new value */
3123 	vmx->emulation_required = emulation_required(vcpu);
3124 }
3125 
3126 static int vmx_get_max_tdp_level(void)
3127 {
3128 	if (cpu_has_vmx_ept_5levels())
3129 		return 5;
3130 	return 4;
3131 }
3132 
3133 u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa,
3134 		   int root_level)
3135 {
3136 	u64 eptp = VMX_EPTP_MT_WB;
3137 
3138 	eptp |= (root_level == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4;
3139 
3140 	if (enable_ept_ad_bits &&
3141 	    (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
3142 		eptp |= VMX_EPTP_AD_ENABLE_BIT;
3143 	eptp |= (root_hpa & PAGE_MASK);
3144 
3145 	return eptp;
3146 }
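
/*
 * Editorial walk-through, using the EPTP encodings this function relies
 * on (WB memtype = 6, 4-level walk = 0x18, A/D enable = bit 6): for
 * root_hpa = 0x12345000, root_level = 4 and EPT A/D bits enabled,
 *
 *	eptp = 0x6 | 0x18 | 0x40 | 0x12345000 = 0x1234505e
 *
 * i.e. the low bits encode memory type, page-walk length and A/D
 * enable, while the rest is the page-aligned root of the EPT tables.
 */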
3147 
3148 static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd,
3149 			     int pgd_level)
3150 {
3151 	struct kvm *kvm = vcpu->kvm;
3152 	bool update_guest_cr3 = true;
3153 	unsigned long guest_cr3;
3154 	u64 eptp;
3155 
3156 	if (enable_ept) {
3157 		eptp = construct_eptp(vcpu, pgd, pgd_level);
3158 		vmcs_write64(EPT_POINTER, eptp);
3159 
3160 		if (kvm_x86_ops.tlb_remote_flush) {
3161 			spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
3162 			to_vmx(vcpu)->ept_pointer = eptp;
3163 			to_kvm_vmx(kvm)->ept_pointers_match
3164 				= EPT_POINTERS_CHECK;
3165 			spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
3166 		}
3167 
3168 		if (!enable_unrestricted_guest && !is_paging(vcpu))
3169 			guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
3170 		else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
3171 			guest_cr3 = vcpu->arch.cr3;
3172 		else /* vmcs01.GUEST_CR3 is already up-to-date. */
3173 			update_guest_cr3 = false;
3174 		vmx_ept_load_pdptrs(vcpu);
3175 	} else {
3176 		guest_cr3 = pgd;
3177 	}
3178 
3179 	if (update_guest_cr3)
3180 		vmcs_writel(GUEST_CR3, guest_cr3);
3181 }
3182 
3183 int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
3184 {
3185 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3186 	/*
3187 	 * Pass through host's Machine Check Enable value to hw_cr4, which
3188 	 * is in force while we are in guest mode.  Do not let guests control
3189 	 * this bit, even if host CR4.MCE == 0.
3190 	 */
3191 	unsigned long hw_cr4;
3192 
3193 	hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE);
3194 	if (is_unrestricted_guest(vcpu))
3195 		hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST;
3196 	else if (vmx->rmode.vm86_active)
3197 		hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON;
3198 	else
3199 		hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON;
3200 
3201 	if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) {
3202 		if (cr4 & X86_CR4_UMIP) {
3203 			secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC);
3204 			hw_cr4 &= ~X86_CR4_UMIP;
3205 		} else if (!is_guest_mode(vcpu) ||
3206 			!nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) {
3207 			secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_DESC);
3208 		}
3209 	}
3210 
3211 	if (cr4 & X86_CR4_VMXE) {
3212 		/*
3213 		 * To use VMXON (and later other VMX instructions), a guest
3214 		 * must first be able to turn on cr4.VMXE (see handle_vmon()).
3215 		 * So basically the check on whether to allow nested VMX
3216 		 * is here.  We operate under the default treatment of SMM,
3217 		 * so VMX cannot be enabled under SMM.
3218 		 */
3219 		if (!nested_vmx_allowed(vcpu) || is_smm(vcpu))
3220 			return 1;
3221 	}
3222 
3223 	if (vmx->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
3224 		return 1;
3225 
3226 	vcpu->arch.cr4 = cr4;
3227 	kvm_register_mark_available(vcpu, VCPU_EXREG_CR4);
3228 
3229 	if (!is_unrestricted_guest(vcpu)) {
3230 		if (enable_ept) {
3231 			if (!is_paging(vcpu)) {
3232 				hw_cr4 &= ~X86_CR4_PAE;
3233 				hw_cr4 |= X86_CR4_PSE;
3234 			} else if (!(cr4 & X86_CR4_PAE)) {
3235 				hw_cr4 &= ~X86_CR4_PAE;
3236 			}
3237 		}
3238 
3239 		/*
3240 		 * SMEP/SMAP/PKU is disabled if CPU is in non-paging mode in
3241 		 * hardware.  To emulate this behavior, SMEP/SMAP/PKU needs
3242 		 * to be manually disabled when guest switches to non-paging
3243 		 * mode.
3244 		 *
3245 		 * If !enable_unrestricted_guest, the CPU is always running
3246 		 * with CR0.PG=1 and CR4 needs to be modified.
3247 		 * If enable_unrestricted_guest, the CPU automatically
3248 		 * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0.
3249 		 */
3250 		if (!is_paging(vcpu))
3251 			hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
3252 	}
3253 
3254 	vmcs_writel(CR4_READ_SHADOW, cr4);
3255 	vmcs_writel(GUEST_CR4, hw_cr4);
3256 	return 0;
3257 }
3258 
3259 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
3260 {
3261 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3262 	u32 ar;
3263 
3264 	if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3265 		*var = vmx->rmode.segs[seg];
3266 		if (seg == VCPU_SREG_TR
3267 		    || var->selector == vmx_read_guest_seg_selector(vmx, seg))
3268 			return;
3269 		var->base = vmx_read_guest_seg_base(vmx, seg);
3270 		var->selector = vmx_read_guest_seg_selector(vmx, seg);
3271 		return;
3272 	}
3273 	var->base = vmx_read_guest_seg_base(vmx, seg);
3274 	var->limit = vmx_read_guest_seg_limit(vmx, seg);
3275 	var->selector = vmx_read_guest_seg_selector(vmx, seg);
3276 	ar = vmx_read_guest_seg_ar(vmx, seg);
3277 	var->unusable = (ar >> 16) & 1;
3278 	var->type = ar & 15;
3279 	var->s = (ar >> 4) & 1;
3280 	var->dpl = (ar >> 5) & 3;
3281 	/*
3282 	 * Some userspaces do not preserve unusable property. Since usable
3283 	 * segment has to be present according to VMX spec we can use present
3284 	 * property to amend userspace bug by making unusable segment always
3285 	 * nonpresent. vmx_segment_access_rights() already marks nonpresent
3286 	 * segment as unusable.
3287 	 */
3288 	var->present = !var->unusable;
3289 	var->avl = (ar >> 12) & 1;
3290 	var->l = (ar >> 13) & 1;
3291 	var->db = (ar >> 14) & 1;
3292 	var->g = (ar >> 15) & 1;
3293 }
3294 
3295 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
3296 {
3297 	struct kvm_segment s;
3298 
3299 	if (to_vmx(vcpu)->rmode.vm86_active) {
3300 		vmx_get_segment(vcpu, &s, seg);
3301 		return s.base;
3302 	}
3303 	return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
3304 }
3305 
3306 int vmx_get_cpl(struct kvm_vcpu *vcpu)
3307 {
3308 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3309 
3310 	if (unlikely(vmx->rmode.vm86_active))
3311 		return 0;
3312 	else {
3313 		int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
3314 		return VMX_AR_DPL(ar);
3315 	}
3316 }
3317 
3318 static u32 vmx_segment_access_rights(struct kvm_segment *var)
3319 {
3320 	u32 ar;
3321 
3322 	if (var->unusable || !var->present)
3323 		ar = 1 << 16;
3324 	else {
3325 		ar = var->type & 15;
3326 		ar |= (var->s & 1) << 4;
3327 		ar |= (var->dpl & 3) << 5;
3328 		ar |= (var->present & 1) << 7;
3329 		ar |= (var->avl & 1) << 12;
3330 		ar |= (var->l & 1) << 13;
3331 		ar |= (var->db & 1) << 14;
3332 		ar |= (var->g & 1) << 15;
3333 	}
3334 
3335 	return ar;
3336 }
3337 
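/*
 * Illustrative sketch, not part of the original source: the VMX
 * access-rights (AR) encoding unpacked by vmx_get_segment() and packed by
 * vmx_segment_access_rights() above.  For example, a flat 64-bit ring-0
 * code segment has ar = 0xa09b:
 *
 *	bits  3:0  type     = 0xb  (execute/read, accessed)
 *	bit     4  s        = 1    (code/data, not system)
 *	bits  6:5  dpl      = 0
 *	bit     7  present  = 1
 *	bit    12  avl      = 0
 *	bit    13  l        = 1    (64-bit code)
 *	bit    14  db       = 0
 *	bit    15  g        = 1    (4 KiB granularity)
 *	bit    16  unusable = 0
 */
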
3338 void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
3339 {
3340 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3341 	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3342 
3343 	vmx_segment_cache_clear(vmx);
3344 
3345 	if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3346 		vmx->rmode.segs[seg] = *var;
3347 		if (seg == VCPU_SREG_TR)
3348 			vmcs_write16(sf->selector, var->selector);
3349 		else if (var->s)
3350 			fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
3351 		goto out;
3352 	}
3353 
3354 	vmcs_writel(sf->base, var->base);
3355 	vmcs_write32(sf->limit, var->limit);
3356 	vmcs_write16(sf->selector, var->selector);
3357 
3358 	/*
3359 	 * Fix the "Accessed" bit in the AR field of segment registers for
3360 	 * older qemu binaries.
3361 	 * The IA-32 architecture specifies that at the time of processor
3362 	 * reset the "Accessed" bit in the AR field of segment registers is 1,
3363 	 * but qemu sets it to 0 in userland code. This causes an invalid
3364 	 * guest state vmexit when "unrestricted guest" mode is turned on.
3365 	 * A fix for this setup issue in cpu_reset has been pushed to the
3366 	 * qemu tree. Newer qemu binaries with that fix do not need this
3367 	 * kvm hack.
3368 	 */
3369 	if (is_unrestricted_guest(vcpu) && (seg != VCPU_SREG_LDTR))
3370 		var->type |= 0x1; /* Accessed */
3371 
3372 	vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
3373 
3374 out:
3375 	vmx->emulation_required = emulation_required(vcpu);
3376 }
3377 
3378 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
3379 {
3380 	u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
3381 
3382 	*db = (ar >> 14) & 1;
3383 	*l = (ar >> 13) & 1;
3384 }
3385 
3386 static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3387 {
3388 	dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
3389 	dt->address = vmcs_readl(GUEST_IDTR_BASE);
3390 }
3391 
3392 static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3393 {
3394 	vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
3395 	vmcs_writel(GUEST_IDTR_BASE, dt->address);
3396 }
3397 
3398 static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3399 {
3400 	dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
3401 	dt->address = vmcs_readl(GUEST_GDTR_BASE);
3402 }
3403 
3404 static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3405 {
3406 	vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
3407 	vmcs_writel(GUEST_GDTR_BASE, dt->address);
3408 }
3409 
3410 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
3411 {
3412 	struct kvm_segment var;
3413 	u32 ar;
3414 
3415 	vmx_get_segment(vcpu, &var, seg);
3416 	var.dpl = 0x3;
3417 	if (seg == VCPU_SREG_CS)
3418 		var.type = 0x3;
3419 	ar = vmx_segment_access_rights(&var);
3420 
3421 	if (var.base != (var.selector << 4))
3422 		return false;
3423 	if (var.limit != 0xffff)
3424 		return false;
3425 	if (ar != 0xf3)
3426 		return false;
3427 
3428 	return true;
3429 }
3430 
3431 static bool code_segment_valid(struct kvm_vcpu *vcpu)
3432 {
3433 	struct kvm_segment cs;
3434 	unsigned int cs_rpl;
3435 
3436 	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
3437 	cs_rpl = cs.selector & SEGMENT_RPL_MASK;
3438 
3439 	if (cs.unusable)
3440 		return false;
3441 	if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK))
3442 		return false;
3443 	if (!cs.s)
3444 		return false;
3445 	if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) {
3446 		if (cs.dpl > cs_rpl)
3447 			return false;
3448 	} else {
3449 		if (cs.dpl != cs_rpl)
3450 			return false;
3451 	}
3452 	if (!cs.present)
3453 		return false;
3454 
3455 	/* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
3456 	return true;
3457 }
3458 
3459 static bool stack_segment_valid(struct kvm_vcpu *vcpu)
3460 {
3461 	struct kvm_segment ss;
3462 	unsigned int ss_rpl;
3463 
3464 	vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
3465 	ss_rpl = ss.selector & SEGMENT_RPL_MASK;
3466 
3467 	if (ss.unusable)
3468 		return true;
3469 	if (ss.type != 3 && ss.type != 7)
3470 		return false;
3471 	if (!ss.s)
3472 		return false;
3473 	if (ss.dpl != ss_rpl) /* DPL != RPL */
3474 		return false;
3475 	if (!ss.present)
3476 		return false;
3477 
3478 	return true;
3479 }
3480 
3481 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
3482 {
3483 	struct kvm_segment var;
3484 	unsigned int rpl;
3485 
3486 	vmx_get_segment(vcpu, &var, seg);
3487 	rpl = var.selector & SEGMENT_RPL_MASK;
3488 
3489 	if (var.unusable)
3490 		return true;
3491 	if (!var.s)
3492 		return false;
3493 	if (!var.present)
3494 		return false;
3495 	if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) {
3496 		if (var.dpl < rpl) /* DPL < RPL */
3497 			return false;
3498 	}
3499 
3500 	/* TODO: Add other members to kvm_segment_field to allow checking
3501 	 * for other access rights flags.
3502 	 */
3503 	return true;
3504 }
3505 
3506 static bool tr_valid(struct kvm_vcpu *vcpu)
3507 {
3508 	struct kvm_segment tr;
3509 
3510 	vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
3511 
3512 	if (tr.unusable)
3513 		return false;
3514 	if (tr.selector & SEGMENT_TI_MASK)	/* TI = 1 */
3515 		return false;
3516 	if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
3517 		return false;
3518 	if (!tr.present)
3519 		return false;
3520 
3521 	return true;
3522 }
3523 
3524 static bool ldtr_valid(struct kvm_vcpu *vcpu)
3525 {
3526 	struct kvm_segment ldtr;
3527 
3528 	vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
3529 
3530 	if (ldtr.unusable)
3531 		return true;
3532 	if (ldtr.selector & SEGMENT_TI_MASK)	/* TI = 1 */
3533 		return false;
3534 	if (ldtr.type != 2)
3535 		return false;
3536 	if (!ldtr.present)
3537 		return false;
3538 
3539 	return true;
3540 }
3541 
3542 static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
3543 {
3544 	struct kvm_segment cs, ss;
3545 
3546 	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
3547 	vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
3548 
3549 	return ((cs.selector & SEGMENT_RPL_MASK) ==
3550 		 (ss.selector & SEGMENT_RPL_MASK));
3551 }
3552 
3553 /*
3554  * Check if the guest state is valid. Returns true if valid, false
3555  * otherwise.
3556  * We assume that registers are always usable.
3557  */
3558 bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu)
3559 {
3560 	/* real mode guest state checks */
3561 	if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
3562 		if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
3563 			return false;
3564 		if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
3565 			return false;
3566 		if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
3567 			return false;
3568 		if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
3569 			return false;
3570 		if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
3571 			return false;
3572 		if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
3573 			return false;
3574 	} else {
3575 	/* protected mode guest state checks */
3576 		if (!cs_ss_rpl_check(vcpu))
3577 			return false;
3578 		if (!code_segment_valid(vcpu))
3579 			return false;
3580 		if (!stack_segment_valid(vcpu))
3581 			return false;
3582 		if (!data_segment_valid(vcpu, VCPU_SREG_DS))
3583 			return false;
3584 		if (!data_segment_valid(vcpu, VCPU_SREG_ES))
3585 			return false;
3586 		if (!data_segment_valid(vcpu, VCPU_SREG_FS))
3587 			return false;
3588 		if (!data_segment_valid(vcpu, VCPU_SREG_GS))
3589 			return false;
3590 		if (!tr_valid(vcpu))
3591 			return false;
3592 		if (!ldtr_valid(vcpu))
3593 			return false;
3594 	}
3595 	/* TODO:
3596 	 * - Add checks on RIP
3597 	 * - Add checks on RFLAGS
3598 	 */
3599 
3600 	return true;
3601 }
3602 
3603 static int init_rmode_tss(struct kvm *kvm)
3604 {
3605 	gfn_t fn;
3606 	u16 data = 0;
3607 	int idx, r;
3608 
3609 	idx = srcu_read_lock(&kvm->srcu);
3610 	fn = to_kvm_vmx(kvm)->tss_addr >> PAGE_SHIFT;
3611 	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
3612 	if (r < 0)
3613 		goto out;
3614 	data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
3615 	r = kvm_write_guest_page(kvm, fn++, &data,
3616 			TSS_IOPB_BASE_OFFSET, sizeof(u16));
3617 	if (r < 0)
3618 		goto out;
3619 	r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
3620 	if (r < 0)
3621 		goto out;
3622 	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
3623 	if (r < 0)
3624 		goto out;
3625 	data = ~0;
3626 	r = kvm_write_guest_page(kvm, fn, &data,
3627 				 RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
3628 				 sizeof(u8));
3629 out:
3630 	srcu_read_unlock(&kvm->srcu, idx);
3631 	return r;
3632 }
3633 
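/*
 * Sketch of the guest-page layout built by init_rmode_tss() above,
 * assuming the usual KVM constants (TSS_BASE_SIZE 0x68,
 * TSS_REDIRECTION_SIZE 0x20, TSS_IOPB_BASE_OFFSET 0x66, and an 8 KiB I/O
 * permission bitmap, so RMODE_TSS_SIZE spans three pages):
 *
 *	+0x00 .. 0x67        zeroed TSS fields
 *	+0x66                I/O map base = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE
 *	+0x68 .. 0x87        interrupt redirection bitmap (zeroed)
 *	+0x88 ..             I/O permission bitmap (zeroed => all ports allowed)
 *	RMODE_TSS_SIZE - 1   trailing 0xff byte conventionally required
 *	                     after the I/O bitmap
 */
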
3634 static int init_rmode_identity_map(struct kvm *kvm)
3635 {
3636 	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
3637 	int i, r = 0;
3638 	kvm_pfn_t identity_map_pfn;
3639 	u32 tmp;
3640 
3641 	/* Protect kvm_vmx->ept_identity_pagetable_done. */
3642 	mutex_lock(&kvm->slots_lock);
3643 
3644 	if (likely(kvm_vmx->ept_identity_pagetable_done))
3645 		goto out;
3646 
3647 	if (!kvm_vmx->ept_identity_map_addr)
3648 		kvm_vmx->ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR;
3649 	identity_map_pfn = kvm_vmx->ept_identity_map_addr >> PAGE_SHIFT;
3650 
3651 	r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
3652 				    kvm_vmx->ept_identity_map_addr, PAGE_SIZE);
3653 	if (r < 0)
3654 		goto out;
3655 
3656 	r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
3657 	if (r < 0)
3658 		goto out;
3659 	/* Set up identity-mapping pagetable for EPT in real mode */
3660 	for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
3661 		tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
3662 			_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
3663 		r = kvm_write_guest_page(kvm, identity_map_pfn,
3664 				&tmp, i * sizeof(tmp), sizeof(tmp));
3665 		if (r < 0)
3666 			goto out;
3667 	}
3668 	kvm_vmx->ept_identity_pagetable_done = true;
3669 
3670 out:
3671 	mutex_unlock(&kvm->slots_lock);
3672 	return r;
3673 }
3674 
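/*
 * Worked example for the loop above (illustrative, assuming the standard
 * x86 _PAGE_* values): with 32-bit PSE paging each of the 1024 entries
 * covers 4 MiB, and the flag bits used sum to 0xe7 (P|RW|US|A|D|PSE), so
 * e.g. entry 1 is
 *
 *	(1 << 22) | 0xe7 == 0x004000e7
 *
 * which maps virtual [4M, 8M) to the identical physical range.
 */
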
3675 static void seg_setup(int seg)
3676 {
3677 	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3678 	unsigned int ar;
3679 
3680 	vmcs_write16(sf->selector, 0);
3681 	vmcs_writel(sf->base, 0);
3682 	vmcs_write32(sf->limit, 0xffff);
3683 	ar = 0x93;
3684 	if (seg == VCPU_SREG_CS)
3685 		ar |= 0x08; /* code segment */
3686 
3687 	vmcs_write32(sf->ar_bytes, ar);
3688 }
3689 
3690 static int alloc_apic_access_page(struct kvm *kvm)
3691 {
3692 	struct page *page;
3693 	int r = 0;
3694 
3695 	mutex_lock(&kvm->slots_lock);
3696 	if (kvm->arch.apic_access_page_done)
3697 		goto out;
3698 	r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
3699 				    APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
3700 	if (r)
3701 		goto out;
3702 
3703 	page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
3704 	if (is_error_page(page)) {
3705 		r = -EFAULT;
3706 		goto out;
3707 	}
3708 
3709 	/*
3710 	 * Do not pin the page in memory, so that memory hot-unplug
3711 	 * is able to migrate it.
3712 	 */
3713 	put_page(page);
3714 	kvm->arch.apic_access_page_done = true;
3715 out:
3716 	mutex_unlock(&kvm->slots_lock);
3717 	return r;
3718 }
3719 
3720 int allocate_vpid(void)
3721 {
3722 	int vpid;
3723 
3724 	if (!enable_vpid)
3725 		return 0;
3726 	spin_lock(&vmx_vpid_lock);
3727 	vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
3728 	if (vpid < VMX_NR_VPIDS)
3729 		__set_bit(vpid, vmx_vpid_bitmap);
3730 	else
3731 		vpid = 0;
3732 	spin_unlock(&vmx_vpid_lock);
3733 	return vpid;
3734 }
3735 
3736 void free_vpid(int vpid)
3737 {
3738 	if (!enable_vpid || vpid == 0)
3739 		return;
3740 	spin_lock(&vmx_vpid_lock);
3741 	__clear_bit(vpid, vmx_vpid_bitmap);
3742 	spin_unlock(&vmx_vpid_lock);
3743 }
3744 
3745 static void vmx_clear_msr_bitmap_read(ulong *msr_bitmap, u32 msr)
3746 {
3747 	int f = sizeof(unsigned long);
3748 
3749 	if (msr <= 0x1fff)
3750 		__clear_bit(msr, msr_bitmap + 0x000 / f);
3751 	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
3752 		__clear_bit(msr & 0x1fff, msr_bitmap + 0x400 / f);
3753 }
3754 
3755 static void vmx_clear_msr_bitmap_write(ulong *msr_bitmap, u32 msr)
3756 {
3757 	int f = sizeof(unsigned long);
3758 
3759 	if (msr <= 0x1fff)
3760 		__clear_bit(msr, msr_bitmap + 0x800 / f);
3761 	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
3762 		__clear_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f);
3763 }
3764 
3765 static void vmx_set_msr_bitmap_read(ulong *msr_bitmap, u32 msr)
3766 {
3767 	int f = sizeof(unsigned long);
3768 
3769 	if (msr <= 0x1fff)
3770 		__set_bit(msr, msr_bitmap + 0x000 / f);
3771 	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
3772 		__set_bit(msr & 0x1fff, msr_bitmap + 0x400 / f);
3773 }
3774 
3775 static void vmx_set_msr_bitmap_write(ulong *msr_bitmap, u32 msr)
3776 {
3777 	int f = sizeof(unsigned long);
3778 
3779 	if (msr <= 0x1fff)
3780 		__set_bit(msr, msr_bitmap + 0x800 / f);
3781 	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
3782 		__set_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f);
3783 }
3784 
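/*
 * A minimal sketch, not part of the original source: querying the same
 * 4 KiB VMX MSR bitmap the four helpers above modify.  Per the layout,
 * reads live in the low 2 KiB (offset 0x000 for MSRs 0x0000-0x1fff,
 * 0x400 for 0xc0000000-0xc0001fff) and writes in the high 2 KiB
 * (offsets 0x800 and 0xc00).
 */
static bool vmx_test_msr_bitmap_read(ulong *msr_bitmap, u32 msr)
{
	int f = sizeof(unsigned long);

	if (msr <= 0x1fff)
		return test_bit(msr, msr_bitmap + 0x000 / f);
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
		return test_bit(msr & 0x1fff, msr_bitmap + 0x400 / f);
	return true;	/* MSRs outside both ranges are always intercepted */
}
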
3785 static __always_inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
3786 							  u32 msr, int type)
3787 {
3788 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3789 	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
3790 
3791 	if (!cpu_has_vmx_msr_bitmap())
3792 		return;
3793 
3794 	if (static_branch_unlikely(&enable_evmcs))
3795 		evmcs_touch_msr_bitmap();
3796 
3797 	/*
3798 	 * Mark the desired intercept state in the shadow bitmap; this is
3799 	 * needed for resync when the MSR filter changes.
3800 	 */
3801 	if (is_valid_passthrough_msr(msr)) {
3802 		int idx = possible_passthrough_msr_slot(msr);
3803 
3804 		if (idx != -ENOENT) {
3805 			if (type & MSR_TYPE_R)
3806 				clear_bit(idx, vmx->shadow_msr_intercept.read);
3807 			if (type & MSR_TYPE_W)
3808 				clear_bit(idx, vmx->shadow_msr_intercept.write);
3809 		}
3810 	}
3811 
3812 	if ((type & MSR_TYPE_R) &&
3813 	    !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ)) {
3814 		vmx_set_msr_bitmap_read(msr_bitmap, msr);
3815 		type &= ~MSR_TYPE_R;
3816 	}
3817 
3818 	if ((type & MSR_TYPE_W) &&
3819 	    !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE)) {
3820 		vmx_set_msr_bitmap_write(msr_bitmap, msr);
3821 		type &= ~MSR_TYPE_W;
3822 	}
3823 
3824 	if (type & MSR_TYPE_R)
3825 		vmx_clear_msr_bitmap_read(msr_bitmap, msr);
3826 
3827 	if (type & MSR_TYPE_W)
3828 		vmx_clear_msr_bitmap_write(msr_bitmap, msr);
3829 }
3830 
3831 static __always_inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
3832 							 u32 msr, int type)
3833 {
3834 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3835 	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
3836 
3837 	if (!cpu_has_vmx_msr_bitmap())
3838 		return;
3839 
3840 	if (static_branch_unlikely(&enable_evmcs))
3841 		evmcs_touch_msr_bitmap();
3842 
3843 	/*
3844 	 * Mark the desired intercept state in the shadow bitmap; this is
3845 	 * needed for resync when the MSR filter changes.
3846 	 */
3847 	if (is_valid_passthrough_msr(msr)) {
3848 		int idx = possible_passthrough_msr_slot(msr);
3849 
3850 		if (idx != -ENOENT) {
3851 			if (type & MSR_TYPE_R)
3852 				set_bit(idx, vmx->shadow_msr_intercept.read);
3853 			if (type & MSR_TYPE_W)
3854 				set_bit(idx, vmx->shadow_msr_intercept.write);
3855 		}
3856 	}
3857 
3858 	if (type & MSR_TYPE_R)
3859 		vmx_set_msr_bitmap_read(msr_bitmap, msr);
3860 
3861 	if (type & MSR_TYPE_W)
3862 		vmx_set_msr_bitmap_write(msr_bitmap, msr);
3863 }
3864 
3865 static __always_inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu,
3866 						      u32 msr, int type, bool value)
3867 {
3868 	if (value)
3869 		vmx_enable_intercept_for_msr(vcpu, msr, type);
3870 	else
3871 		vmx_disable_intercept_for_msr(vcpu, msr, type);
3872 }
3873 
3874 static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
3875 {
3876 	u8 mode = 0;
3877 
3878 	if (cpu_has_secondary_exec_ctrls() &&
3879 	    (secondary_exec_controls_get(to_vmx(vcpu)) &
3880 	     SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
3881 		mode |= MSR_BITMAP_MODE_X2APIC;
3882 		if (enable_apicv && kvm_vcpu_apicv_active(vcpu))
3883 			mode |= MSR_BITMAP_MODE_X2APIC_APICV;
3884 	}
3885 
3886 	return mode;
3887 }
3888 
3889 static void vmx_reset_x2apic_msrs(struct kvm_vcpu *vcpu, u8 mode)
3890 {
3891 	unsigned long *msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
3892 	unsigned long read_intercept;
3893 	int msr;
3894 
3895 	read_intercept = (mode & MSR_BITMAP_MODE_X2APIC_APICV) ? 0 : ~0;
3896 
3897 	for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
3898 		unsigned int read_idx = msr / BITS_PER_LONG;
3899 		unsigned int write_idx = read_idx + (0x800 / sizeof(long));
3900 
3901 		msr_bitmap[read_idx] = read_intercept;
3902 		msr_bitmap[write_idx] = ~0ul;
3903 	}
3904 }
3905 
3906 static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu, u8 mode)
3907 {
3908 	if (!cpu_has_vmx_msr_bitmap())
3909 		return;
3910 
3911 	vmx_reset_x2apic_msrs(vcpu, mode);
3912 
3913 	/*
3914 	 * TPR reads and writes can be virtualized even if virtual interrupt
3915 	 * delivery is not in use.
3916 	 */
3917 	vmx_set_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW,
3918 				  !(mode & MSR_BITMAP_MODE_X2APIC));
3919 
3920 	if (mode & MSR_BITMAP_MODE_X2APIC_APICV) {
3921 		vmx_enable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_RW);
3922 		vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_EOI), MSR_TYPE_W);
3923 		vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W);
3924 	}
3925 }
3926 
3927 void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
3928 {
3929 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3930 	u8 mode = vmx_msr_bitmap_mode(vcpu);
3931 	u8 changed = mode ^ vmx->msr_bitmap_mode;
3932 
3933 	if (!changed)
3934 		return;
3935 
3936 	if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV))
3937 		vmx_update_msr_bitmap_x2apic(vcpu, mode);
3938 
3939 	vmx->msr_bitmap_mode = mode;
3940 }
3941 
3942 void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu)
3943 {
3944 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3945 	bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
3946 	u32 i;
3947 
3948 	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_STATUS, MSR_TYPE_RW, flag);
3949 	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_BASE, MSR_TYPE_RW, flag);
3950 	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_MASK, MSR_TYPE_RW, flag);
3951 	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_CR3_MATCH, MSR_TYPE_RW, flag);
3952 	for (i = 0; i < vmx->pt_desc.addr_range; i++) {
3953 		vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_A + i * 2, MSR_TYPE_RW, flag);
3954 		vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_B + i * 2, MSR_TYPE_RW, flag);
3955 	}
3956 }
3957 
3958 static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
3959 {
3960 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3961 	void *vapic_page;
3962 	u32 vppr;
3963 	int rvi;
3964 
3965 	if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
3966 		!nested_cpu_has_vid(get_vmcs12(vcpu)) ||
3967 		WARN_ON_ONCE(!vmx->nested.virtual_apic_map.gfn))
3968 		return false;
3969 
3970 	rvi = vmx_get_rvi();
3971 
3972 	vapic_page = vmx->nested.virtual_apic_map.hva;
3973 	vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
3974 
3975 	return ((rvi & 0xf0) > (vppr & 0xf0));
3976 }
3977 
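/*
 * Illustrative helper, not in the original source: the 0xf0 masks in
 * vmx_guest_apic_has_interrupt() above compare APIC priority classes,
 * i.e. the vector's high nibble.
 */
static inline u8 apic_priority_class(u8 vector)
{
	return vector >> 4;	/* e.g. vector 0x61 -> class 6 */
}
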
3978 static void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
3979 {
3980 	struct vcpu_vmx *vmx = to_vmx(vcpu);
3981 	u32 i;
3982 
3983 	/*
3984 	 * Set intercept permissions for all potentially passed through MSRs
3985 	 * again. They will automatically get filtered through the MSR filter,
3986 	 * so we are back in sync after this.
3987 	 */
3988 	for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++) {
3989 		u32 msr = vmx_possible_passthrough_msrs[i];
3990 		bool read = test_bit(i, vmx->shadow_msr_intercept.read);
3991 		bool write = test_bit(i, vmx->shadow_msr_intercept.write);
3992 
3993 		vmx_set_intercept_for_msr(vcpu, msr, MSR_TYPE_R, read);
3994 		vmx_set_intercept_for_msr(vcpu, msr, MSR_TYPE_W, write);
3995 	}
3996 
3997 	pt_update_intercept_for_msr(vcpu);
3998 	vmx_update_msr_bitmap_x2apic(vcpu, vmx_msr_bitmap_mode(vcpu));
3999 }
4000 
4001 static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
4002 						     bool nested)
4003 {
4004 #ifdef CONFIG_SMP
4005 	int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR;
4006 
4007 	if (vcpu->mode == IN_GUEST_MODE) {
4008 		/*
4009 		 * The vector of the interrupt to be delivered to the vcpu
4010 		 * was set in the PIR before this function was called.
4011 		 *
4012 		 * The following cases can be reached in this block, and
4013 		 * we always send a notification event in all of them, as
4014 		 * explained below.
4015 		 *
4016 		 * Case 1: vcpu keeps in non-root mode. Sending a
4017 		 * notification event posts the interrupt to vcpu.
4018 		 *
4019 		 * Case 2: vcpu exits to root mode and is still
4020 		 * runnable. PIR will be synced to vIRR before the
4021 		 * next vcpu entry. Sending a notification event in
4022 		 * this case has no effect, as the vcpu is no longer
4023 		 * in non-root mode.
4024 		 *
4025 		 * Case 3: vcpu exits to root mode and is blocked.
4026 		 * vcpu_block() has already synced PIR to vIRR and
4027 		 * never blocks the vcpu if vIRR is not clear. Therefore,
4028 		 * a blocked vcpu here is not waiting for any requested
4029 		 * interrupt in PIR, and sending a notification event
4030 		 * that has no effect is safe here.
4031 		 */
4032 
4033 		apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
4034 		return true;
4035 	}
4036 #endif
4037 	return false;
4038 }
4039 
4040 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
4041 						int vector)
4042 {
4043 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4044 
4045 	if (is_guest_mode(vcpu) &&
4046 	    vector == vmx->nested.posted_intr_nv) {
4047 		/*
4048 		 * If a posted interrupt is not recognized by hardware,
4049 		 * it will be delivered on the next vmentry.
4050 		 */
4051 		vmx->nested.pi_pending = true;
4052 		kvm_make_request(KVM_REQ_EVENT, vcpu);
4053 		/* the PIR and ON have been set by L1. */
4054 		if (!kvm_vcpu_trigger_posted_interrupt(vcpu, true))
4055 			kvm_vcpu_kick(vcpu);
4056 		return 0;
4057 	}
4058 	return -1;
4059 }
4060 /*
4061  * Send an interrupt to the vcpu via posted interrupt:
4062  * 1. If the target vcpu is running (non-root mode), send a posted interrupt
4063  * notification and hardware will sync the PIR to the vIRR atomically.
4064  * 2. If the target vcpu isn't running (root mode), kick it so it picks up
4065  * the interrupt from the PIR on the next vmentry.
4066  */
4067 static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
4068 {
4069 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4070 	int r;
4071 
4072 	r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
4073 	if (!r)
4074 		return 0;
4075 
4076 	if (!vcpu->arch.apicv_active)
4077 		return -1;
4078 
4079 	if (pi_test_and_set_pir(vector, &vmx->pi_desc))
4080 		return 0;
4081 
4082 	/* If a previous notification has sent the IPI, nothing to do.  */
4083 	if (pi_test_and_set_on(&vmx->pi_desc))
4084 		return 0;
4085 
4086 	if (vcpu != kvm_get_running_vcpu() &&
4087 	    !kvm_vcpu_trigger_posted_interrupt(vcpu, false))
4088 		kvm_vcpu_kick(vcpu);
4089 
4090 	return 0;
4091 }
4092 
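/*
 * Sketch of the ordering contract implemented by the two functions above,
 * assuming the pi_desc layout from posted_intr.h (256-bit PIR plus an
 * outstanding-notification bit, ON):
 *
 *	pi_test_and_set_pir(vector, &vmx->pi_desc);	// 1. publish the vector
 *	pi_test_and_set_on(&vmx->pi_desc);		// 2. raise ON, after PIR
 *	... send notification IPI ...			// 3. CPU syncs PIR to vIRR
 *
 * Steps 1 and 2 must not be reordered; otherwise the receiving CPU could
 * observe ON without the vector and lose the interrupt.
 */
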
4093 /*
4094  * Set up the vmcs's constant host-state fields, i.e., host-state fields that
4095  * will not change in the lifetime of the guest.
4096  * Note that host-state that does change is set elsewhere. E.g., host-state
4097  * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
4098  */
4099 void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
4100 {
4101 	u32 low32, high32;
4102 	unsigned long tmpl;
4103 	unsigned long cr0, cr3, cr4;
4104 
4105 	cr0 = read_cr0();
4106 	WARN_ON(cr0 & X86_CR0_TS);
4107 	vmcs_writel(HOST_CR0, cr0);  /* 22.2.3 */
4108 
4109 	/*
4110 	 * Save the most likely value for this task's CR3 in the VMCS.
4111 	 * We can't use __get_current_cr3_fast() because we're not atomic.
4112 	 */
4113 	cr3 = __read_cr3();
4114 	vmcs_writel(HOST_CR3, cr3);		/* 22.2.3  FIXME: shadow tables */
4115 	vmx->loaded_vmcs->host_state.cr3 = cr3;
4116 
4117 	/* Save the most likely value for this task's CR4 in the VMCS. */
4118 	cr4 = cr4_read_shadow();
4119 	vmcs_writel(HOST_CR4, cr4);			/* 22.2.3, 22.2.5 */
4120 	vmx->loaded_vmcs->host_state.cr4 = cr4;
4121 
4122 	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
4123 #ifdef CONFIG_X86_64
4124 	/*
4125 	 * Load null selectors, so we can avoid reloading them in
4126 	 * vmx_prepare_switch_to_host(), in case userspace uses
4127 	 * the null selectors too (the expected case).
4128 	 */
4129 	vmcs_write16(HOST_DS_SELECTOR, 0);
4130 	vmcs_write16(HOST_ES_SELECTOR, 0);
4131 #else
4132 	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
4133 	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
4134 #endif
4135 	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
4136 	vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
4137 
4138 	vmcs_writel(HOST_IDTR_BASE, host_idt_base);   /* 22.2.4 */
4139 
4140 	vmcs_writel(HOST_RIP, (unsigned long)vmx_vmexit); /* 22.2.5 */
4141 
4142 	rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
4143 	vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
4144 	rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
4145 	vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl);   /* 22.2.3 */
4146 
4147 	if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
4148 		rdmsr(MSR_IA32_CR_PAT, low32, high32);
4149 		vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
4150 	}
4151 
4152 	if (cpu_has_load_ia32_efer())
4153 		vmcs_write64(HOST_IA32_EFER, host_efer);
4154 }
4155 
4156 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
4157 {
4158 	struct kvm_vcpu *vcpu = &vmx->vcpu;
4159 
4160 	vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS &
4161 					  ~vcpu->arch.cr4_guest_rsvd_bits;
4162 	if (!enable_ept)
4163 		vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PGE;
4164 	if (is_guest_mode(&vmx->vcpu))
4165 		vcpu->arch.cr4_guest_owned_bits &=
4166 			~get_vmcs12(vcpu)->cr4_guest_host_mask;
4167 	vmcs_writel(CR4_GUEST_HOST_MASK, ~vcpu->arch.cr4_guest_owned_bits);
4168 }
4169 
4170 u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
4171 {
4172 	u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;
4173 
4174 	if (!kvm_vcpu_apicv_active(&vmx->vcpu))
4175 		pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
4176 
4177 	if (!enable_vnmi)
4178 		pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS;
4179 
4180 	if (!enable_preemption_timer)
4181 		pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
4182 
4183 	return pin_based_exec_ctrl;
4184 }
4185 
4186 static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
4187 {
4188 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4189 
4190 	pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
4191 	if (cpu_has_secondary_exec_ctrls()) {
4192 		if (kvm_vcpu_apicv_active(vcpu))
4193 			secondary_exec_controls_setbit(vmx,
4194 				      SECONDARY_EXEC_APIC_REGISTER_VIRT |
4195 				      SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4196 		else
4197 			secondary_exec_controls_clearbit(vmx,
4198 					SECONDARY_EXEC_APIC_REGISTER_VIRT |
4199 					SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4200 	}
4201 
4202 	if (cpu_has_vmx_msr_bitmap())
4203 		vmx_update_msr_bitmap(vcpu);
4204 }
4205 
4206 u32 vmx_exec_control(struct vcpu_vmx *vmx)
4207 {
4208 	u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
4209 
4210 	if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
4211 		exec_control &= ~CPU_BASED_MOV_DR_EXITING;
4212 
4213 	if (!cpu_need_tpr_shadow(&vmx->vcpu)) {
4214 		exec_control &= ~CPU_BASED_TPR_SHADOW;
4215 #ifdef CONFIG_X86_64
4216 		exec_control |= CPU_BASED_CR8_STORE_EXITING |
4217 				CPU_BASED_CR8_LOAD_EXITING;
4218 #endif
4219 	}
4220 	if (!enable_ept)
4221 		exec_control |= CPU_BASED_CR3_STORE_EXITING |
4222 				CPU_BASED_CR3_LOAD_EXITING  |
4223 				CPU_BASED_INVLPG_EXITING;
4224 	if (kvm_mwait_in_guest(vmx->vcpu.kvm))
4225 		exec_control &= ~(CPU_BASED_MWAIT_EXITING |
4226 				CPU_BASED_MONITOR_EXITING);
4227 	if (kvm_hlt_in_guest(vmx->vcpu.kvm))
4228 		exec_control &= ~CPU_BASED_HLT_EXITING;
4229 	return exec_control;
4230 }
4231 
4232 /*
4233  * Adjust a single secondary execution control bit to intercept/allow an
4234  * instruction in the guest.  This is usually done based on whether or not a
4235  * feature has been exposed to the guest in order to correctly emulate faults.
4236  */
4237 static inline void
4238 vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control,
4239 				  u32 control, bool enabled, bool exiting)
4240 {
4241 	/*
4242 	 * If the control is for an opt-in feature, clear the control if the
4243 	 * feature is not exposed to the guest, i.e. not enabled.  If the
4244 	 * control is opt-out, i.e. an exiting control, clear the control if
4245 	 * the feature _is_ exposed to the guest, i.e. exiting/interception is
4246 	 * disabled for the associated instruction.  Note, the caller is
4247 	 * responsible for presetting exec_control to set all supported bits.
4248 	 */
4249 	if (enabled == exiting)
4250 		*exec_control &= ~control;
4251 
4252 	/*
4253 	 * Update the nested MSR settings so that a nested VMM can/can't set
4254 	 * controls for features that are/aren't exposed to the guest.
4255 	 */
4256 	if (nested) {
4257 		if (enabled)
4258 			vmx->nested.msrs.secondary_ctls_high |= control;
4259 		else
4260 			vmx->nested.msrs.secondary_ctls_high &= ~control;
4261 	}
4262 }
4263 
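/*
 * Illustrative truth table for the logic above (not part of the original
 * source): "enabled == exiting" clears the control in both mismatched
 * cases.
 *
 *	exiting  enabled  resulting control
 *	false    false    cleared (opt-in feature not exposed to guest)
 *	false    true     kept    (opt-in feature exposed)
 *	true     false    kept    (keep intercepting the instruction)
 *	true     true     cleared (stop intercepting, feature exposed)
 */
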
4264 /*
4265  * Wrapper macro for the common case of adjusting a secondary execution control
4266  * based on a single guest CPUID bit, with a dedicated feature bit.  This also
4267  * verifies that the control is actually supported by KVM and hardware.
4268  */
4269 #define vmx_adjust_sec_exec_control(vmx, exec_control, name, feat_name, ctrl_name, exiting) \
4270 ({									 \
4271 	bool __enabled;							 \
4272 									 \
4273 	if (cpu_has_vmx_##name()) {					 \
4274 		__enabled = guest_cpuid_has(&(vmx)->vcpu,		 \
4275 					    X86_FEATURE_##feat_name);	 \
4276 		vmx_adjust_secondary_exec_control(vmx, exec_control,	 \
4277 			SECONDARY_EXEC_##ctrl_name, __enabled, exiting); \
4278 	}								 \
4279 })
4280 
4281 /* More macro magic for ENABLE_/opt-in versus _EXITING/opt-out controls. */
4282 #define vmx_adjust_sec_exec_feature(vmx, exec_control, lname, uname) \
4283 	vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, ENABLE_##uname, false)
4284 
4285 #define vmx_adjust_sec_exec_exiting(vmx, exec_control, lname, uname) \
4286 	vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, uname##_EXITING, true)
4287 
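/*
 * Example expansion (illustrative): vmx_adjust_sec_exec_feature(vmx,
 * &exec_control, rdtscp, RDTSCP) becomes, roughly:
 *
 *	if (cpu_has_vmx_rdtscp()) {
 *		bool __enabled = guest_cpuid_has(&vmx->vcpu,
 *						 X86_FEATURE_RDTSCP);
 *		vmx_adjust_secondary_exec_control(vmx, &exec_control,
 *			SECONDARY_EXEC_ENABLE_RDTSCP, __enabled, false);
 *	}
 */
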
4288 static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
4289 {
4290 	struct kvm_vcpu *vcpu = &vmx->vcpu;
4291 
4292 	u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
4293 
4294 	if (vmx_pt_mode_is_system())
4295 		exec_control &= ~(SECONDARY_EXEC_PT_USE_GPA | SECONDARY_EXEC_PT_CONCEAL_VMX);
4296 	if (!cpu_need_virtualize_apic_accesses(vcpu))
4297 		exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
4298 	if (vmx->vpid == 0)
4299 		exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
4300 	if (!enable_ept) {
4301 		exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
4302 		enable_unrestricted_guest = 0;
4303 	}
4304 	if (!enable_unrestricted_guest)
4305 		exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
4306 	if (kvm_pause_in_guest(vmx->vcpu.kvm))
4307 		exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
4308 	if (!kvm_vcpu_apicv_active(vcpu))
4309 		exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
4310 				  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4311 	exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
4312 
4313 	/* SECONDARY_EXEC_DESC is enabled/disabled on writes to CR4.UMIP
4314 	 * in vmx_set_cr4().  */
4315 	exec_control &= ~SECONDARY_EXEC_DESC;
4316 
4317 	/* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD
4318 	 * (handle_vmptrld).
4319 	 * We can NOT enable shadow_vmcs here because we don't yet have
4320 	 * a current VMCS12.
4321 	 */
4322 	exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
4323 
4324 	if (!enable_pml)
4325 		exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
4326 
4327 	if (cpu_has_vmx_xsaves()) {
4328 		/* Exposing XSAVES only when XSAVE is exposed */
4329 		bool xsaves_enabled =
4330 			boot_cpu_has(X86_FEATURE_XSAVE) &&
4331 			guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
4332 			guest_cpuid_has(vcpu, X86_FEATURE_XSAVES);
4333 
4334 		vcpu->arch.xsaves_enabled = xsaves_enabled;
4335 
4336 		vmx_adjust_secondary_exec_control(vmx, &exec_control,
4337 						  SECONDARY_EXEC_XSAVES,
4338 						  xsaves_enabled, false);
4339 	}
4340 
4341 	vmx_adjust_sec_exec_feature(vmx, &exec_control, rdtscp, RDTSCP);
4342 
4343 	/*
4344 	 * Expose INVPCID if and only if PCID is also exposed to the guest.
4345 	 * INVPCID takes a #UD when it's disabled in the VMCS, but a #GP or #PF
4346 	 * if CR4.PCIDE=0.  Enumerating CPUID.INVPCID=1 would lead to incorrect
4347 	 * behavior from the guest perspective (it would expect #GP or #PF).
4348 	 */
4349 	if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID))
4350 		guest_cpuid_clear(vcpu, X86_FEATURE_INVPCID);
4351 	vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID);
4352 
4353 
4354 	vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND);
4355 	vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdseed, RDSEED);
4356 
4357 	vmx_adjust_sec_exec_control(vmx, &exec_control, waitpkg, WAITPKG,
4358 				    ENABLE_USR_WAIT_PAUSE, false);
4359 
4360 	vmx->secondary_exec_control = exec_control;
4361 }
4362 
4363 static void ept_set_mmio_spte_mask(void)
4364 {
4365 	/*
4366 	 * EPT Misconfigurations can be generated if the value of bits 2:0
4367 	 * of an EPT paging-structure entry is 110b (write/execute).
4368 	 */
4369 	kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE, 0);
4370 }
4371 
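/*
 * Worked example (illustrative): bits 2:0 of an EPT entry are R/W/X, so
 * 110b sets write (bit 1) and execute (bit 2) while leaving read (bit 0)
 * clear, i.e. VMX_EPT_MISCONFIG_WX_VALUE == 0x6.  Writable or executable
 * but not readable is an illegal combination, so such an SPTE is
 * guaranteed to trigger an EPT misconfiguration exit.
 */
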
4372 #define VMX_XSS_EXIT_BITMAP 0
4373 
4374 /*
4375  * Note that the initialization of the guest-state area of the VMCS is
4376  * done in vmx_vcpu_reset().
4377  */
4378 static void init_vmcs(struct vcpu_vmx *vmx)
4379 {
4380 	if (nested)
4381 		nested_vmx_set_vmcs_shadowing_bitmap();
4382 
4383 	if (cpu_has_vmx_msr_bitmap())
4384 		vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap));
4385 
4386 	vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
4387 
4388 	/* Control */
4389 	pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
4390 
4391 	exec_controls_set(vmx, vmx_exec_control(vmx));
4392 
4393 	if (cpu_has_secondary_exec_ctrls()) {
4394 		vmx_compute_secondary_exec_control(vmx);
4395 		secondary_exec_controls_set(vmx, vmx->secondary_exec_control);
4396 	}
4397 
4398 	if (kvm_vcpu_apicv_active(&vmx->vcpu)) {
4399 		vmcs_write64(EOI_EXIT_BITMAP0, 0);
4400 		vmcs_write64(EOI_EXIT_BITMAP1, 0);
4401 		vmcs_write64(EOI_EXIT_BITMAP2, 0);
4402 		vmcs_write64(EOI_EXIT_BITMAP3, 0);
4403 
4404 		vmcs_write16(GUEST_INTR_STATUS, 0);
4405 
4406 		vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR);
4407 		vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
4408 	}
4409 
4410 	if (!kvm_pause_in_guest(vmx->vcpu.kvm)) {
4411 		vmcs_write32(PLE_GAP, ple_gap);
4412 		vmx->ple_window = ple_window;
4413 		vmx->ple_window_dirty = true;
4414 	}
4415 
4416 	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
4417 	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
4418 	vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */
4419 
4420 	vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
4421 	vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
4422 	vmx_set_constant_host_state(vmx);
4423 	vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
4424 	vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
4425 
4426 	if (cpu_has_vmx_vmfunc())
4427 		vmcs_write64(VM_FUNCTION_CONTROL, 0);
4428 
4429 	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
4430 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
4431 	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
4432 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
4433 	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
4434 
4435 	if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
4436 		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
4437 
4438 	vm_exit_controls_set(vmx, vmx_vmexit_ctrl());
4439 
4440 	/* 22.2.1, 20.8.1 */
4441 	vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
4442 
4443 	vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
4444 	vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
4445 
4446 	set_cr4_guest_host_mask(vmx);
4447 
4448 	if (vmx->vpid != 0)
4449 		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
4450 
4451 	if (cpu_has_vmx_xsaves())
4452 		vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
4453 
4454 	if (enable_pml) {
4455 		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
4456 		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
4457 	}
4458 
4459 	if (cpu_has_vmx_encls_vmexit())
4460 		vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
4461 
4462 	if (vmx_pt_mode_is_host_guest()) {
4463 		memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc));
4464 		/* Bits 6:0 are forced to 1; writes to them are ignored. */
4465 		vmx->pt_desc.guest.output_mask = 0x7F;
4466 		vmcs_write64(GUEST_IA32_RTIT_CTL, 0);
4467 	}
4468 }
4469 
4470 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
4471 {
4472 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4473 	struct msr_data apic_base_msr;
4474 	u64 cr0;
4475 
4476 	vmx->rmode.vm86_active = 0;
4477 	vmx->spec_ctrl = 0;
4478 
4479 	vmx->msr_ia32_umwait_control = 0;
4480 
4481 	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
4482 	vmx->hv_deadline_tsc = -1;
4483 	kvm_set_cr8(vcpu, 0);
4484 
4485 	if (!init_event) {
4486 		apic_base_msr.data = APIC_DEFAULT_PHYS_BASE |
4487 				     MSR_IA32_APICBASE_ENABLE;
4488 		if (kvm_vcpu_is_reset_bsp(vcpu))
4489 			apic_base_msr.data |= MSR_IA32_APICBASE_BSP;
4490 		apic_base_msr.host_initiated = true;
4491 		kvm_set_apic_base(vcpu, &apic_base_msr);
4492 	}
4493 
4494 	vmx_segment_cache_clear(vmx);
4495 
4496 	seg_setup(VCPU_SREG_CS);
4497 	vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
4498 	vmcs_writel(GUEST_CS_BASE, 0xffff0000ul);
4499 
4500 	seg_setup(VCPU_SREG_DS);
4501 	seg_setup(VCPU_SREG_ES);
4502 	seg_setup(VCPU_SREG_FS);
4503 	seg_setup(VCPU_SREG_GS);
4504 	seg_setup(VCPU_SREG_SS);
4505 
4506 	vmcs_write16(GUEST_TR_SELECTOR, 0);
4507 	vmcs_writel(GUEST_TR_BASE, 0);
4508 	vmcs_write32(GUEST_TR_LIMIT, 0xffff);
4509 	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
4510 
4511 	vmcs_write16(GUEST_LDTR_SELECTOR, 0);
4512 	vmcs_writel(GUEST_LDTR_BASE, 0);
4513 	vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
4514 	vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
4515 
4516 	if (!init_event) {
4517 		vmcs_write32(GUEST_SYSENTER_CS, 0);
4518 		vmcs_writel(GUEST_SYSENTER_ESP, 0);
4519 		vmcs_writel(GUEST_SYSENTER_EIP, 0);
4520 		vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
4521 	}
4522 
4523 	kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
4524 	kvm_rip_write(vcpu, 0xfff0);
4525 
4526 	vmcs_writel(GUEST_GDTR_BASE, 0);
4527 	vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
4528 
4529 	vmcs_writel(GUEST_IDTR_BASE, 0);
4530 	vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
4531 
4532 	vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
4533 	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
4534 	vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0);
4535 	if (kvm_mpx_supported())
4536 		vmcs_write64(GUEST_BNDCFGS, 0);
4537 
4538 	setup_msrs(vmx);
4539 
4540 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */
4541 
4542 	if (cpu_has_vmx_tpr_shadow() && !init_event) {
4543 		vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
4544 		if (cpu_need_tpr_shadow(vcpu))
4545 			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
4546 				     __pa(vcpu->arch.apic->regs));
4547 		vmcs_write32(TPR_THRESHOLD, 0);
4548 	}
4549 
4550 	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
4551 
4552 	cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
4553 	vmx->vcpu.arch.cr0 = cr0;
4554 	vmx_set_cr0(vcpu, cr0); /* enter rmode */
4555 	vmx_set_cr4(vcpu, 0);
4556 	vmx_set_efer(vcpu, 0);
4557 
4558 	update_exception_bitmap(vcpu);
4559 
4560 	vpid_sync_context(vmx->vpid);
4561 	if (init_event)
4562 		vmx_clear_hlt(vcpu);
4563 
4564 	vmx_update_fb_clear_dis(vcpu, vmx);
4565 }
4566 
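/*
 * Worked example (illustrative): the CS/RIP values programmed in
 * vmx_vcpu_reset() above reproduce the architectural reset vector,
 * CS.base + RIP = 0xffff0000 + 0xfff0 = 0xfffffff0.
 */
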
4567 static void enable_irq_window(struct kvm_vcpu *vcpu)
4568 {
4569 	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
4570 }
4571 
4572 static void enable_nmi_window(struct kvm_vcpu *vcpu)
4573 {
4574 	if (!enable_vnmi ||
4575 	    vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
4576 		enable_irq_window(vcpu);
4577 		return;
4578 	}
4579 
4580 	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
4581 }
4582 
4583 static void vmx_inject_irq(struct kvm_vcpu *vcpu)
4584 {
4585 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4586 	uint32_t intr;
4587 	int irq = vcpu->arch.interrupt.nr;
4588 
4589 	trace_kvm_inj_virq(irq);
4590 
4591 	++vcpu->stat.irq_injections;
4592 	if (vmx->rmode.vm86_active) {
4593 		int inc_eip = 0;
4594 		if (vcpu->arch.interrupt.soft)
4595 			inc_eip = vcpu->arch.event_exit_inst_len;
4596 		kvm_inject_realmode_interrupt(vcpu, irq, inc_eip);
4597 		return;
4598 	}
4599 	intr = irq | INTR_INFO_VALID_MASK;
4600 	if (vcpu->arch.interrupt.soft) {
4601 		intr |= INTR_TYPE_SOFT_INTR;
4602 		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
4603 			     vmx->vcpu.arch.event_exit_inst_len);
4604 	} else
4605 		intr |= INTR_TYPE_EXT_INTR;
4606 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
4607 
4608 	vmx_clear_hlt(vcpu);
4609 }
4610 
4611 static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
4612 {
4613 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4614 
4615 	if (!enable_vnmi) {
4616 		/*
4617 		 * Tracking the NMI-blocked state in software is built upon
4618 		 * finding the next open IRQ window. This, in turn, depends on
4619 		 * well-behaving guests: They have to keep IRQs disabled at
4620 		 * least as long as the NMI handler runs. Otherwise we may
4621 		 * cause NMI nesting, maybe breaking the guest. But as this is
4622 		 * highly unlikely, we can live with the residual risk.
4623 		 */
4624 		vmx->loaded_vmcs->soft_vnmi_blocked = 1;
4625 		vmx->loaded_vmcs->vnmi_blocked_time = 0;
4626 	}
4627 
4628 	++vcpu->stat.nmi_injections;
4629 	vmx->loaded_vmcs->nmi_known_unmasked = false;
4630 
4631 	if (vmx->rmode.vm86_active) {
4632 		kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0);
4633 		return;
4634 	}
4635 
4636 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
4637 			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
4638 
4639 	vmx_clear_hlt(vcpu);
4640 }
4641 
4642 bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
4643 {
4644 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4645 	bool masked;
4646 
4647 	if (!enable_vnmi)
4648 		return vmx->loaded_vmcs->soft_vnmi_blocked;
4649 	if (vmx->loaded_vmcs->nmi_known_unmasked)
4650 		return false;
4651 	masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
4652 	vmx->loaded_vmcs->nmi_known_unmasked = !masked;
4653 	return masked;
4654 }
4655 
4656 void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
4657 {
4658 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4659 
4660 	if (!enable_vnmi) {
4661 		if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
4662 			vmx->loaded_vmcs->soft_vnmi_blocked = masked;
4663 			vmx->loaded_vmcs->vnmi_blocked_time = 0;
4664 		}
4665 	} else {
4666 		vmx->loaded_vmcs->nmi_known_unmasked = !masked;
4667 		if (masked)
4668 			vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
4669 				      GUEST_INTR_STATE_NMI);
4670 		else
4671 			vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
4672 					GUEST_INTR_STATE_NMI);
4673 	}
4674 }
4675 
4676 bool vmx_nmi_blocked(struct kvm_vcpu *vcpu)
4677 {
4678 	if (is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
4679 		return false;
4680 
4681 	if (!enable_vnmi && to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
4682 		return true;
4683 
4684 	return (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
4685 		(GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI |
4686 		 GUEST_INTR_STATE_NMI));
4687 }
4688 
4689 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
4690 {
4691 	if (to_vmx(vcpu)->nested.nested_run_pending)
4692 		return -EBUSY;
4693 
4694 	/* An NMI must not be injected into L2 if it's supposed to VM-Exit.  */
4695 	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
4696 		return -EBUSY;
4697 
4698 	return !vmx_nmi_blocked(vcpu);
4699 }
4700 
4701 bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
4702 {
4703 	if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
4704 		return false;
4705 
4706 	return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) ||
4707 	       (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
4708 		(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
4709 }
4710 
4711 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
4712 {
4713 	if (to_vmx(vcpu)->nested.nested_run_pending)
4714 		return -EBUSY;
4715 
4716 	/*
4717 	 * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
4718 	 * e.g. if the IRQ arrived asynchronously after checking nested events.
4719 	 */
4720 	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
4721 		return -EBUSY;
4722 
4723 	return !vmx_interrupt_blocked(vcpu);
4724 }
4725 
4726 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
4727 {
4728 	int ret;
4729 
4730 	if (enable_unrestricted_guest)
4731 		return 0;
4732 
4733 	mutex_lock(&kvm->slots_lock);
4734 	ret = __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
4735 				      PAGE_SIZE * 3);
4736 	mutex_unlock(&kvm->slots_lock);
4737 
4738 	if (ret)
4739 		return ret;
4740 	to_kvm_vmx(kvm)->tss_addr = addr;
4741 	return init_rmode_tss(kvm);
4742 }
4743 
4744 static int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
4745 {
4746 	to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr;
4747 	return 0;
4748 }
4749 
4750 static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
4751 {
4752 	switch (vec) {
4753 	case BP_VECTOR:
4754 		/*
4755 		 * Update instruction length as we may reinject the exception
4756 		 * from user space while in guest debugging mode.
4757 		 */
4758 		to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
4759 			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
4760 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
4761 			return false;
4762 		fallthrough;
4763 	case DB_VECTOR:
4764 		return !(vcpu->guest_debug &
4765 			(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP));
4766 	case DE_VECTOR:
4767 	case OF_VECTOR:
4768 	case BR_VECTOR:
4769 	case UD_VECTOR:
4770 	case DF_VECTOR:
4771 	case SS_VECTOR:
4772 	case GP_VECTOR:
4773 	case MF_VECTOR:
4774 		return true;
4775 	}
4776 	return false;
4777 }
4778 
4779 static int handle_rmode_exception(struct kvm_vcpu *vcpu,
4780 				  int vec, u32 err_code)
4781 {
4782 	/*
4783 	 * An instruction with the address-size override prefix (opcode 0x67)
4784 	 * causes a #SS fault with error code 0 in VM86 mode.
4785 	 */
4786 	if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
4787 		if (kvm_emulate_instruction(vcpu, 0)) {
4788 			if (vcpu->arch.halt_request) {
4789 				vcpu->arch.halt_request = 0;
4790 				return kvm_vcpu_halt(vcpu);
4791 			}
4792 			return 1;
4793 		}
4794 		return 0;
4795 	}
4796 
4797 	/*
4798 	 * Forward all other exceptions that are valid in real mode.
4799 	 * FIXME: Breaks guest debugging in real mode, needs to be fixed with
4800 	 *        the required debugging infrastructure rework.
4801 	 */
4802 	kvm_queue_exception(vcpu, vec);
4803 	return 1;
4804 }
4805 
4806 /*
4807  * Trigger machine check on the host. We assume all the MSRs are already set up
4808  * by the CPU and that we still run on the same CPU as the MCE occurred on.
4809  * We pass a fake environment to the machine check handler because we want
4810  * the guest to be always treated like user space, no matter what context
4811  * it used internally.
4812  */
4813 static void kvm_machine_check(void)
4814 {
4815 #if defined(CONFIG_X86_MCE)
4816 	struct pt_regs regs = {
4817 		.cs = 3, /* Fake ring 3 no matter what the guest ran on */
4818 		.flags = X86_EFLAGS_IF,
4819 	};
4820 
4821 	do_machine_check(&regs);
4822 #endif
4823 }
4824 
4825 static int handle_machine_check(struct kvm_vcpu *vcpu)
4826 {
4827 	/* handled by vmx_vcpu_run() */
4828 	return 1;
4829 }
4830 
4831 /*
4832  * If the host has split lock detection disabled, then #AC is
4833  * unconditionally injected into the guest, which is the pre split lock
4834  * detection behaviour.
4835  *
4836  * If the host has split lock detection enabled then #AC is
4837  * only injected into the guest when:
4838  *  - Guest CPL == 3 (user mode)
4839  *  - Guest has #AC detection enabled in CR0
4840  *  - Guest EFLAGS has AC bit set
4841  */
4842 bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu)
4843 {
4844 	if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
4845 		return true;
4846 
4847 	return vmx_get_cpl(vcpu) == 3 && kvm_read_cr0_bits(vcpu, X86_CR0_AM) &&
4848 	       (kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
4849 }
4850 
4851 static int handle_exception_nmi(struct kvm_vcpu *vcpu)
4852 {
4853 	struct vcpu_vmx *vmx = to_vmx(vcpu);
4854 	struct kvm_run *kvm_run = vcpu->run;
4855 	u32 intr_info, ex_no, error_code;
4856 	unsigned long cr2, rip, dr6;
4857 	u32 vect_info;
4858 
4859 	vect_info = vmx->idt_vectoring_info;
4860 	intr_info = vmx_get_intr_info(vcpu);
4861 
4862 	if (is_machine_check(intr_info) || is_nmi(intr_info))
4863 		return 1; /* handled by handle_exception_nmi_irqoff() */
4864 
4865 	if (is_invalid_opcode(intr_info))
4866 		return handle_ud(vcpu);
4867 
4868 	error_code = 0;
4869 	if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
4870 		error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
4871 
4872 	if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
4873 		WARN_ON_ONCE(!enable_vmware_backdoor);
4874 
4875 		/*
4876 		 * VMware backdoor emulation on #GP interception only handles
4877 		 * IN{S}, OUT{S}, and RDPMC, none of which generate a non-zero
4878 		 * error code on #GP.
4879 		 */
4880 		if (error_code) {
4881 			kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
4882 			return 1;
4883 		}
4884 		return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP);
4885 	}
4886 
4887 	/*
4888 	 * A #PF with PFEC.RSVD = 1 indicates that the guest is accessing
4889 	 * MMIO; it is better to report an internal error.
4890 	 * See the comments in vmx_handle_exit.
4891 	 */
4892 	if ((vect_info & VECTORING_INFO_VALID_MASK) &&
4893 	    !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
4894 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
4895 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
4896 		vcpu->run->internal.ndata = 4;
4897 		vcpu->run->internal.data[0] = vect_info;
4898 		vcpu->run->internal.data[1] = intr_info;
4899 		vcpu->run->internal.data[2] = error_code;
4900 		vcpu->run->internal.data[3] = vcpu->arch.last_vmentry_cpu;
4901 		return 0;
4902 	}
4903 
4904 	if (is_page_fault(intr_info)) {
4905 		cr2 = vmx_get_exit_qual(vcpu);
4906 		if (enable_ept && !vcpu->arch.apf.host_apf_flags) {
4907 			/*
4908 			 * EPT will cause page fault only if we need to
4909 			 * detect illegal GPAs.
4910 			 */
4911 			WARN_ON_ONCE(!allow_smaller_maxphyaddr);
4912 			kvm_fixup_and_inject_pf_error(vcpu, cr2, error_code);
4913 			return 1;
4914 		} else
4915 			return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
4916 	}
4917 
4918 	ex_no = intr_info & INTR_INFO_VECTOR_MASK;
4919 
4920 	if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
4921 		return handle_rmode_exception(vcpu, ex_no, error_code);
4922 
4923 	switch (ex_no) {
4924 	case DB_VECTOR:
4925 		dr6 = vmx_get_exit_qual(vcpu);
4926 		if (!(vcpu->guest_debug &
4927 		      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
4928 			if (is_icebp(intr_info))
4929 				WARN_ON(!skip_emulated_instruction(vcpu));
4930 
4931 			kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
4932 			return 1;
4933 		}
4934 		kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
4935 		kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
4936 		fallthrough;
4937 	case BP_VECTOR:
4938 		/*
4939 		 * Update instruction length as we may reinject #BP from
4940 		 * user space while in guest debugging mode. Reading it for
4941 		 * #DB as well causes no harm, it is not used in that case.
4942 		 */
4943 		vmx->vcpu.arch.event_exit_inst_len =
4944 			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
4945 		kvm_run->exit_reason = KVM_EXIT_DEBUG;
4946 		rip = kvm_rip_read(vcpu);
4947 		kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
4948 		kvm_run->debug.arch.exception = ex_no;
4949 		break;
4950 	case AC_VECTOR:
4951 		if (vmx_guest_inject_ac(vcpu)) {
4952 			kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
4953 			return 1;
4954 		}
4955 
4956 		/*
4957 		 * Handle split lock. Depending on detection mode this will
4958 		 * either warn and disable split lock detection for this
4959 		 * task or force SIGBUS on it.
4960 		 */
4961 		if (handle_guest_split_lock(kvm_rip_read(vcpu)))
4962 			return 1;
4963 		fallthrough;
4964 	default:
4965 		kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
4966 		kvm_run->ex.exception = ex_no;
4967 		kvm_run->ex.error_code = error_code;
4968 		break;
4969 	}
4970 	return 0;
4971 }
4972 
4973 static __always_inline int handle_external_interrupt(struct kvm_vcpu *vcpu)
4974 {
4975 	++vcpu->stat.irq_exits;
4976 	return 1;
4977 }
4978 
4979 static int handle_triple_fault(struct kvm_vcpu *vcpu)
4980 {
4981 	vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
4982 	vcpu->mmio_needed = 0;
4983 	return 0;
4984 }
4985 
4986 static int handle_io(struct kvm_vcpu *vcpu)
4987 {
4988 	unsigned long exit_qualification;
4989 	int size, in, string;
4990 	unsigned port;
4991 
4992 	exit_qualification = vmx_get_exit_qual(vcpu);
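	/*
	 * Decode the I/O exit qualification: bits 2:0 hold the access size
	 * minus one, bit 3 is set for IN/INS, bit 4 for string instructions
	 * (INS/OUTS), and bits 31:16 hold the port number.
	 */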
4993 	string = (exit_qualification & 16) != 0;
4994 
4995 	++vcpu->stat.io_exits;
4996 
4997 	if (string)
4998 		return kvm_emulate_instruction(vcpu, 0);
4999 
5000 	port = exit_qualification >> 16;
5001 	size = (exit_qualification & 7) + 1;
5002 	in = (exit_qualification & 8) != 0;
5003 
5004 	return kvm_fast_pio(vcpu, size, port, in);
5005 }
5006 
5007 static void
5008 vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
5009 {
5010 	/*
5011 	 * Patch in the VMCALL instruction:
5012 	 */
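	/* 0f 01 c1 is the opcode encoding of VMCALL. */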
5013 	hypercall[0] = 0x0f;
5014 	hypercall[1] = 0x01;
5015 	hypercall[2] = 0xc1;
5016 }
5017 
5018 /* called to set cr0 as appropriate for a mov-to-cr0 exit. */
5019 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
5020 {
5021 	if (is_guest_mode(vcpu)) {
5022 		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5023 		unsigned long orig_val = val;
5024 
5025 		/*
5026 		 * We get here when L2 changed cr0 in a way that did not change
5027 		 * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
5028 		 * but did change L0 shadowed bits. So we first calculate the
5029 		 * effective cr0 value that L1 would like to write into the
5030 		 * hardware. It consists of the L2-owned bits from the new
5031 		 * value combined with the L1-owned bits from L1's guest_cr0.
5032 		 */
5033 		val = (val & ~vmcs12->cr0_guest_host_mask) |
5034 			(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
5035 
5036 		if (!nested_guest_cr0_valid(vcpu, val))
5037 			return 1;
5038 
5039 		if (kvm_set_cr0(vcpu, val))
5040 			return 1;
5041 		vmcs_writel(CR0_READ_SHADOW, orig_val);
5042 		return 0;
5043 	} else {
5044 		if (to_vmx(vcpu)->nested.vmxon &&
5045 		    !nested_host_cr0_valid(vcpu, val))
5046 			return 1;
5047 
5048 		return kvm_set_cr0(vcpu, val);
5049 	}
5050 }
5051 
5052 static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
5053 {
5054 	if (is_guest_mode(vcpu)) {
5055 		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5056 		unsigned long orig_val = val;
5057 
5058 		/* analogously to handle_set_cr0 */
5059 		val = (val & ~vmcs12->cr4_guest_host_mask) |
5060 			(vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask);
5061 		if (kvm_set_cr4(vcpu, val))
5062 			return 1;
5063 		vmcs_writel(CR4_READ_SHADOW, orig_val);
5064 		return 0;
5065 	} else
5066 		return kvm_set_cr4(vcpu, val);
5067 }
5068 
5069 static int handle_desc(struct kvm_vcpu *vcpu)
5070 {
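	/*
	 * Descriptor-table exiting is enabled only to emulate UMIP, so
	 * CR4.UMIP should always be set when this handler runs.
	 */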
5071 	WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP));
5072 	return kvm_emulate_instruction(vcpu, 0);
5073 }
5074 
5075 static int handle_cr(struct kvm_vcpu *vcpu)
5076 {
5077 	unsigned long exit_qualification, val;
5078 	int cr;
5079 	int reg;
5080 	int err;
5081 	int ret;
5082 
5083 	exit_qualification = vmx_get_exit_qual(vcpu);
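	/*
	 * Decode the CR-access exit qualification: bits 3:0 give the
	 * control register number, bits 5:4 the access type (0 = MOV to
	 * CR, 1 = MOV from CR, 2 = CLTS, 3 = LMSW) and bits 11:8 the GPR
	 * operand.
	 */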
5084 	cr = exit_qualification & 15;
5085 	reg = (exit_qualification >> 8) & 15;
5086 	switch ((exit_qualification >> 4) & 3) {
5087 	case 0: /* mov to cr */
5088 		val = kvm_register_readl(vcpu, reg);
5089 		trace_kvm_cr_write(cr, val);
5090 		switch (cr) {
5091 		case 0:
5092 			err = handle_set_cr0(vcpu, val);
5093 			return kvm_complete_insn_gp(vcpu, err);
5094 		case 3:
5095 			WARN_ON_ONCE(enable_unrestricted_guest);
5096 			err = kvm_set_cr3(vcpu, val);
5097 			return kvm_complete_insn_gp(vcpu, err);
5098 		case 4:
5099 			err = handle_set_cr4(vcpu, val);
5100 			return kvm_complete_insn_gp(vcpu, err);
5101 		case 8: {
5102 				u8 cr8_prev = kvm_get_cr8(vcpu);
5103 				u8 cr8 = (u8)val;
5104 				err = kvm_set_cr8(vcpu, cr8);
5105 				ret = kvm_complete_insn_gp(vcpu, err);
5106 				if (lapic_in_kernel(vcpu))
5107 					return ret;
5108 				if (cr8_prev <= cr8)
5109 					return ret;
5110 				/*
5111 				 * TODO: we might be squashing a
5112 				 * KVM_GUESTDBG_SINGLESTEP-triggered
5113 				 * KVM_EXIT_DEBUG here.
5114 				 */
5115 				vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
5116 				return 0;
5117 			}
5118 		}
5119 		break;
5120 	case 2: /* clts */
5121 		WARN_ONCE(1, "Guest should always own CR0.TS");
5122 		vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
5123 		trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
5124 		return kvm_skip_emulated_instruction(vcpu);
5125 	case 1: /* mov from cr */
5126 		switch (cr) {
5127 		case 3:
5128 			WARN_ON_ONCE(enable_unrestricted_guest);
5129 			val = kvm_read_cr3(vcpu);
5130 			kvm_register_write(vcpu, reg, val);
5131 			trace_kvm_cr_read(cr, val);
5132 			return kvm_skip_emulated_instruction(vcpu);
5133 		case 8:
5134 			val = kvm_get_cr8(vcpu);
5135 			kvm_register_write(vcpu, reg, val);
5136 			trace_kvm_cr_read(cr, val);
5137 			return kvm_skip_emulated_instruction(vcpu);
5138 		}
5139 		break;
5140 	case 3: /* lmsw */
5141 		val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
5142 		trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
5143 		kvm_lmsw(vcpu, val);
5144 
5145 		return kvm_skip_emulated_instruction(vcpu);
5146 	default:
5147 		break;
5148 	}
5149 	vcpu->run->exit_reason = 0;
5150 	vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
5151 	       (int)(exit_qualification >> 4) & 3, cr);
5152 	return 0;
5153 }
5154 
5155 static int handle_dr(struct kvm_vcpu *vcpu)
5156 {
5157 	unsigned long exit_qualification;
5158 	int dr, dr7, reg;
5159 
5160 	exit_qualification = vmx_get_exit_qual(vcpu);
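	/* Bits 2:0 of the exit qualification hold the debug register number. */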
5161 	dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
5162 
5163 	/* First, if DR does not exist, trigger UD */
5164 	if (!kvm_require_dr(vcpu, dr))
5165 		return 1;
5166 
5167 	/* Do not handle if the CPL > 0, will trigger GP on re-entry */
5168 	if (!kvm_require_cpl(vcpu, 0))
5169 		return 1;
5170 	dr7 = vmcs_readl(GUEST_DR7);
5171 	if (dr7 & DR7_GD) {
5172 		/*
5173 		 * As the vm-exit takes precedence over the debug trap, we
5174 		 * need to emulate the latter, either for the host or the
5175 		 * guest debugging itself.
5176 		 */
5177 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
5178 			vcpu->run->debug.arch.dr6 = DR6_BD | DR6_RTM | DR6_FIXED_1;
5179 			vcpu->run->debug.arch.dr7 = dr7;
5180 			vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
5181 			vcpu->run->debug.arch.exception = DB_VECTOR;
5182 			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
5183 			return 0;
5184 		} else {
5185 			kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BD);
5186 			return 1;
5187 		}
5188 	}
5189 
5190 	if (vcpu->guest_debug == 0) {
5191 		exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);
5192 
5193 		/*
5194 		 * No more DR vmexits; force a reload of the debug registers
5195 		 * and reenter on this instruction.  The next vmexit will
5196 		 * retrieve the full state of the debug registers.
5197 		 */
5198 		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
5199 		return 1;
5200 	}
5201 
5202 	reg = DEBUG_REG_ACCESS_REG(exit_qualification);
5203 	if (exit_qualification & TYPE_MOV_FROM_DR) {
5204 		unsigned long val;
5205 
5206 		if (kvm_get_dr(vcpu, dr, &val))
5207 			return 1;
5208 		kvm_register_write(vcpu, reg, val);
5209 	} else
5210 		if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)))
5211 			return 1;
5212 
5213 	return kvm_skip_emulated_instruction(vcpu);
5214 }
5215 
5216 static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
5217 {
5218 	get_debugreg(vcpu->arch.db[0], 0);
5219 	get_debugreg(vcpu->arch.db[1], 1);
5220 	get_debugreg(vcpu->arch.db[2], 2);
5221 	get_debugreg(vcpu->arch.db[3], 3);
5222 	get_debugreg(vcpu->arch.dr6, 6);
5223 	vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);
5224 
5225 	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
5226 	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);
5227 }
5228 
5229 static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
5230 {
5231 	vmcs_writel(GUEST_DR7, val);
5232 }
5233 
5234 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
5235 {
5236 	kvm_apic_update_ppr(vcpu);
5237 	return 1;
5238 }
5239 
5240 static int handle_interrupt_window(struct kvm_vcpu *vcpu)
5241 {
5242 	exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
5243 
5244 	kvm_make_request(KVM_REQ_EVENT, vcpu);
5245 
5246 	++vcpu->stat.irq_window_exits;
5247 	return 1;
5248 }
5249 
5250 static int handle_vmcall(struct kvm_vcpu *vcpu)
5251 {
5252 	return kvm_emulate_hypercall(vcpu);
5253 }
5254 
5255 static int handle_invd(struct kvm_vcpu *vcpu)
5256 {
5257 	/* Treat an INVD instruction as a NOP and just skip it. */
5258 	return kvm_skip_emulated_instruction(vcpu);
5259 }
5260 
5261 static int handle_invlpg(struct kvm_vcpu *vcpu)
5262 {
5263 	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5264 
5265 	kvm_mmu_invlpg(vcpu, exit_qualification);
5266 	return kvm_skip_emulated_instruction(vcpu);
5267 }
5268 
5269 static int handle_rdpmc(struct kvm_vcpu *vcpu)
5270 {
5271 	int err;
5272 
5273 	err = kvm_rdpmc(vcpu);
5274 	return kvm_complete_insn_gp(vcpu, err);
5275 }
5276 
5277 static int handle_wbinvd(struct kvm_vcpu *vcpu)
5278 {
5279 	return kvm_emulate_wbinvd(vcpu);
5280 }
5281 
5282 static int handle_xsetbv(struct kvm_vcpu *vcpu)
5283 {
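	/* XSETBV takes the XCR index in ECX and the new value in EDX:EAX. */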
5284 	u64 new_bv = kvm_read_edx_eax(vcpu);
5285 	u32 index = kvm_rcx_read(vcpu);
5286 
5287 	if (kvm_set_xcr(vcpu, index, new_bv) == 0)
5288 		return kvm_skip_emulated_instruction(vcpu);
5289 	return 1;
5290 }
5291 
5292 static int handle_apic_access(struct kvm_vcpu *vcpu)
5293 {
5294 	if (likely(fasteoi)) {
5295 		unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5296 		int access_type, offset;
5297 
5298 		access_type = exit_qualification & APIC_ACCESS_TYPE;
5299 		offset = exit_qualification & APIC_ACCESS_OFFSET;
5300 		/*
5301 		 * A sane guest uses MOV to write the EOI register, and the
5302 		 * written value is ignored. Short-circuit that case here to
5303 		 * avoid heavy instruction emulation.
5304 		 */
5305 		if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
5306 		    (offset == APIC_EOI)) {
5307 			kvm_lapic_set_eoi(vcpu);
5308 			return kvm_skip_emulated_instruction(vcpu);
5309 		}
5310 	}
5311 	return kvm_emulate_instruction(vcpu, 0);
5312 }
5313 
5314 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
5315 {
5316 	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5317 	int vector = exit_qualification & 0xff;
5318 
5319 	/* EOI-induced VM exit is trap-like and thus no need to adjust IP */
5320 	kvm_apic_set_eoi_accelerated(vcpu, vector);
5321 	return 1;
5322 }
5323 
5324 static int handle_apic_write(struct kvm_vcpu *vcpu)
5325 {
5326 	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5327 	u32 offset = exit_qualification & 0xfff;
5328 
5329 	/* APIC-write VM exit is trap-like and thus no need to adjust IP */
5330 	kvm_apic_write_nodecode(vcpu, offset);
5331 	return 1;
5332 }
5333 
5334 static int handle_task_switch(struct kvm_vcpu *vcpu)
5335 {
5336 	struct vcpu_vmx *vmx = to_vmx(vcpu);
5337 	unsigned long exit_qualification;
5338 	bool has_error_code = false;
5339 	u32 error_code = 0;
5340 	u16 tss_selector;
5341 	int reason, type, idt_v, idt_index;
5342 
5343 	idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
5344 	idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
5345 	type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
5346 
5347 	exit_qualification = vmx_get_exit_qual(vcpu);
5348 
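	/*
	 * Bits 31:30 of the exit qualification identify the cause of the
	 * task switch: 0 = CALL, 1 = IRET, 2 = JMP, 3 = task gate in IDT.
	 */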
5349 	reason = (u32)exit_qualification >> 30;
5350 	if (reason == TASK_SWITCH_GATE && idt_v) {
5351 		switch (type) {
5352 		case INTR_TYPE_NMI_INTR:
5353 			vcpu->arch.nmi_injected = false;
5354 			vmx_set_nmi_mask(vcpu, true);
5355 			break;
5356 		case INTR_TYPE_EXT_INTR:
5357 		case INTR_TYPE_SOFT_INTR:
5358 			kvm_clear_interrupt_queue(vcpu);
5359 			break;
5360 		case INTR_TYPE_HARD_EXCEPTION:
5361 			if (vmx->idt_vectoring_info &
5362 			    VECTORING_INFO_DELIVER_CODE_MASK) {
5363 				has_error_code = true;
5364 				error_code =
5365 					vmcs_read32(IDT_VECTORING_ERROR_CODE);
5366 			}
5367 			fallthrough;
5368 		case INTR_TYPE_SOFT_EXCEPTION:
5369 			kvm_clear_exception_queue(vcpu);
5370 			break;
5371 		default:
5372 			break;
5373 		}
5374 	}
5375 	tss_selector = exit_qualification;
5376 
5377 	if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
5378 		       type != INTR_TYPE_EXT_INTR &&
5379 		       type != INTR_TYPE_NMI_INTR))
5380 		WARN_ON(!skip_emulated_instruction(vcpu));
5381 
5382 	/*
5383 	 * TODO: What about debug traps on tss switch?
5384 	 *       Are we supposed to inject them and update dr6?
5385 	 */
5386 	return kvm_task_switch(vcpu, tss_selector,
5387 			       type == INTR_TYPE_SOFT_INTR ? idt_index : -1,
5388 			       reason, has_error_code, error_code);
5389 }
5390 
5391 static int handle_ept_violation(struct kvm_vcpu *vcpu)
5392 {
5393 	unsigned long exit_qualification;
5394 	gpa_t gpa;
5395 	u64 error_code;
5396 
5397 	exit_qualification = vmx_get_exit_qual(vcpu);
5398 
5399 	/*
5400 	 * If the EPT violation happened while executing IRET from NMI, the
5401 	 * "blocked by NMI" bit has to be set before the next VM entry.
5402 	 * There are errata that may cause this bit to not be set:
5403 	 * AAK134, BY25.
5404 	 */
5405 	if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
5406 			enable_vnmi &&
5407 			(exit_qualification & INTR_INFO_UNBLOCK_NMI))
5408 		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
5409 
5410 	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
5411 	trace_kvm_page_fault(gpa, exit_qualification);
5412 
5413 	/* Is it a read fault? */
5414 	error_code = (exit_qualification & EPT_VIOLATION_ACC_READ)
5415 		     ? PFERR_USER_MASK : 0;
5416 	/* Is it a write fault? */
5417 	error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE)
5418 		      ? PFERR_WRITE_MASK : 0;
5419 	/* Is it a fetch fault? */
5420 	error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
5421 		      ? PFERR_FETCH_MASK : 0;
5422 	/* Is the EPT page-table entry present? */
5423 	error_code |= (exit_qualification &
5424 		       (EPT_VIOLATION_READABLE | EPT_VIOLATION_WRITABLE |
5425 			EPT_VIOLATION_EXECUTABLE))
5426 		      ? PFERR_PRESENT_MASK : 0;
5427 
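	/*
	 * Exit-qualification bit 8 distinguishes a fault on the final
	 * guest linear-address translation (set) from a fault on an
	 * access to a guest paging-structure entry (clear).
	 */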
5428 	error_code |= (exit_qualification & 0x100) != 0 ?
5429 	       PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
5430 
5431 	vcpu->arch.exit_qualification = exit_qualification;
5432 
5433 	/*
5434 	 * Check that the GPA doesn't exceed physical memory limits, as that is
5435 	 * a guest page fault.  We have to emulate the instruction here, because
5436 	 * if the illegal address is that of a paging structure, then
5437 	 * EPT_VIOLATION_ACC_WRITE bit is set.  Alternatively, if supported we
5438 	 * would also use advanced VM-exit information for EPT violations to
5439 	 * reconstruct the page fault error code.
5440 	 */
5441 	if (unlikely(allow_smaller_maxphyaddr && kvm_vcpu_is_illegal_gpa(vcpu, gpa)))
5442 		return kvm_emulate_instruction(vcpu, 0);
5443 
5444 	return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
5445 }
5446 
5447 static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
5448 {
5449 	gpa_t gpa;
5450 
5451 	/*
5452 	 * A nested guest cannot optimize MMIO vmexits, because we have an
5453 	 * nGPA here instead of the required GPA.
5454 	 */
5455 	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
5456 	if (!is_guest_mode(vcpu) &&
5457 	    !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
5458 		trace_kvm_fast_mmio(gpa);
5459 		return kvm_skip_emulated_instruction(vcpu);
5460 	}
5461 
5462 	return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
5463 }
5464 
5465 static int handle_nmi_window(struct kvm_vcpu *vcpu)
5466 {
5467 	WARN_ON_ONCE(!enable_vnmi);
5468 	exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
5469 	++vcpu->stat.nmi_window_exits;
5470 	kvm_make_request(KVM_REQ_EVENT, vcpu);
5471 
5472 	return 1;
5473 }
5474 
5475 static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
5476 {
5477 	struct vcpu_vmx *vmx = to_vmx(vcpu);
5478 	bool intr_window_requested;
5479 	unsigned count = 130;
5480 
5481 	intr_window_requested = exec_controls_get(vmx) &
5482 				CPU_BASED_INTR_WINDOW_EXITING;
5483 
5484 	while (vmx->emulation_required && count-- != 0) {
5485 		if (intr_window_requested && !vmx_interrupt_blocked(vcpu))
5486 			return handle_interrupt_window(&vmx->vcpu);
5487 
5488 		if (kvm_test_request(KVM_REQ_EVENT, vcpu))
5489 			return 1;
5490 
5491 		if (!kvm_emulate_instruction(vcpu, 0))
5492 			return 0;
5493 
5494 		if (vmx->emulation_required && !vmx->rmode.vm86_active &&
5495 		    vcpu->arch.exception.pending) {
5496 			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
5497 			vcpu->run->internal.suberror =
5498 						KVM_INTERNAL_ERROR_EMULATION;
5499 			vcpu->run->internal.ndata = 0;
5500 			return 0;
5501 		}
5502 
5503 		if (vcpu->arch.halt_request) {
5504 			vcpu->arch.halt_request = 0;
5505 			return kvm_vcpu_halt(vcpu);
5506 		}
5507 
5508 		/*
5509 		 * Note, return 1 and not 0, vcpu_run() will invoke
5510 		 * xfer_to_guest_mode() which will create a proper return
5511 		 * code.
5512 		 */
5513 		if (__xfer_to_guest_mode_work_pending())
5514 			return 1;
5515 	}
5516 
5517 	return 1;
5518 }
5519 
5520 static void grow_ple_window(struct kvm_vcpu *vcpu)
5521 {
5522 	struct vcpu_vmx *vmx = to_vmx(vcpu);
5523 	unsigned int old = vmx->ple_window;
5524 
5525 	vmx->ple_window = __grow_ple_window(old, ple_window,
5526 					    ple_window_grow,
5527 					    ple_window_max);
5528 
5529 	if (vmx->ple_window != old) {
5530 		vmx->ple_window_dirty = true;
5531 		trace_kvm_ple_window_update(vcpu->vcpu_id,
5532 					    vmx->ple_window, old);
5533 	}
5534 }
5535 
5536 static void shrink_ple_window(struct kvm_vcpu *vcpu)
5537 {
5538 	struct vcpu_vmx *vmx = to_vmx(vcpu);
5539 	unsigned int old = vmx->ple_window;
5540 
5541 	vmx->ple_window = __shrink_ple_window(old, ple_window,
5542 					      ple_window_shrink,
5543 					      ple_window);
5544 
5545 	if (vmx->ple_window != old) {
5546 		vmx->ple_window_dirty = true;
5547 		trace_kvm_ple_window_update(vcpu->vcpu_id,
5548 					    vmx->ple_window, old);
5549 	}
5550 }
5551 
5552 static void vmx_enable_tdp(void)
5553 {
5554 	kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK,
5555 		enable_ept_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull,
5556 		enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull,
5557 		0ull, VMX_EPT_EXECUTABLE_MASK,
5558 		cpu_has_vmx_ept_execute_only() ? 0ull : VMX_EPT_READABLE_MASK,
5559 		VMX_EPT_RWX_MASK, 0ull);
5560 
5561 	ept_set_mmio_spte_mask();
5562 }
5563 
5564 /*
5565  * Indicate that the vcpu is busy-waiting on a spinlock. We never enable
5566  * plain PAUSE exiting, so we only get here on CPUs with PAUSE-loop exiting.
5567  */
5568 static int handle_pause(struct kvm_vcpu *vcpu)
5569 {
5570 	if (!kvm_pause_in_guest(vcpu->kvm))
5571 		grow_ple_window(vcpu);
5572 
5573 	/*
5574 	 * Intel SDM vol. 3 ch. 25.1.3 says: the "PAUSE-loop exiting"
5575 	 * VM-execution control is ignored if CPL > 0. OTOH, KVM
5576 	 * never sets PAUSE_EXITING and only sets PLE if supported,
5577 	 * so the vcpu must be at CPL 0 if it gets a PAUSE exit.
5578 	 */
5579 	kvm_vcpu_on_spin(vcpu, true);
5580 	return kvm_skip_emulated_instruction(vcpu);
5581 }
5582 
5583 static int handle_nop(struct kvm_vcpu *vcpu)
5584 {
5585 	return kvm_skip_emulated_instruction(vcpu);
5586 }
5587 
5588 static int handle_mwait(struct kvm_vcpu *vcpu)
5589 {
5590 	printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
5591 	return handle_nop(vcpu);
5592 }
5593 
5594 static int handle_invalid_op(struct kvm_vcpu *vcpu)
5595 {
5596 	kvm_queue_exception(vcpu, UD_VECTOR);
5597 	return 1;
5598 }
5599 
5600 static int handle_monitor_trap(struct kvm_vcpu *vcpu)
5601 {
5602 	return 1;
5603 }
5604 
5605 static int handle_monitor(struct kvm_vcpu *vcpu)
5606 {
5607 	printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
5608 	return handle_nop(vcpu);
5609 }
5610 
5611 static int handle_invpcid(struct kvm_vcpu *vcpu)
5612 {
5613 	u32 vmx_instruction_info;
5614 	unsigned long type;
5615 	gva_t gva;
5616 	struct {
5617 		u64 pcid;
5618 		u64 gla;
5619 	} operand;
5620 
5621 	if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
5622 		kvm_queue_exception(vcpu, UD_VECTOR);
5623 		return 1;
5624 	}
5625 
5626 	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
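	/*
	 * The register holding the INVPCID type is encoded in bits 31:28
	 * of the VM-exit instruction-information field.
	 */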
5627 	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
5628 
5629 	if (type > 3) {
5630 		kvm_inject_gp(vcpu, 0);
5631 		return 1;
5632 	}
5633 
5634 	/* According to the Intel instruction reference, the memory operand
5635 	 * is read even if it isn't needed (e.g., for type==all)
5636 	 */
5637 	if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
5638 				vmx_instruction_info, false,
5639 				sizeof(operand), &gva))
5640 		return 1;
5641 
5642 	return kvm_handle_invpcid(vcpu, type, gva);
5643 }
5644 
5645 static int handle_pml_full(struct kvm_vcpu *vcpu)
5646 {
5647 	unsigned long exit_qualification;
5648 
5649 	trace_kvm_pml_full(vcpu->vcpu_id);
5650 
5651 	exit_qualification = vmx_get_exit_qual(vcpu);
5652 
5653 	/*
5654 	 * If the PML buffer became full while executing IRET from NMI, the
5655 	 * "blocked by NMI" bit has to be set before the next VM entry.
5656 	 */
5657 	if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
5658 			enable_vnmi &&
5659 			(exit_qualification & INTR_INFO_UNBLOCK_NMI))
5660 		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
5661 				GUEST_INTR_STATE_NMI);
5662 
5663 	/*
5664 	 * The PML buffer was already flushed at the beginning of the VMEXIT.
5665 	 * Nothing to do here, and no userspace involvement is needed for PML.
5666 	 */
5667 	return 1;
5668 }
5669 
5670 static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu)
5671 {
5672 	struct vcpu_vmx *vmx = to_vmx(vcpu);
5673 
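	/*
	 * Treat the exit as a genuine timer expiration only if KVM did not
	 * program the preemption timer to force an immediate exit and the
	 * hypervisor timer was not soft-disabled; in that case the expired
	 * timer can be handled and the guest re-entered via the fastpath.
	 */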
5674 	if (!vmx->req_immediate_exit &&
5675 	    !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) {
5676 		kvm_lapic_expired_hv_timer(vcpu);
5677 		return EXIT_FASTPATH_REENTER_GUEST;
5678 	}
5679 
5680 	return EXIT_FASTPATH_NONE;
5681 }
5682 
5683 static int handle_preemption_timer(struct kvm_vcpu *vcpu)
5684 {
5685 	handle_fastpath_preemption_timer(vcpu);
5686 	return 1;
5687 }
5688 
5689 /*
5690  * When nested=0, all VMX instruction VM Exits filter here.  The handlers
5691  * are overwritten by nested_vmx_setup() when nested=1.
5692  */
5693 static int handle_vmx_instruction(struct kvm_vcpu *vcpu)
5694 {
5695 	kvm_queue_exception(vcpu, UD_VECTOR);
5696 	return 1;
5697 }
5698 
5699 static int handle_encls(struct kvm_vcpu *vcpu)
5700 {
5701 	/*
5702 	 * SGX virtualization is not yet supported.  There is no software
5703 	 * enable bit for SGX, so we have to trap ENCLS and inject a #UD
5704 	 * to prevent the guest from executing ENCLS.
5705 	 */
5706 	kvm_queue_exception(vcpu, UD_VECTOR);
5707 	return 1;
5708 }
5709 
5710 /*
5711  * The exit handlers return 1 if the exit was handled fully and guest execution
5712  * may resume.  Otherwise they set the kvm_run parameter to indicate to
5713  * userspace what needs to be done and return 0.
5714  */
5715 static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
5716 	[EXIT_REASON_EXCEPTION_NMI]           = handle_exception_nmi,
5717 	[EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
5718 	[EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
5719 	[EXIT_REASON_NMI_WINDOW]	      = handle_nmi_window,
5720 	[EXIT_REASON_IO_INSTRUCTION]          = handle_io,
5721 	[EXIT_REASON_CR_ACCESS]               = handle_cr,
5722 	[EXIT_REASON_DR_ACCESS]               = handle_dr,
5723 	[EXIT_REASON_CPUID]                   = kvm_emulate_cpuid,
5724 	[EXIT_REASON_MSR_READ]                = kvm_emulate_rdmsr,
5725 	[EXIT_REASON_MSR_WRITE]               = kvm_emulate_wrmsr,
5726 	[EXIT_REASON_INTERRUPT_WINDOW]        = handle_interrupt_window,
5727 	[EXIT_REASON_HLT]                     = kvm_emulate_halt,
5728 	[EXIT_REASON_INVD]		      = handle_invd,
5729 	[EXIT_REASON_INVLPG]		      = handle_invlpg,
5730 	[EXIT_REASON_RDPMC]                   = handle_rdpmc,
5731 	[EXIT_REASON_VMCALL]                  = handle_vmcall,
5732 	[EXIT_REASON_VMCLEAR]		      = handle_vmx_instruction,
5733 	[EXIT_REASON_VMLAUNCH]		      = handle_vmx_instruction,
5734 	[EXIT_REASON_VMPTRLD]		      = handle_vmx_instruction,
5735 	[EXIT_REASON_VMPTRST]		      = handle_vmx_instruction,
5736 	[EXIT_REASON_VMREAD]		      = handle_vmx_instruction,
5737 	[EXIT_REASON_VMRESUME]		      = handle_vmx_instruction,
5738 	[EXIT_REASON_VMWRITE]		      = handle_vmx_instruction,
5739 	[EXIT_REASON_VMOFF]		      = handle_vmx_instruction,
5740 	[EXIT_REASON_VMON]		      = handle_vmx_instruction,
5741 	[EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
5742 	[EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
5743 	[EXIT_REASON_APIC_WRITE]              = handle_apic_write,
5744 	[EXIT_REASON_EOI_INDUCED]             = handle_apic_eoi_induced,
5745 	[EXIT_REASON_WBINVD]                  = handle_wbinvd,
5746 	[EXIT_REASON_XSETBV]                  = handle_xsetbv,
5747 	[EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
5748 	[EXIT_REASON_MCE_DURING_VMENTRY]      = handle_machine_check,
5749 	[EXIT_REASON_GDTR_IDTR]		      = handle_desc,
5750 	[EXIT_REASON_LDTR_TR]		      = handle_desc,
5751 	[EXIT_REASON_EPT_VIOLATION]	      = handle_ept_violation,
5752 	[EXIT_REASON_EPT_MISCONFIG]           = handle_ept_misconfig,
5753 	[EXIT_REASON_PAUSE_INSTRUCTION]       = handle_pause,
5754 	[EXIT_REASON_MWAIT_INSTRUCTION]	      = handle_mwait,
5755 	[EXIT_REASON_MONITOR_TRAP_FLAG]       = handle_monitor_trap,
5756 	[EXIT_REASON_MONITOR_INSTRUCTION]     = handle_monitor,
5757 	[EXIT_REASON_INVEPT]                  = handle_vmx_instruction,
5758 	[EXIT_REASON_INVVPID]                 = handle_vmx_instruction,
5759 	[EXIT_REASON_RDRAND]                  = handle_invalid_op,
5760 	[EXIT_REASON_RDSEED]                  = handle_invalid_op,
5761 	[EXIT_REASON_PML_FULL]		      = handle_pml_full,
5762 	[EXIT_REASON_INVPCID]                 = handle_invpcid,
5763 	[EXIT_REASON_VMFUNC]		      = handle_vmx_instruction,
5764 	[EXIT_REASON_PREEMPTION_TIMER]	      = handle_preemption_timer,
5765 	[EXIT_REASON_ENCLS]		      = handle_encls,
5766 };
5767 
5768 static const int kvm_vmx_max_exit_handlers =
5769 	ARRAY_SIZE(kvm_vmx_exit_handlers);
5770 
5771 static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2,
5772 			      u32 *intr_info, u32 *error_code)
5773 {
5774 	struct vcpu_vmx *vmx = to_vmx(vcpu);
5775 
5776 	*info1 = vmx_get_exit_qual(vcpu);
5777 	if (!(vmx->exit_reason.failed_vmentry)) {
5778 		*info2 = vmx->idt_vectoring_info;
5779 		*intr_info = vmx_get_intr_info(vcpu);
5780 		if (is_exception_with_error_code(*intr_info))
5781 			*error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
5782 		else
5783 			*error_code = 0;
5784 	} else {
5785 		*info2 = 0;
5786 		*intr_info = 0;
5787 		*error_code = 0;
5788 	}
5789 }
5790 
5791 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
5792 {
5793 	if (vmx->pml_pg) {
5794 		__free_page(vmx->pml_pg);
5795 		vmx->pml_pg = NULL;
5796 	}
5797 }
5798 
5799 static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
5800 {
5801 	struct vcpu_vmx *vmx = to_vmx(vcpu);
5802 	u64 *pml_buf;
5803 	u16 pml_idx;
5804 
5805 	pml_idx = vmcs_read16(GUEST_PML_INDEX);
5806 
5807 	/* Do nothing if PML buffer is empty */
5808 	if (pml_idx == (PML_ENTITY_NUM - 1))
5809 		return;
5810 
5811 	/* PML index always points to next available PML buffer entity */
5812 	if (pml_idx >= PML_ENTITY_NUM)
5813 		pml_idx = 0;
5814 	else
5815 		pml_idx++;
5816 
5817 	pml_buf = page_address(vmx->pml_pg);
5818 	for (; pml_idx < PML_ENTITY_NUM; pml_idx++) {
5819 		u64 gpa;
5820 
5821 		gpa = pml_buf[pml_idx];
5822 		WARN_ON(gpa & (PAGE_SIZE - 1));
5823 		kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
5824 	}
5825 
5826 	/* reset PML index */
5827 	vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
5828 }
5829 
5830 /*
5831  * Flush all vcpus' PML buffer and update logged GPAs to dirty_bitmap.
5832  * Called before reporting dirty_bitmap to userspace.
5833  */
5834 static void kvm_flush_pml_buffers(struct kvm *kvm)
5835 {
5836 	int i;
5837 	struct kvm_vcpu *vcpu;
5838 	/*
5839 	 * We only need to kick each vcpu out of guest mode here, as the PML
5840 	 * buffer is flushed at the beginning of every VMEXIT, so only vcpus
5841 	 * currently running in guest mode can have unflushed GPAs in their
5842 	 * PML buffers.
5843 	 */
5844 	kvm_for_each_vcpu(i, vcpu, kvm)
5845 		kvm_vcpu_kick(vcpu);
5846 }
5847 
5848 static void vmx_dump_sel(char *name, uint32_t sel)
5849 {
5850 	pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
5851 	       name, vmcs_read16(sel),
5852 	       vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
5853 	       vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
5854 	       vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
5855 }
5856 
5857 static void vmx_dump_dtsel(char *name, uint32_t limit)
5858 {
5859 	pr_err("%s                           limit=0x%08x, base=0x%016lx\n",
5860 	       name, vmcs_read32(limit),
5861 	       vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
5862 }
5863 
5864 void dump_vmcs(void)
5865 {
5866 	u32 vmentry_ctl, vmexit_ctl;
5867 	u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control;
5868 	unsigned long cr4;
5869 
5870 	if (!dump_invalid_vmcs) {
5871 		pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n");
5872 		return;
5873 	}
5874 
5875 	vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
5876 	vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
5877 	cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
5878 	pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
5879 	cr4 = vmcs_readl(GUEST_CR4);
5880 	secondary_exec_control = 0;
5881 	if (cpu_has_secondary_exec_ctrls())
5882 		secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
5883 
5884 	pr_err("*** Guest State ***\n");
5885 	pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
5886 	       vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW),
5887 	       vmcs_readl(CR0_GUEST_HOST_MASK));
5888 	pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
5889 	       cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
5890 	pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
5891 	if (cpu_has_vmx_ept()) {
5892 		pr_err("PDPTR0 = 0x%016llx  PDPTR1 = 0x%016llx\n",
5893 		       vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
5894 		pr_err("PDPTR2 = 0x%016llx  PDPTR3 = 0x%016llx\n",
5895 		       vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3));
5896 	}
5897 	pr_err("RSP = 0x%016lx  RIP = 0x%016lx\n",
5898 	       vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP));
5899 	pr_err("RFLAGS=0x%08lx         DR7 = 0x%016lx\n",
5900 	       vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7));
5901 	pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
5902 	       vmcs_readl(GUEST_SYSENTER_ESP),
5903 	       vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP));
5904 	vmx_dump_sel("CS:  ", GUEST_CS_SELECTOR);
5905 	vmx_dump_sel("DS:  ", GUEST_DS_SELECTOR);
5906 	vmx_dump_sel("SS:  ", GUEST_SS_SELECTOR);
5907 	vmx_dump_sel("ES:  ", GUEST_ES_SELECTOR);
5908 	vmx_dump_sel("FS:  ", GUEST_FS_SELECTOR);
5909 	vmx_dump_sel("GS:  ", GUEST_GS_SELECTOR);
5910 	vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT);
5911 	vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR);
5912 	vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT);
5913 	vmx_dump_sel("TR:  ", GUEST_TR_SELECTOR);
5914 	if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) ||
5915 	    (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER)))
5916 		pr_err("EFER =     0x%016llx  PAT = 0x%016llx\n",
5917 		       vmcs_read64(GUEST_IA32_EFER),
5918 		       vmcs_read64(GUEST_IA32_PAT));
5919 	pr_err("DebugCtl = 0x%016llx  DebugExceptions = 0x%016lx\n",
5920 	       vmcs_read64(GUEST_IA32_DEBUGCTL),
5921 	       vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
5922 	if (cpu_has_load_perf_global_ctrl() &&
5923 	    vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
5924 		pr_err("PerfGlobCtl = 0x%016llx\n",
5925 		       vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL));
5926 	if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS)
5927 		pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS));
5928 	pr_err("Interruptibility = %08x  ActivityState = %08x\n",
5929 	       vmcs_read32(GUEST_INTERRUPTIBILITY_INFO),
5930 	       vmcs_read32(GUEST_ACTIVITY_STATE));
5931 	if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
5932 		pr_err("InterruptStatus = %04x\n",
5933 		       vmcs_read16(GUEST_INTR_STATUS));
5934 
5935 	pr_err("*** Host State ***\n");
5936 	pr_err("RIP = 0x%016lx  RSP = 0x%016lx\n",
5937 	       vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP));
5938 	pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n",
5939 	       vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR),
5940 	       vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR),
5941 	       vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR),
5942 	       vmcs_read16(HOST_TR_SELECTOR));
5943 	pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n",
5944 	       vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE),
5945 	       vmcs_readl(HOST_TR_BASE));
5946 	pr_err("GDTBase=%016lx IDTBase=%016lx\n",
5947 	       vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE));
5948 	pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n",
5949 	       vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3),
5950 	       vmcs_readl(HOST_CR4));
5951 	pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
5952 	       vmcs_readl(HOST_IA32_SYSENTER_ESP),
5953 	       vmcs_read32(HOST_IA32_SYSENTER_CS),
5954 	       vmcs_readl(HOST_IA32_SYSENTER_EIP));
5955 	if (vmexit_ctl & (VM_EXIT_LOAD_IA32_PAT | VM_EXIT_LOAD_IA32_EFER))
5956 		pr_err("EFER = 0x%016llx  PAT = 0x%016llx\n",
5957 		       vmcs_read64(HOST_IA32_EFER),
5958 		       vmcs_read64(HOST_IA32_PAT));
5959 	if (cpu_has_load_perf_global_ctrl() &&
5960 	    vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
5961 		pr_err("PerfGlobCtl = 0x%016llx\n",
5962 		       vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL));
5963 
5964 	pr_err("*** Control State ***\n");
5965 	pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n",
5966 	       pin_based_exec_ctrl, cpu_based_exec_ctrl, secondary_exec_control);
5967 	pr_err("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl);
5968 	pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n",
5969 	       vmcs_read32(EXCEPTION_BITMAP),
5970 	       vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK),
5971 	       vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH));
5972 	pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
5973 	       vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
5974 	       vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE),
5975 	       vmcs_read32(VM_ENTRY_INSTRUCTION_LEN));
5976 	pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
5977 	       vmcs_read32(VM_EXIT_INTR_INFO),
5978 	       vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
5979 	       vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
5980 	pr_err("        reason=%08x qualification=%016lx\n",
5981 	       vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION));
5982 	pr_err("IDTVectoring: info=%08x errcode=%08x\n",
5983 	       vmcs_read32(IDT_VECTORING_INFO_FIELD),
5984 	       vmcs_read32(IDT_VECTORING_ERROR_CODE));
5985 	pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET));
5986 	if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
5987 		pr_err("TSC Multiplier = 0x%016llx\n",
5988 		       vmcs_read64(TSC_MULTIPLIER));
5989 	if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW) {
5990 		if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) {
5991 			u16 status = vmcs_read16(GUEST_INTR_STATUS);
5992 			pr_err("SVI|RVI = %02x|%02x ", status >> 8, status & 0xff);
5993 		}
5994 		pr_cont("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
5995 		if (secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
5996 			pr_err("APIC-access addr = 0x%016llx ", vmcs_read64(APIC_ACCESS_ADDR));
5997 		pr_cont("virt-APIC addr = 0x%016llx\n", vmcs_read64(VIRTUAL_APIC_PAGE_ADDR));
5998 	}
5999 	if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
6000 		pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
6001 	if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
6002 		pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER));
6003 	if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
6004 		pr_err("PLE Gap=%08x Window=%08x\n",
6005 		       vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW));
6006 	if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
6007 		pr_err("Virtual processor ID = 0x%04x\n",
6008 		       vmcs_read16(VIRTUAL_PROCESSOR_ID));
6009 }
6010 
6011 /*
6012  * The guest has exited.  See if we can fix it or if we need userspace
6013  * assistance.
6014  */
6015 static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
6016 {
6017 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6018 	union vmx_exit_reason exit_reason = vmx->exit_reason;
6019 	u32 vectoring_info = vmx->idt_vectoring_info;
6020 	u16 exit_handler_index;
6021 
6022 	/*
6023 	 * Flush the PML buffer of logged GPAs so that dirty_bitmap is as up
6024 	 * to date as possible. As a bonus, kvm_vm_ioctl_get_dirty_log only
6025 	 * needs to kick all vcpus out of guest mode before querying
6026 	 * dirty_bitmap: once a vcpu is back in root mode, its PML buffer
6027 	 * must already have been flushed.
6028 	 */
6029 	if (enable_pml)
6030 		vmx_flush_pml_buffer(vcpu);
6031 
6032 	/*
6033 	 * We should never reach this point with a pending nested VM-Enter, and
6034 	 * more specifically emulation of L2 due to invalid guest state (see
6035 	 * below) should never happen as that means we incorrectly allowed a
6036 	 * nested VM-Enter with an invalid vmcs12.
6037 	 */
6038 	WARN_ON_ONCE(vmx->nested.nested_run_pending);
6039 
6040 	/* If guest state is invalid, start emulating */
6041 	if (vmx->emulation_required)
6042 		return handle_invalid_guest_state(vcpu);
6043 
6044 	if (is_guest_mode(vcpu)) {
6045 		/*
6046 		 * The host physical addresses of some pages of guest memory
6047 		 * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
6048 		 * Page). The CPU may write to these pages via their host
6049 		 * physical address while L2 is running, bypassing any
6050 		 * address-translation-based dirty tracking (e.g. EPT write
6051 		 * protection).
6052 		 *
6053 		 * Mark them dirty on every exit from L2 to prevent them from
6054 		 * getting out of sync with dirty tracking.
6055 		 */
6056 		nested_mark_vmcs12_pages_dirty(vcpu);
6057 
6058 		if (nested_vmx_reflect_vmexit(vcpu))
6059 			return 1;
6060 	}
6061 
6062 	if (exit_reason.failed_vmentry) {
6063 		dump_vmcs();
6064 		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
6065 		vcpu->run->fail_entry.hardware_entry_failure_reason
6066 			= exit_reason.full;
6067 		vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
6068 		return 0;
6069 	}
6070 
6071 	if (unlikely(vmx->fail)) {
6072 		dump_vmcs();
6073 		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
6074 		vcpu->run->fail_entry.hardware_entry_failure_reason
6075 			= vmcs_read32(VM_INSTRUCTION_ERROR);
6076 		vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
6077 		return 0;
6078 	}
6079 
6080 	/*
6081 	 * Note:
6082 	 * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused by
6083 	 * event delivery, since that indicates the guest is accessing MMIO.
6084 	 * The vm-exit would trigger again after returning to the guest,
6085 	 * causing an infinite loop.
6086 	 */
6087 	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
6088 	    (exit_reason.basic != EXIT_REASON_EXCEPTION_NMI &&
6089 	     exit_reason.basic != EXIT_REASON_EPT_VIOLATION &&
6090 	     exit_reason.basic != EXIT_REASON_PML_FULL &&
6091 	     exit_reason.basic != EXIT_REASON_APIC_ACCESS &&
6092 	     exit_reason.basic != EXIT_REASON_TASK_SWITCH)) {
6093 		int ndata = 3;
6094 
6095 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
6096 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
6097 		vcpu->run->internal.data[0] = vectoring_info;
6098 		vcpu->run->internal.data[1] = exit_reason.full;
6099 		vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
6100 		if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) {
6101 			vcpu->run->internal.data[ndata++] =
6102 				vmcs_read64(GUEST_PHYSICAL_ADDRESS);
6103 		}
6104 		vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
6105 		vcpu->run->internal.ndata = ndata;
6106 		return 0;
6107 	}
6108 
6109 	if (unlikely(!enable_vnmi &&
6110 		     vmx->loaded_vmcs->soft_vnmi_blocked)) {
6111 		if (!vmx_interrupt_blocked(vcpu)) {
6112 			vmx->loaded_vmcs->soft_vnmi_blocked = 0;
6113 		} else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
6114 			   vcpu->arch.nmi_pending) {
6115 			/*
6116 			 * This CPU doesn't let us detect the end of an
6117 			 * NMI-blocked window if the guest runs with IRQs
6118 			 * disabled. So we pull the trigger after 1 s of
6119 			 * futile waiting, but inform the user about it.
6120 			 */
6121 			printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
6122 			       "state on VCPU %d after 1 s timeout\n",
6123 			       __func__, vcpu->vcpu_id);
6124 			vmx->loaded_vmcs->soft_vnmi_blocked = 0;
6125 		}
6126 	}
6127 
6128 	if (exit_fastpath != EXIT_FASTPATH_NONE)
6129 		return 1;
6130 
6131 	if (exit_reason.basic >= kvm_vmx_max_exit_handlers)
6132 		goto unexpected_vmexit;
6133 #ifdef CONFIG_RETPOLINE
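	/*
	 * With retpolines enabled, an indirect call through the handler
	 * table is expensive, so dispatch the most common exit reasons
	 * via direct calls.
	 */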
6134 	if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
6135 		return kvm_emulate_wrmsr(vcpu);
6136 	else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER)
6137 		return handle_preemption_timer(vcpu);
6138 	else if (exit_reason.basic == EXIT_REASON_INTERRUPT_WINDOW)
6139 		return handle_interrupt_window(vcpu);
6140 	else if (exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
6141 		return handle_external_interrupt(vcpu);
6142 	else if (exit_reason.basic == EXIT_REASON_HLT)
6143 		return kvm_emulate_halt(vcpu);
6144 	else if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG)
6145 		return handle_ept_misconfig(vcpu);
6146 #endif
6147 
6148 	exit_handler_index = array_index_nospec((u16)exit_reason.basic,
6149 						kvm_vmx_max_exit_handlers);
6150 	if (!kvm_vmx_exit_handlers[exit_handler_index])
6151 		goto unexpected_vmexit;
6152 
6153 	return kvm_vmx_exit_handlers[exit_handler_index](vcpu);
6154 
6155 unexpected_vmexit:
6156 	vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
6157 		    exit_reason.full);
6158 	dump_vmcs();
6159 	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
6160 	vcpu->run->internal.suberror =
6161 			KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
6162 	vcpu->run->internal.ndata = 2;
6163 	vcpu->run->internal.data[0] = exit_reason.full;
6164 	vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
6165 	return 0;
6166 }
6167 
6168 /*
6169  * Software based L1D cache flush which is used when microcode providing
6170  * the cache control MSR is not loaded.
6171  *
6172  * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but
6173  * flushing it requires reading in 64 KiB because the replacement algorithm
6174  * is not exactly LRU. This could be sized at runtime via topology
6175  * information, but as all relevant affected CPUs have a 32 KiB L1D cache
6176  * there is no point in doing so.
6177  */
6178 static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu)
6179 {
6180 	int size = PAGE_SIZE << L1D_CACHE_ORDER;
6181 
6182 	/*
6183 	 * This code is only executed when the flush mode is 'cond' or
6184 	 * 'always'.
6185 	 */
6186 	if (static_branch_likely(&vmx_l1d_flush_cond)) {
6187 		bool flush_l1d;
6188 
6189 		/*
6190 		 * Clear the per-vcpu flush bit, it gets set again
6191 		 * either from vcpu_run() or from one of the unsafe
6192 		 * VMEXIT handlers.
6193 		 */
6194 		flush_l1d = vcpu->arch.l1tf_flush_l1d;
6195 		vcpu->arch.l1tf_flush_l1d = false;
6196 
6197 		/*
6198 		 * Clear the per-cpu flush bit, it gets set again from
6199 		 * the interrupt handlers.
6200 		 */
6201 		flush_l1d |= kvm_get_cpu_l1tf_flush_l1d();
6202 		kvm_clear_cpu_l1tf_flush_l1d();
6203 
6204 		if (!flush_l1d)
6205 			return;
6206 	}
6207 
6208 	vcpu->stat.l1d_flush++;
6209 
6210 	if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
6211 		native_wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
6212 		return;
6213 	}
6214 
6215 	asm volatile(
6216 		/* First ensure the pages are in the TLB */
6217 		"xorl	%%eax, %%eax\n"
6218 		".Lpopulate_tlb:\n\t"
6219 		"movzbl	(%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
6220 		"addl	$4096, %%eax\n\t"
6221 		"cmpl	%%eax, %[size]\n\t"
6222 		"jne	.Lpopulate_tlb\n\t"
6223 		"xorl	%%eax, %%eax\n\t"
6224 		"cpuid\n\t"
6225 		/* Now fill the cache */
6226 		"xorl	%%eax, %%eax\n"
6227 		".Lfill_cache:\n"
6228 		"movzbl	(%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
6229 		"addl	$64, %%eax\n\t"
6230 		"cmpl	%%eax, %[size]\n\t"
6231 		"jne	.Lfill_cache\n\t"
6232 		"lfence\n"
6233 		:: [flush_pages] "r" (vmx_l1d_flush_pages),
6234 		    [size] "r" (size)
6235 		: "eax", "ebx", "ecx", "edx");
6236 }
6237 
6238 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
6239 {
6240 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6241 	int tpr_threshold;
6242 
6243 	if (is_guest_mode(vcpu) &&
6244 		nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
6245 		return;
6246 
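	/*
	 * If no interrupt is pending, or the highest pending interrupt
	 * already outprioritizes the TPR, there is no need for a CR8 exit
	 * (threshold 0). Otherwise, request an exit once the guest lowers
	 * TPR below the pending interrupt's priority.
	 */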
6247 	tpr_threshold = (irr == -1 || tpr < irr) ? 0 : irr;
6248 	if (is_guest_mode(vcpu))
6249 		to_vmx(vcpu)->nested.l1_tpr_threshold = tpr_threshold;
6250 	else
6251 		vmcs_write32(TPR_THRESHOLD, tpr_threshold);
6252 }
6253 
6254 void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
6255 {
6256 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6257 	u32 sec_exec_control;
6258 
6259 	if (!lapic_in_kernel(vcpu))
6260 		return;
6261 
6262 	if (!flexpriority_enabled &&
6263 	    !cpu_has_vmx_virtualize_x2apic_mode())
6264 		return;
6265 
6266 	/* Postpone execution until vmcs01 is the current VMCS. */
6267 	if (is_guest_mode(vcpu)) {
6268 		vmx->nested.change_vmcs01_virtual_apic_mode = true;
6269 		return;
6270 	}
6271 
6272 	sec_exec_control = secondary_exec_controls_get(vmx);
6273 	sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
6274 			      SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
6275 
6276 	switch (kvm_get_apic_mode(vcpu)) {
6277 	case LAPIC_MODE_INVALID:
6278 		WARN_ONCE(true, "Invalid local APIC state");
		fallthrough;
6279 	case LAPIC_MODE_DISABLED:
6280 		break;
6281 	case LAPIC_MODE_XAPIC:
6282 		if (flexpriority_enabled) {
6283 			sec_exec_control |=
6284 				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
6285 			kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
6286 
6287 			/*
6288 			 * Flush the TLB, reloading the APIC access page will
6289 			 * only do so if its physical address has changed, but
6290 			 * the guest may have inserted a non-APIC mapping into
6291 			 * the TLB while the APIC access page was disabled.
6292 			 */
6293 			kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
6294 		}
6295 		break;
6296 	case LAPIC_MODE_X2APIC:
6297 		if (cpu_has_vmx_virtualize_x2apic_mode())
6298 			sec_exec_control |=
6299 				SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
6300 		break;
6301 	}
6302 	secondary_exec_controls_set(vmx, sec_exec_control);
6303 
6304 	vmx_update_msr_bitmap(vcpu);
6305 }
6306 
6307 static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
6308 {
6309 	struct page *page;
6310 
6311 	/* Defer reload until vmcs01 is the current VMCS. */
6312 	if (is_guest_mode(vcpu)) {
6313 		to_vmx(vcpu)->nested.reload_vmcs01_apic_access_page = true;
6314 		return;
6315 	}
6316 
6317 	if (!(secondary_exec_controls_get(to_vmx(vcpu)) &
6318 	    SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
6319 		return;
6320 
6321 	page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
6322 	if (is_error_page(page))
6323 		return;
6324 
6325 	vmcs_write64(APIC_ACCESS_ADDR, page_to_phys(page));
6326 	vmx_flush_tlb_current(vcpu);
6327 
6328 	/*
6329 	 * Do not pin apic access page in memory, the MMU notifier
6330 	 * will call us again if it is migrated or swapped out.
6331 	 */
6332 	put_page(page);
6333 }
6334 
6335 static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
6336 {
6337 	u16 status;
6338 	u8 old;
6339 
6340 	if (max_isr == -1)
6341 		max_isr = 0;
6342 
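	/*
	 * The 16-bit guest interrupt status holds RVI in the low byte and
	 * SVI in the high byte.
	 */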
6343 	status = vmcs_read16(GUEST_INTR_STATUS);
6344 	old = status >> 8;
6345 	if (max_isr != old) {
6346 		status &= 0xff;
6347 		status |= max_isr << 8;
6348 		vmcs_write16(GUEST_INTR_STATUS, status);
6349 	}
6350 }
6351 
6352 static void vmx_set_rvi(int vector)
6353 {
6354 	u16 status;
6355 	u8 old;
6356 
6357 	if (vector == -1)
6358 		vector = 0;
6359 
6360 	status = vmcs_read16(GUEST_INTR_STATUS);
6361 	old = (u8)status & 0xff;
6362 	if ((u8)vector != old) {
6363 		status &= ~0xff;
6364 		status |= (u8)vector;
6365 		vmcs_write16(GUEST_INTR_STATUS, status);
6366 	}
6367 }
6368 
6369 static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
6370 {
6371 	/*
6372 	 * When running L2, updating RVI is only relevant when vmcs12's
6373 	 * virtual-interrupt-delivery control is enabled. However, that
6374 	 * control can only be enabled when L1 also intercepts external
6375 	 * interrupts, in which case we should not update vmcs02's RVI
6376 	 * but instead intercept the interrupt. Therefore, do nothing
6377 	 * when running L2.
6378 	 */
6379 	if (!is_guest_mode(vcpu))
6380 		vmx_set_rvi(max_irr);
6381 }
6382 
6383 static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
6384 {
6385 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6386 	int max_irr;
6387 	bool max_irr_updated;
6388 
6389 	WARN_ON(!vcpu->arch.apicv_active);
6390 	if (pi_test_on(&vmx->pi_desc)) {
6391 		pi_clear_on(&vmx->pi_desc);
6392 		/*
6393 		 * IOMMU can write to PID.ON, so the barrier matters even on UP.
6394 		 * But on x86 this is just a compiler barrier anyway.
6395 		 */
6396 		smp_mb__after_atomic();
6397 		max_irr_updated =
6398 			kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
6399 
6400 		/*
6401 		 * If we are running L2 and L1 has a new pending interrupt
6402 		 * which can be injected, this may cause a vmexit or it may
6403 		 * be injected into L2.  Either way, this interrupt will be
6404 		 * processed via KVM_REQ_EVENT, not RVI, because we do not use
6405 		 * virtual interrupt delivery to inject L1 interrupts into L2.
6406 		 */
6407 		if (is_guest_mode(vcpu) && max_irr_updated)
6408 			kvm_make_request(KVM_REQ_EVENT, vcpu);
6409 	} else {
6410 		max_irr = kvm_lapic_find_highest_irr(vcpu);
6411 	}
6412 	vmx_hwapic_irr_update(vcpu, max_irr);
6413 	return max_irr;
6414 }
6415 
6416 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
6417 {
6418 	if (!kvm_vcpu_apicv_active(vcpu))
6419 		return;
6420 
6421 	vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
6422 	vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
6423 	vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
6424 	vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
6425 }
6426 
6427 static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
6428 {
6429 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6430 
6431 	pi_clear_on(&vmx->pi_desc);
6432 	memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir));
6433 }
6434 
6435 void vmx_do_interrupt_nmi_irqoff(unsigned long entry);
6436 
6437 static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu,
6438 					unsigned long entry)
6439 {
6440 	kvm_before_interrupt(vcpu);
6441 	vmx_do_interrupt_nmi_irqoff(entry);
6442 	kvm_after_interrupt(vcpu);
6443 }
6444 
6445 static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
6446 {
6447 	const unsigned long nmi_entry = (unsigned long)asm_exc_nmi_noist;
6448 	u32 intr_info = vmx_get_intr_info(&vmx->vcpu);
6449 
6450 	/* If the exit is due to a #PF, check for an async page fault. */
6451 	if (is_page_fault(intr_info))
6452 		vmx->vcpu.arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
6453 	/* Handle machine checks before interrupts are enabled */
6454 	else if (is_machine_check(intr_info))
6455 		kvm_machine_check();
6456 	/* We need to handle NMIs before interrupts are enabled */
6457 	else if (is_nmi(intr_info))
6458 		handle_interrupt_nmi_irqoff(&vmx->vcpu, nmi_entry);
6459 }
6460 
6461 static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
6462 {
6463 	u32 intr_info = vmx_get_intr_info(vcpu);
6464 	unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
6465 	gate_desc *desc = (gate_desc *)host_idt_base + vector;
6466 
6467 	if (WARN_ONCE(!is_external_intr(intr_info),
6468 	    "KVM: unexpected VM-Exit interrupt info: 0x%x", intr_info))
6469 		return;
6470 
6471 	handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc));
6472 	vcpu->arch.at_instruction_boundary = true;
6473 }
6474 
6475 static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
6476 {
6477 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6478 
6479 	if (vmx->emulation_required)
6480 		return;
6481 
6482 	if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
6483 		handle_external_interrupt_irqoff(vcpu);
6484 	else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
6485 		handle_exception_nmi_irqoff(vmx);
6486 }
6487 
6488 static bool vmx_has_emulated_msr(u32 index)
6489 {
6490 	switch (index) {
6491 	case MSR_IA32_SMBASE:
6492 		/*
6493 		 * We cannot do SMM unless we can run the guest in big
6494 		 * real mode.
6495 		 */
6496 		return enable_unrestricted_guest || emulate_invalid_guest_state;
6497 	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
6498 		return nested;
6499 	case MSR_AMD64_VIRT_SPEC_CTRL:
6500 		/* This is AMD only.  */
6501 		return false;
6502 	default:
6503 		return true;
6504 	}
6505 }
6506 
6507 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
6508 {
6509 	u32 exit_intr_info;
6510 	bool unblock_nmi;
6511 	u8 vector;
6512 	bool idtv_info_valid;
6513 
6514 	idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
6515 
6516 	if (enable_vnmi) {
6517 		if (vmx->loaded_vmcs->nmi_known_unmasked)
6518 			return;
6519 
6520 		exit_intr_info = vmx_get_intr_info(&vmx->vcpu);
6521 		unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
6522 		vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
6523 		/*
6524 		 * SDM 3: 27.7.1.2 (September 2008)
6525 		 * Re-set bit "block by NMI" before VM entry if vmexit caused by
6526 		 * a guest IRET fault.
6527 		 * SDM 3: 23.2.2 (September 2008)
6528 		 * Bit 12 is undefined in any of the following cases:
6529 		 *  If the VM exit sets the valid bit in the IDT-vectoring
6530 		 *   information field.
6531 		 *  If the VM exit is due to a double fault.
6532 		 */
6533 		if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
6534 		    vector != DF_VECTOR && !idtv_info_valid)
6535 			vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
6536 				      GUEST_INTR_STATE_NMI);
6537 		else
6538 			vmx->loaded_vmcs->nmi_known_unmasked =
6539 				!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
6540 				  & GUEST_INTR_STATE_NMI);
6541 	} else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
6542 		vmx->loaded_vmcs->vnmi_blocked_time +=
6543 			ktime_to_ns(ktime_sub(ktime_get(),
6544 					      vmx->loaded_vmcs->entry_time));
6545 }
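
/*
 * Editorial note: condensed, the condition above re-asserts NMI blocking
 * only when the exit interrupted an IRET in the guest's NMI handler:
 *
 *	bool iret_fault = (exit_intr_info & INTR_INFO_VALID_MASK) &&
 *			  unblock_nmi && vector != DF_VECTOR &&
 *			  !idtv_info_valid;
 *
 * In every other case the cached nmi_known_unmasked is refreshed from
 * the live interruptibility state so later exits can skip the VMREAD.
 */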
6546 
6547 static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
6548 				      u32 idt_vectoring_info,
6549 				      int instr_len_field,
6550 				      int error_code_field)
6551 {
6552 	u8 vector;
6553 	int type;
6554 	bool idtv_info_valid;
6555 
6556 	idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
6557 
6558 	vcpu->arch.nmi_injected = false;
6559 	kvm_clear_exception_queue(vcpu);
6560 	kvm_clear_interrupt_queue(vcpu);
6561 
6562 	if (!idtv_info_valid)
6563 		return;
6564 
6565 	kvm_make_request(KVM_REQ_EVENT, vcpu);
6566 
6567 	vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
6568 	type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
6569 
6570 	switch (type) {
6571 	case INTR_TYPE_NMI_INTR:
6572 		vcpu->arch.nmi_injected = true;
6573 		/*
6574 		 * SDM 3: 27.7.1.2 (September 2008)
6575 		 * Clear bit "block by NMI" before VM entry if a NMI
6576 		 * delivery faulted.
6577 		 */
6578 		vmx_set_nmi_mask(vcpu, false);
6579 		break;
6580 	case INTR_TYPE_SOFT_EXCEPTION:
6581 		vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
6582 		fallthrough;
6583 	case INTR_TYPE_HARD_EXCEPTION:
6584 		if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
6585 			u32 err = vmcs_read32(error_code_field);
6586 			kvm_requeue_exception_e(vcpu, vector, err);
6587 		} else
6588 			kvm_requeue_exception(vcpu, vector);
6589 		break;
6590 	case INTR_TYPE_SOFT_INTR:
6591 		vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
6592 		fallthrough;
6593 	case INTR_TYPE_EXT_INTR:
6594 		kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
6595 		break;
6596 	default:
6597 		break;
6598 	}
6599 }
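
/*
 * Worked example (editorial addition): the IDT-vectoring information
 * decoded above shares the interruption-info layout -- vector in bits
 * 7:0, type in bits 10:8, deliver-error-code in bit 11, valid in bit 31.
 * A hypothetical raw value of 0x80000306 decodes as
 *
 *	valid  = 1
 *	type   = 3	(INTR_TYPE_HARD_EXCEPTION)
 *	vector = 6	(#UD)
 *	no error code delivered
 *
 * and takes the INTR_TYPE_HARD_EXCEPTION arm above, ending in
 * kvm_requeue_exception(vcpu, 6).
 */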
6600 
6601 static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
6602 {
6603 	__vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
6604 				  VM_EXIT_INSTRUCTION_LEN,
6605 				  IDT_VECTORING_ERROR_CODE);
6606 }
6607 
6608 static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
6609 {
6610 	__vmx_complete_interrupts(vcpu,
6611 				  vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
6612 				  VM_ENTRY_INSTRUCTION_LEN,
6613 				  VM_ENTRY_EXCEPTION_ERROR_CODE);
6614 
6615 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
6616 }
6617 
6618 static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
6619 {
6620 	int i, nr_msrs;
6621 	struct perf_guest_switch_msr *msrs;
6622 
6623 	msrs = perf_guest_get_msrs(&nr_msrs);
6624 
6625 	if (!msrs)
6626 		return;
6627 
6628 	for (i = 0; i < nr_msrs; i++)
6629 		if (msrs[i].host == msrs[i].guest)
6630 			clear_atomic_switch_msr(vmx, msrs[i].msr);
6631 		else
6632 			add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
6633 					msrs[i].host, false);
6634 }
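
/*
 * Editorial note: the "atomic switch" lists are the architectural
 * VM-entry/VM-exit MSR-load areas.  Per the SDM, each 16-byte entry has
 * the layout
 *
 *	bits   0..31	MSR index
 *	bits  32..63	reserved
 *	bits  64..127	value to load
 *
 * add_atomic_switch_msr() pairs a guest value (loaded on VM entry) with
 * a host value (loaded on VM exit); entries whose guest and host values
 * match are dropped above because the hardware walk of these lists is
 * not free.
 */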
6635 
6636 static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
6637 {
6638 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6639 	u64 tscl;
6640 	u32 delta_tsc;
6641 
6642 	if (vmx->req_immediate_exit) {
6643 		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, 0);
6644 		vmx->loaded_vmcs->hv_timer_soft_disabled = false;
6645 	} else if (vmx->hv_deadline_tsc != -1) {
6646 		tscl = rdtsc();
6647 		if (vmx->hv_deadline_tsc > tscl)
6648 			/* set_hv_timer ensures the delta fits in 32-bits */
6649 			delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
6650 				cpu_preemption_timer_multi);
6651 		else
6652 			delta_tsc = 0;
6653 
6654 		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
6655 		vmx->loaded_vmcs->hv_timer_soft_disabled = false;
6656 	} else if (!vmx->loaded_vmcs->hv_timer_soft_disabled) {
6657 		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, -1);
6658 		vmx->loaded_vmcs->hv_timer_soft_disabled = true;
6659 	}
6660 }
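
/*
 * Worked example (editorial addition, hypothetical numbers): the
 * preemption timer ticks once every 2^cpu_preemption_timer_multi TSC
 * cycles, so with a deadline 10,000,000 TSC cycles away and a rate of 5
 * the code above programs
 *
 *	delta_tsc = 10000000 >> 5 = 312500
 *
 * timer ticks.  Writing 0 forces an immediate exit, while writing -1
 * (the maximum count) effectively disables the timer so it need not be
 * rewritten on every entry ("soft disabled").
 */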
6661 
6662 void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
6663 {
6664 	if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) {
6665 		vmx->loaded_vmcs->host_state.rsp = host_rsp;
6666 		vmcs_writel(HOST_RSP, host_rsp);
6667 	}
6668 }
6669 
6670 void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
6671 					unsigned int flags)
6672 {
6673 	u64 hostval = this_cpu_read(x86_spec_ctrl_current);
6674 
6675 	if (!cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL))
6676 		return;
6677 
6678 	if (flags & VMX_RUN_SAVE_SPEC_CTRL)
6679 		vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL);
6680 
6681 	/*
6682 	 * If the guest/host SPEC_CTRL values differ, restore the host value.
6683 	 *
6684 	 * For legacy IBRS, the IBRS bit always needs to be written after
6685 	 * transitioning from a less privileged predictor mode, regardless of
6686 	 * whether the guest/host values differ.
6687 	 */
6688 	if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) ||
6689 	    vmx->spec_ctrl != hostval)
6690 		native_wrmsrl(MSR_IA32_SPEC_CTRL, hostval);
6691 
6692 	barrier_nospec();
6693 }
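
/*
 * Decision table for the wrmsr above (editorial addition):
 *
 *	KERNEL_IBRS	guest == host	write MSR_IA32_SPEC_CTRL?
 *	    no		    yes		no  (nothing to restore)
 *	    no		    no		yes (restore host value)
 *	    yes		  either	yes (IBRS must be written again
 *					     after leaving the guest's
 *					     less privileged predictor
 *					     mode)
 */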
6694 
6695 static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
6696 {
6697 	switch (to_vmx(vcpu)->exit_reason.basic) {
6698 	case EXIT_REASON_MSR_WRITE:
6699 		return handle_fastpath_set_msr_irqoff(vcpu);
6700 	case EXIT_REASON_PREEMPTION_TIMER:
6701 		return handle_fastpath_preemption_timer(vcpu);
6702 	default:
6703 		return EXIT_FASTPATH_NONE;
6704 	}
6705 }
6706 
6707 static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
6708 					struct vcpu_vmx *vmx,
6709 					unsigned long flags)
6710 {
6711 	/*
6712 	 * VMENTER enables interrupts (host state), but interrupts are
6713 	 * disabled in the kernel when this is invoked. Also tell RCU about
6714 	 * it. This is the same logic as for exit_to_user_mode().
6715 	 *
6716 	 * This ensures that e.g. latency analysis on the host observes
6717 	 * guest mode as interrupt enabled.
6718 	 *
6719 	 * guest_enter_irqoff() informs context tracking about the
6720 	 * transition to guest mode and if enabled adjusts RCU state
6721 	 * accordingly.
6722 	 */
6723 	instrumentation_begin();
6724 	trace_hardirqs_on_prepare();
6725 	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
6726 	instrumentation_end();
6727 
6728 	guest_enter_irqoff();
6729 	lockdep_hardirqs_on(CALLER_ADDR0);
6730 
6731 	/* L1D Flush includes CPU buffer clear to mitigate MDS */
6732 	if (static_branch_unlikely(&vmx_l1d_should_flush))
6733 		vmx_l1d_flush(vcpu);
6734 	else if (static_branch_unlikely(&mds_user_clear))
6735 		mds_clear_cpu_buffers();
6736 	else if (static_branch_unlikely(&mmio_stale_data_clear) &&
6737 		 kvm_arch_has_assigned_device(vcpu->kvm))
6738 		mds_clear_cpu_buffers();
6739 
6740 	vmx_disable_fb_clear(vmx);
6741 
6742 	if (vcpu->arch.cr2 != native_read_cr2())
6743 		native_write_cr2(vcpu->arch.cr2);
6744 
6745 	vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
6746 				   flags);
6747 
6748 	vcpu->arch.cr2 = native_read_cr2();
6749 
6750 	vmx_enable_fb_clear(vmx);
6751 
6752 	/*
6753 	 * VMEXIT disables interrupts (host state), but tracing and lockdep
6754 	 * have them in state 'on' as recorded before entering guest mode.
6755 	 * Same as enter_from_user_mode().
6756 	 *
6757 	 * context_tracking_guest_exit() restores host context and reinstates
6758 	 * RCU if enabled and required.
6759 	 *
6760 	 * This needs to be done before the code below, as native_read_msr()
6761 	 * contains a tracepoint and x86_spec_ctrl_restore_host() calls
6762 	 * into instrumentable code.
6763 	 */
6764 	lockdep_hardirqs_off(CALLER_ADDR0);
6765 	context_tracking_guest_exit();
6766 
6767 	instrumentation_begin();
6768 	trace_hardirqs_off_finish();
6769 	instrumentation_end();
6770 }
6771 
6772 static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
6773 {
6774 	fastpath_t exit_fastpath;
6775 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6776 	unsigned long cr3, cr4;
6777 
6778 reenter_guest:
6779 	/* Record the guest's net vcpu time for enforced NMI injections. */
6780 	if (unlikely(!enable_vnmi &&
6781 		     vmx->loaded_vmcs->soft_vnmi_blocked))
6782 		vmx->loaded_vmcs->entry_time = ktime_get();
6783 
6784 	/* Don't enter VMX if guest state is invalid; let the exit handler
6785 	   start emulation until we arrive back at a valid state. */
6786 	if (vmx->emulation_required)
6787 		return EXIT_FASTPATH_NONE;
6788 
6789 	if (vmx->ple_window_dirty) {
6790 		vmx->ple_window_dirty = false;
6791 		vmcs_write32(PLE_WINDOW, vmx->ple_window);
6792 	}
6793 
6794 	/*
6795 	 * We did this in prepare_switch_to_guest, because it needs to
6796 	 * be within srcu_read_lock.
6797 	 */
6798 	WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync);
6799 
6800 	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP))
6801 		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
6802 	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP))
6803 		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
6804 
6805 	cr3 = __get_current_cr3_fast();
6806 	if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
6807 		vmcs_writel(HOST_CR3, cr3);
6808 		vmx->loaded_vmcs->host_state.cr3 = cr3;
6809 	}
6810 
6811 	cr4 = cr4_read_shadow();
6812 	if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
6813 		vmcs_writel(HOST_CR4, cr4);
6814 		vmx->loaded_vmcs->host_state.cr4 = cr4;
6815 	}
6816 
6817 	/* When single-stepping over STI and MOV SS, we must clear the
6818 	 * corresponding interruptibility bits in the guest state. Otherwise
6819 	 * vmentry fails as it then expects bit 14 (BS) in pending debug
6820 	 * exceptions to be set, but that's not correct for the guest debugging
6821 	 * case. */
6822 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
6823 		vmx_set_interrupt_shadow(vcpu, 0);
6824 
6825 	kvm_load_guest_xsave_state(vcpu);
6826 
6827 	pt_guest_enter(vmx);
6828 
6829 	atomic_switch_perf_msrs(vmx);
6830 
6831 	if (enable_preemption_timer)
6832 		vmx_update_hv_timer(vcpu);
6833 
6834 	kvm_wait_lapic_expire(vcpu);
6835 
6836 	/*
6837 	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
6838 	 * it's non-zero. Since vmentry is serialising on affected CPUs, there
6839 	 * is no need to worry about the conditional branch over the wrmsr
6840 	 * being speculatively taken.
6841 	 */
6842 	x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
6843 
6844 	/* The actual VMENTER/EXIT is in the .noinstr.text section. */
6845 	vmx_vcpu_enter_exit(vcpu, vmx, __vmx_vcpu_run_flags(vmx));
6846 
6847 	/* All fields are clean at this point */
6848 	if (static_branch_unlikely(&enable_evmcs))
6849 		current_evmcs->hv_clean_fields |=
6850 			HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
6851 
6852 	if (static_branch_unlikely(&enable_evmcs))
6853 		current_evmcs->hv_vp_id = vcpu->arch.hyperv.vp_index;
6854 
6855 	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
6856 	if (vmx->host_debugctlmsr)
6857 		update_debugctlmsr(vmx->host_debugctlmsr);
6858 
6859 #ifndef CONFIG_X86_64
6860 	/*
6861 	 * The sysexit path does not restore ds/es, so we must set them to
6862 	 * a reasonable value ourselves.
6863 	 *
6864 	 * We can't defer this to vmx_prepare_switch_to_host() since that
6865 	 * function may be executed in interrupt context, which saves and
6866 	 * restores segments around it, nullifying its effect.
6867 	 */
6868 	loadsegment(ds, __USER_DS);
6869 	loadsegment(es, __USER_DS);
6870 #endif
6871 
6872 	vmx_register_cache_reset(vcpu);
6873 
6874 	pt_guest_exit(vmx);
6875 
6876 	kvm_load_host_xsave_state(vcpu);
6877 
6878 	vmx->nested.nested_run_pending = 0;
6879 	vmx->idt_vectoring_info = 0;
6880 
6881 	if (unlikely(vmx->fail)) {
6882 		vmx->exit_reason.full = 0xdead;
6883 		return EXIT_FASTPATH_NONE;
6884 	}
6885 
6886 	vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
6887 	if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY))
6888 		kvm_machine_check();
6889 
6890 	trace_kvm_exit(vmx->exit_reason.full, vcpu, KVM_ISA_VMX);
6891 
6892 	if (unlikely(vmx->exit_reason.failed_vmentry))
6893 		return EXIT_FASTPATH_NONE;
6894 
6895 	vmx->loaded_vmcs->launched = 1;
6896 	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
6897 
6898 	vmx_recover_nmi_blocking(vmx);
6899 	vmx_complete_interrupts(vmx);
6900 
6901 	if (is_guest_mode(vcpu))
6902 		return EXIT_FASTPATH_NONE;
6903 
6904 	exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
6905 	if (exit_fastpath == EXIT_FASTPATH_REENTER_GUEST) {
6906 		if (!kvm_vcpu_exit_request(vcpu)) {
6907 			/*
6908 			 * FIXME: this goto should be a loop in vcpu_enter_guest,
6909 			 * but it would incur the cost of a retpoline for now.
6910 			 * Revisit once static calls are available.
6911 			 */
6912 			if (vcpu->arch.apicv_active)
6913 				vmx_sync_pir_to_irr(vcpu);
6914 			goto reenter_guest;
6915 		}
6916 		exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
6917 	}
6918 
6919 	return exit_fastpath;
6920 }
6921 
6922 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
6923 {
6924 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6925 
6926 	if (enable_pml)
6927 		vmx_destroy_pml_buffer(vmx);
6928 	free_vpid(vmx->vpid);
6929 	nested_vmx_free_vcpu(vcpu);
6930 	free_loaded_vmcs(vmx->loaded_vmcs);
6931 }
6932 
6933 static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
6934 {
6935 	struct vcpu_vmx *vmx;
6936 	int i, cpu, err;
6937 
6938 	BUILD_BUG_ON(offsetof(struct vcpu_vmx, vcpu) != 0);
6939 	vmx = to_vmx(vcpu);
6940 
6941 	err = -ENOMEM;
6942 
6943 	vmx->vpid = allocate_vpid();
6944 
6945 	/*
6946 	 * If PML is turned on, failure to enable PML just results in failure
6947 	 * of creating the vcpu, therefore we can simplify the PML logic (by
6948 	 * avoiding cases such as enabling PML only partially on the vcpus
6949 	 * of the guest).
6950 	 */
6951 	if (enable_pml) {
6952 		vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
6953 		if (!vmx->pml_pg)
6954 			goto free_vpid;
6955 	}
6956 
6957 	BUILD_BUG_ON(ARRAY_SIZE(vmx_uret_msrs_list) != MAX_NR_USER_RETURN_MSRS);
6958 
6959 	for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i) {
6960 		u32 index = vmx_uret_msrs_list[i];
6961 		int j = vmx->nr_uret_msrs;
6962 
6963 		if (kvm_probe_user_return_msr(index))
6964 			continue;
6965 
6966 		vmx->guest_uret_msrs[j].slot = i;
6967 		vmx->guest_uret_msrs[j].data = 0;
6968 		switch (index) {
6969 		case MSR_IA32_TSX_CTRL:
6970 			/*
6971 			 * TSX_CTRL_CPUID_CLEAR is handled in the CPUID
6972 			 * interception.  Keep the host value unchanged to avoid
6973 			 * changing CPUID bits under the host kernel's feet.
6974 			 *
6975 			 * hle=0, rtm=0, tsx_ctrl=1 can be found with some
6976 			 * combinations of new kernel and old userspace.  If
6977 			 * those guests run on a tsx=off host, do allow guests
6978 			 * to use TSX_CTRL, but do not change the value on the
6979 			 * host so that TSX remains always disabled.
6980 			 */
6981 			if (boot_cpu_has(X86_FEATURE_RTM))
6982 				vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
6983 			else
6984 				vmx->guest_uret_msrs[j].mask = 0;
6985 			break;
6986 		default:
6987 			vmx->guest_uret_msrs[j].mask = -1ull;
6988 			break;
6989 		}
6990 		++vmx->nr_uret_msrs;
6991 	}
6992 
6993 	err = alloc_loaded_vmcs(&vmx->vmcs01);
6994 	if (err < 0)
6995 		goto free_pml;
6996 
6997 	/* The MSR bitmap starts with all ones */
6998 	bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
6999 	bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
7000 
7001 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
7002 #ifdef CONFIG_X86_64
7003 	vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
7004 	vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
7005 	vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
7006 #endif
7007 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
7008 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
7009 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
7010 	if (kvm_cstate_in_guest(vcpu->kvm)) {
7011 		vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C1_RES, MSR_TYPE_R);
7012 		vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R);
7013 		vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R);
7014 		vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C7_RESIDENCY, MSR_TYPE_R);
7015 	}
7016 	vmx->msr_bitmap_mode = 0;
7017 
7018 	vmx->loaded_vmcs = &vmx->vmcs01;
7019 	cpu = get_cpu();
7020 	vmx_vcpu_load(vcpu, cpu);
7021 	vcpu->cpu = cpu;
7022 	init_vmcs(vmx);
7023 	vmx_vcpu_put(vcpu);
7024 	put_cpu();
7025 	if (cpu_need_virtualize_apic_accesses(vcpu)) {
7026 		err = alloc_apic_access_page(vcpu->kvm);
7027 		if (err)
7028 			goto free_vmcs;
7029 	}
7030 
7031 	if (enable_ept && !enable_unrestricted_guest) {
7032 		err = init_rmode_identity_map(vcpu->kvm);
7033 		if (err)
7034 			goto free_vmcs;
7035 	}
7036 
7037 	if (nested)
7038 		memcpy(&vmx->nested.msrs, &vmcs_config.nested, sizeof(vmx->nested.msrs));
7039 	else
7040 		memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs));
7041 
7042 	vmx->nested.posted_intr_nv = -1;
7043 	vmx->nested.current_vmptr = -1ull;
7044 
7045 	vcpu->arch.microcode_version = 0x100000000ULL;
7046 	vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED;
7047 
7048 	/*
7049 	 * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
7050 	 * or POSTED_INTR_WAKEUP_VECTOR.
7051 	 */
7052 	vmx->pi_desc.nv = POSTED_INTR_VECTOR;
7053 	vmx->pi_desc.sn = 1;
7054 
7055 	vmx->ept_pointer = INVALID_PAGE;
7056 
7057 	return 0;
7058 
7059 free_vmcs:
7060 	free_loaded_vmcs(vmx->loaded_vmcs);
7061 free_pml:
7062 	vmx_destroy_pml_buffer(vmx);
7063 free_vpid:
7064 	free_vpid(vmx->vpid);
7065 	return err;
7066 }
7067 
7068 #define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
7069 #define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
7070 
7071 static int vmx_vm_init(struct kvm *kvm)
7072 {
7073 	spin_lock_init(&to_kvm_vmx(kvm)->ept_pointer_lock);
7074 
7075 	if (!ple_gap)
7076 		kvm->arch.pause_in_guest = true;
7077 
7078 	if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
7079 		switch (l1tf_mitigation) {
7080 		case L1TF_MITIGATION_OFF:
7081 		case L1TF_MITIGATION_FLUSH_NOWARN:
7082 			/* 'I explicitly don't care' is set */
7083 			break;
7084 		case L1TF_MITIGATION_FLUSH:
7085 		case L1TF_MITIGATION_FLUSH_NOSMT:
7086 		case L1TF_MITIGATION_FULL:
7087 			/*
7088 			 * Warn upon starting the first VM in a potentially
7089 			 * insecure environment.
7090 			 */
7091 			if (sched_smt_active())
7092 				pr_warn_once(L1TF_MSG_SMT);
7093 			if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
7094 				pr_warn_once(L1TF_MSG_L1D);
7095 			break;
7096 		case L1TF_MITIGATION_FULL_FORCE:
7097 			/* Flush is enforced */
7098 			break;
7099 		}
7100 	}
7101 	kvm_apicv_init(kvm, enable_apicv);
7102 	return 0;
7103 }
7104 
7105 static int __init vmx_check_processor_compat(void)
7106 {
7107 	struct vmcs_config vmcs_conf;
7108 	struct vmx_capability vmx_cap;
7109 
7110 	if (!this_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
7111 	    !this_cpu_has(X86_FEATURE_VMX)) {
7112 		pr_err("kvm: VMX is disabled on CPU %d\n", smp_processor_id());
7113 		return -EIO;
7114 	}
7115 
7116 	if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0)
7117 		return -EIO;
7118 	if (nested)
7119 		nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept);
7120 	if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
7121 		pr_err("kvm: CPU %d feature inconsistency!\n",
7122 		       smp_processor_id());
7123 		return -EIO;
7124 	}
7125 	return 0;
7126 }
7127 
7128 static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
7129 {
7130 	u8 cache;
7131 	u64 ipat = 0;
7132 
7133 	/* We wanted to honor guest CD/MTRR/PAT, but doing so could result in
7134 	 * memory aliases with conflicting memory types and sometimes MCEs.
7135 	 * We have to be careful about what is honored and when.
7136 	 *
7137 	 * For MMIO, guest CD/MTRR are ignored.  The EPT memory type is set to
7138 	 * UC.  The effective memory type is UC or WC depending on guest PAT.
7139 	 * This was historically the source of MCEs and we want to be
7140 	 * conservative.
7141 	 *
7142 	 * When there is no need to deal with noncoherent DMA (e.g., no VT-d
7143 	 * or VT-d has snoop control), guest CD/MTRR/PAT are all ignored.  The
7144 	 * EPT memory type is set to WB.  The effective memory type is forced
7145 	 * WB.
7146 	 *
7147 	 * Otherwise, we trust the guest.  Guest CD/MTRR/PAT are all honored.  The
7148 	 * EPT memory type is used to emulate guest CD/MTRR.
7149 	 */
7150 
7151 	if (is_mmio) {
7152 		cache = MTRR_TYPE_UNCACHABLE;
7153 		goto exit;
7154 	}
7155 
7156 	if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
7157 		ipat = VMX_EPT_IPAT_BIT;
7158 		cache = MTRR_TYPE_WRBACK;
7159 		goto exit;
7160 	}
7161 
7162 	if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
7163 		ipat = VMX_EPT_IPAT_BIT;
7164 		if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
7165 			cache = MTRR_TYPE_WRBACK;
7166 		else
7167 			cache = MTRR_TYPE_UNCACHABLE;
7168 		goto exit;
7169 	}
7170 
7171 	cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
7172 
7173 exit:
7174 	return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat;
7175 }
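
/*
 * Worked example (editorial addition): the return value packs the
 * memory type into bits 5:3 of the EPT leaf, optionally with the
 * ignore-PAT bit.  For the no-noncoherent-DMA case above:
 *
 *	cache = MTRR_TYPE_WRBACK	(6)
 *	ipat  = VMX_EPT_IPAT_BIT	(1ull << 6)
 *
 *	(6 << VMX_EPT_MT_EPTE_SHIFT) | ipat = 0x30 | 0x40 = 0x70
 *
 * i.e. a WB mapping whose guest PAT is ignored.
 */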
7176 
7177 static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx)
7178 {
7179 	/*
7180 	 * These bits in the secondary execution controls field
7181 	 * are dynamic, the others are mostly based on the hypervisor
7182 	 * architecture and the guest's CPUID.  Do not touch the
7183 	 * dynamic bits.
7184 	 */
7185 	u32 mask =
7186 		SECONDARY_EXEC_SHADOW_VMCS |
7187 		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
7188 		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
7189 		SECONDARY_EXEC_DESC;
7190 
7191 	u32 new_ctl = vmx->secondary_exec_control;
7192 	u32 cur_ctl = secondary_exec_controls_get(vmx);
7193 
7194 	secondary_exec_controls_set(vmx, (new_ctl & ~mask) | (cur_ctl & mask));
7195 }
7196 
7197 /*
7198  * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits
7199  * (indicating "allowed-1") if they are supported in the guest's CPUID.
7200  */
7201 static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
7202 {
7203 	struct vcpu_vmx *vmx = to_vmx(vcpu);
7204 	struct kvm_cpuid_entry2 *entry;
7205 
7206 	vmx->nested.msrs.cr0_fixed1 = 0xffffffff;
7207 	vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE;
7208 
7209 #define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do {		\
7210 	if (entry && (entry->_reg & (_cpuid_mask)))			\
7211 		vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask);	\
7212 } while (0)
7213 
7214 	entry = kvm_find_cpuid_entry(vcpu, 0x1, 0);
7215 	cr4_fixed1_update(X86_CR4_VME,        edx, feature_bit(VME));
7216 	cr4_fixed1_update(X86_CR4_PVI,        edx, feature_bit(VME));
7217 	cr4_fixed1_update(X86_CR4_TSD,        edx, feature_bit(TSC));
7218 	cr4_fixed1_update(X86_CR4_DE,         edx, feature_bit(DE));
7219 	cr4_fixed1_update(X86_CR4_PSE,        edx, feature_bit(PSE));
7220 	cr4_fixed1_update(X86_CR4_PAE,        edx, feature_bit(PAE));
7221 	cr4_fixed1_update(X86_CR4_MCE,        edx, feature_bit(MCE));
7222 	cr4_fixed1_update(X86_CR4_PGE,        edx, feature_bit(PGE));
7223 	cr4_fixed1_update(X86_CR4_OSFXSR,     edx, feature_bit(FXSR));
7224 	cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, feature_bit(XMM));
7225 	cr4_fixed1_update(X86_CR4_VMXE,       ecx, feature_bit(VMX));
7226 	cr4_fixed1_update(X86_CR4_SMXE,       ecx, feature_bit(SMX));
7227 	cr4_fixed1_update(X86_CR4_PCIDE,      ecx, feature_bit(PCID));
7228 	cr4_fixed1_update(X86_CR4_OSXSAVE,    ecx, feature_bit(XSAVE));
7229 
7230 	entry = kvm_find_cpuid_entry(vcpu, 0x7, 0);
7231 	cr4_fixed1_update(X86_CR4_FSGSBASE,   ebx, feature_bit(FSGSBASE));
7232 	cr4_fixed1_update(X86_CR4_SMEP,       ebx, feature_bit(SMEP));
7233 	cr4_fixed1_update(X86_CR4_SMAP,       ebx, feature_bit(SMAP));
7234 	cr4_fixed1_update(X86_CR4_PKE,        ecx, feature_bit(PKU));
7235 	cr4_fixed1_update(X86_CR4_UMIP,       ecx, feature_bit(UMIP));
7236 	cr4_fixed1_update(X86_CR4_LA57,       ecx, feature_bit(LA57));
7237 
7238 #undef cr4_fixed1_update
7239 }
7240 
7241 static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
7242 {
7243 	struct vcpu_vmx *vmx = to_vmx(vcpu);
7244 
7245 	if (kvm_mpx_supported()) {
7246 		bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX);
7247 
7248 		if (mpx_enabled) {
7249 			vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
7250 			vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
7251 		} else {
7252 			vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS;
7253 			vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS;
7254 		}
7255 	}
7256 }
7257 
7258 static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
7259 {
7260 	struct vcpu_vmx *vmx = to_vmx(vcpu);
7261 	struct kvm_cpuid_entry2 *best = NULL;
7262 	int i;
7263 
7264 	for (i = 0; i < PT_CPUID_LEAVES; i++) {
7265 		best = kvm_find_cpuid_entry(vcpu, 0x14, i);
7266 		if (!best)
7267 			return;
7268 		vmx->pt_desc.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM] = best->eax;
7269 		vmx->pt_desc.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM] = best->ebx;
7270 		vmx->pt_desc.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM] = best->ecx;
7271 		vmx->pt_desc.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM] = best->edx;
7272 	}
7273 
7274 	/* Get the number of configurable Address Ranges for filtering */
7275 	vmx->pt_desc.addr_range = intel_pt_validate_cap(vmx->pt_desc.caps,
7276 						PT_CAP_num_address_ranges);
7277 
7278 	/* Initialize the disallowed-bit mask; bits with no CPUID dependency start cleared. */
7279 	vmx->pt_desc.ctl_bitmask = ~(RTIT_CTL_TRACEEN | RTIT_CTL_OS |
7280 			RTIT_CTL_USR | RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC);
7281 
7282 	/*
7283 	 * If CPUID.(EAX=14H,ECX=0):EBX[0]=1, CR3Filter can be set;
7284 	 * otherwise writing it injects a #GP.
7285 	 */
7286 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_cr3_filtering))
7287 		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_CR3EN;
7288 
7289 	/*
7290 	 * If CPUID.(EAX=14H,ECX=0):EBX[1]=1 CYCEn, CycThresh and
7291 	 * PSBFreq can be set
7292 	 */
7293 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc))
7294 		vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_CYCLEACC |
7295 				RTIT_CTL_CYC_THRESH | RTIT_CTL_PSB_FREQ);
7296 
7297 	/*
7298 	 * If CPUID.(EAX=14H,ECX=0):EBX[3]=1, MTCEn, BranchEn and
7299 	 * MTCFreq can be set
7300 	 */
7301 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc))
7302 		vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_MTC_EN |
7303 				RTIT_CTL_BRANCH_EN | RTIT_CTL_MTC_RANGE);
7304 
7305 	/* If CPUID.(EAX=14H,ECX=0):EBX[4]=1 FUPonPTW and PTWEn can be set */
7306 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_ptwrite))
7307 		vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_FUP_ON_PTW |
7308 							RTIT_CTL_PTW_EN);
7309 
7310 	/* If CPUID.(EAX=14H,ECX=0):EBX[5]=1 PwrEvEn can be set */
7311 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_power_event_trace))
7312 		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_PWR_EVT_EN;
7313 
7314 	/* If CPUID.(EAX=14H,ECX=0):ECX[0]=1 ToPA can be set */
7315 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output))
7316 		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA;
7317 
7318 	/* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabricEn can be set */
7319 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys))
7320 		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN;
7321 
7322 	/* unmask address range configure area */
7323 	for (i = 0; i < vmx->pt_desc.addr_range; i++)
7324 		vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4));
7325 }
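
/*
 * Worked example (editorial addition) for the loop above: RTIT_CTL
 * carries one 4-bit ADDRn_CFG field per address range, starting at bit
 * 32.  With addr_range == 2 the loop clears
 *
 *	0xfULL << 32	(ADDR0_CFG)
 *	0xfULL << 36	(ADDR1_CFG)
 *
 * from ctl_bitmask, so guest writes to those fields are no longer
 * treated as setting reserved bits.
 */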
7326 
7327 static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
7328 {
7329 	struct vcpu_vmx *vmx = to_vmx(vcpu);
7330 
7331 	/* xsaves_enabled is recomputed in vmx_compute_secondary_exec_control(). */
7332 	vcpu->arch.xsaves_enabled = false;
7333 
7334 	if (cpu_has_secondary_exec_ctrls()) {
7335 		vmx_compute_secondary_exec_control(vmx);
7336 		vmcs_set_secondary_exec_control(vmx);
7337 	}
7338 
7339 	if (nested_vmx_allowed(vcpu))
7340 		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
7341 			FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
7342 			FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
7343 	else
7344 		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
7345 			~(FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
7346 			  FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX);
7347 
7348 	if (nested_vmx_allowed(vcpu)) {
7349 		nested_vmx_cr_fixed1_bits_update(vcpu);
7350 		nested_vmx_entry_exit_ctls_update(vcpu);
7351 	}
7352 
7353 	if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
7354 			guest_cpuid_has(vcpu, X86_FEATURE_INTEL_PT))
7355 		update_intel_pt_cfg(vcpu);
7356 
7357 	if (boot_cpu_has(X86_FEATURE_RTM)) {
7358 		struct vmx_uret_msr *msr;
7359 		msr = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
7360 		if (msr) {
7361 			bool enabled = guest_cpuid_has(vcpu, X86_FEATURE_RTM);
7362 			vmx_set_guest_uret_msr(vmx, msr, enabled ? 0 : TSX_CTRL_RTM_DISABLE);
7363 		}
7364 	}
7365 
7366 	set_cr4_guest_host_mask(vmx);
7367 
7368 	/* Refresh #PF interception to account for MAXPHYADDR changes. */
7369 	update_exception_bitmap(vcpu);
7370 }
7371 
7372 static __init void vmx_set_cpu_caps(void)
7373 {
7374 	kvm_set_cpu_caps();
7375 
7376 	/* CPUID 0x1 */
7377 	if (nested)
7378 		kvm_cpu_cap_set(X86_FEATURE_VMX);
7379 
7380 	/* CPUID 0x7 */
7381 	if (kvm_mpx_supported())
7382 		kvm_cpu_cap_check_and_set(X86_FEATURE_MPX);
7383 	if (cpu_has_vmx_invpcid())
7384 		kvm_cpu_cap_check_and_set(X86_FEATURE_INVPCID);
7385 	if (vmx_pt_mode_is_host_guest())
7386 		kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT);
7387 
7388 	if (vmx_umip_emulated())
7389 		kvm_cpu_cap_set(X86_FEATURE_UMIP);
7390 
7391 	/* CPUID 0xD.1 */
7392 	supported_xss = 0;
7393 	if (!cpu_has_vmx_xsaves())
7394 		kvm_cpu_cap_clear(X86_FEATURE_XSAVES);
7395 
7396 	/* CPUID 0x80000001 and 0x7 (RDPID) */
7397 	if (!cpu_has_vmx_rdtscp()) {
7398 		kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
7399 		kvm_cpu_cap_clear(X86_FEATURE_RDPID);
7400 	}
7401 
7402 	if (cpu_has_vmx_waitpkg())
7403 		kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
7404 }
7405 
7406 static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
7407 {
7408 	to_vmx(vcpu)->req_immediate_exit = true;
7409 }
7410 
7411 static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
7412 				  struct x86_instruction_info *info)
7413 {
7414 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
7415 	unsigned short port;
7416 	bool intercept;
7417 	int size;
7418 
7419 	if (info->intercept == x86_intercept_in ||
7420 	    info->intercept == x86_intercept_ins) {
7421 		port = info->src_val;
7422 		size = info->dst_bytes;
7423 	} else {
7424 		port = info->dst_val;
7425 		size = info->src_bytes;
7426 	}
7427 
7428 	/*
7429 	 * If the 'use IO bitmaps' VM-execution control is 0, IO instruction
7430 	 * VM-exits depend on the 'unconditional IO exiting' VM-execution
7431 	 * control.
7432 	 *
7433 	 * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps.
7434 	 */
7435 	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
7436 		intercept = nested_cpu_has(vmcs12,
7437 					   CPU_BASED_UNCOND_IO_EXITING);
7438 	else
7439 		intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
7440 
7441 	/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED.  */
7442 	return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
7443 }
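
/*
 * Editorial sketch of what nested_vmx_check_io_bitmaps() has to decide:
 * the two 4K IO bitmaps cover ports 0x0000-0x7fff (A) and 0x8000-0xffff
 * (B), one bit per port.  For a single port in bitmap A the test is
 * essentially:
 *
 *	static bool port_intercepted(const u8 *bitmap, u16 port)
 *	{
 *		return bitmap[port / 8] & (1 << (port % 8));
 *	}
 *
 * applied to each of port..port+size-1, switching from bitmap A to B
 * when the range crosses 0x8000; the access is intercepted if any
 * covered bit is set.
 */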
7444 
7445 static int vmx_check_intercept(struct kvm_vcpu *vcpu,
7446 			       struct x86_instruction_info *info,
7447 			       enum x86_intercept_stage stage,
7448 			       struct x86_exception *exception)
7449 {
7450 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
7451 
7452 	switch (info->intercept) {
7453 	/*
7454 	 * RDPID causes #UD if disabled through secondary execution controls.
7455 	 * Because it is marked as EmulateOnUD, we need to intercept it here.
7456 	 * Note, RDPID is hidden behind ENABLE_RDTSCP.
7457 	 */
7458 	case x86_intercept_rdpid:
7459 		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_RDTSCP)) {
7460 			exception->vector = UD_VECTOR;
7461 			exception->error_code_valid = false;
7462 			return X86EMUL_PROPAGATE_FAULT;
7463 		}
7464 		break;
7465 
7466 	case x86_intercept_in:
7467 	case x86_intercept_ins:
7468 	case x86_intercept_out:
7469 	case x86_intercept_outs:
7470 		return vmx_check_intercept_io(vcpu, info);
7471 
7472 	case x86_intercept_lgdt:
7473 	case x86_intercept_lidt:
7474 	case x86_intercept_lldt:
7475 	case x86_intercept_ltr:
7476 	case x86_intercept_sgdt:
7477 	case x86_intercept_sidt:
7478 	case x86_intercept_sldt:
7479 	case x86_intercept_str:
7480 		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC))
7481 			return X86EMUL_CONTINUE;
7482 
7483 		/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED.  */
7484 		break;
7485 
7486 	/* TODO: check more intercepts... */
7487 	default:
7488 		break;
7489 	}
7490 
7491 	return X86EMUL_UNHANDLEABLE;
7492 }
7493 
7494 #ifdef CONFIG_X86_64
7495 /* (a << shift) / divisor, return 1 if overflow otherwise 0 */
7496 static inline int u64_shl_div_u64(u64 a, unsigned int shift,
7497 				  u64 divisor, u64 *result)
7498 {
7499 	u64 low = a << shift, high = a >> (64 - shift);
7500 
7501 	/* Avoid overflow on divq. */
7502 	if (high >= divisor)
7503 		return 1;
7504 
7505 	/* low holds the quotient; high holds the remainder, which is discarded */
7506 	asm("divq %2\n\t" : "=a" (low), "=d" (high) :
7507 	    "rm" (divisor), "0" (low), "1" (high));
7508 	*result = low;
7509 
7510 	return 0;
7511 }
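
/*
 * Worked example (editorial addition): the asm above performs the
 * 128-by-64-bit division RDX:RAX / divisor.  With a = 1 << 40,
 * shift = 32 and divisor = 10:
 *
 *	high = a >> 32 = 1 << 8,  low = a << 32 = 0
 *
 * high (256) >= 10, so the function returns 1: the quotient
 * (2^72 / 10, about 4.7e20) would not fit in 64 bits.  With
 * divisor = 1 << 16 instead, high < divisor and divq yields 2^56.
 */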
7512 
7513 static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
7514 			    bool *expired)
7515 {
7516 	struct vcpu_vmx *vmx;
7517 	u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles;
7518 	struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer;
7519 
7520 	vmx = to_vmx(vcpu);
7521 	tscl = rdtsc();
7522 	guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
7523 	delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
7524 	lapic_timer_advance_cycles = nsec_to_cycles(vcpu,
7525 						    ktimer->timer_advance_ns);
7526 
7527 	if (delta_tsc > lapic_timer_advance_cycles)
7528 		delta_tsc -= lapic_timer_advance_cycles;
7529 	else
7530 		delta_tsc = 0;
7531 
7532 	/* Convert to host delta tsc if tsc scaling is enabled */
7533 	if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
7534 	    delta_tsc && u64_shl_div_u64(delta_tsc,
7535 				kvm_tsc_scaling_ratio_frac_bits,
7536 				vcpu->arch.tsc_scaling_ratio, &delta_tsc))
7537 		return -ERANGE;
7538 
7539 	/*
7540 	 * If the delta tsc can't fit in 32 bits after the multiplier shift,
7541 	 * we can't use the preemption timer.
7542 	 * It's possible that it fits on later vmentries, but checking
7543 	 * on every vmentry is costly so we just use an hrtimer.
7544 	 */
7545 	if (delta_tsc >> (cpu_preemption_timer_multi + 32))
7546 		return -ERANGE;
7547 
7548 	vmx->hv_deadline_tsc = tscl + delta_tsc;
7549 	*expired = !delta_tsc;
7550 	return 0;
7551 }
7552 
7553 static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
7554 {
7555 	to_vmx(vcpu)->hv_deadline_tsc = -1;
7556 }
7557 #endif
7558 
7559 static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
7560 {
7561 	if (!kvm_pause_in_guest(vcpu->kvm))
7562 		shrink_ple_window(vcpu);
7563 }
7564 
7565 static void vmx_slot_enable_log_dirty(struct kvm *kvm,
7566 				     struct kvm_memory_slot *slot)
7567 {
7568 	if (!kvm_dirty_log_manual_protect_and_init_set(kvm))
7569 		kvm_mmu_slot_leaf_clear_dirty(kvm, slot);
7570 	kvm_mmu_slot_largepage_remove_write_access(kvm, slot);
7571 }
7572 
7573 static void vmx_slot_disable_log_dirty(struct kvm *kvm,
7574 				       struct kvm_memory_slot *slot)
7575 {
7576 	kvm_mmu_slot_set_dirty(kvm, slot);
7577 }
7578 
7579 static void vmx_flush_log_dirty(struct kvm *kvm)
7580 {
7581 	kvm_flush_pml_buffers(kvm);
7582 }
7583 
7584 static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
7585 					   struct kvm_memory_slot *memslot,
7586 					   gfn_t offset, unsigned long mask)
7587 {
7588 	kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
7589 }
7590 
7591 static int vmx_pre_block(struct kvm_vcpu *vcpu)
7592 {
7593 	if (pi_pre_block(vcpu))
7594 		return 1;
7595 
7596 	if (kvm_lapic_hv_timer_in_use(vcpu))
7597 		kvm_lapic_switch_to_sw_timer(vcpu);
7598 
7599 	return 0;
7600 }
7601 
7602 static void vmx_post_block(struct kvm_vcpu *vcpu)
7603 {
7604 	if (kvm_x86_ops.set_hv_timer)
7605 		kvm_lapic_switch_to_hv_timer(vcpu);
7606 
7607 	pi_post_block(vcpu);
7608 }
7609 
7610 static void vmx_setup_mce(struct kvm_vcpu *vcpu)
7611 {
7612 	if (vcpu->arch.mcg_cap & MCG_LMCE_P)
7613 		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
7614 			FEAT_CTL_LMCE_ENABLED;
7615 	else
7616 		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
7617 			~FEAT_CTL_LMCE_ENABLED;
7618 }
7619 
7620 static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
7621 {
7622 	/* we need a nested vmexit to enter SMM, postpone if run is pending */
7623 	if (to_vmx(vcpu)->nested.nested_run_pending)
7624 		return -EBUSY;
7625 	return !is_smm(vcpu);
7626 }
7627 
7628 static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
7629 {
7630 	struct vcpu_vmx *vmx = to_vmx(vcpu);
7631 
7632 	vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
7633 	if (vmx->nested.smm.guest_mode)
7634 		nested_vmx_vmexit(vcpu, -1, 0, 0);
7635 
7636 	vmx->nested.smm.vmxon = vmx->nested.vmxon;
7637 	vmx->nested.vmxon = false;
7638 	vmx_clear_hlt(vcpu);
7639 	return 0;
7640 }
7641 
7642 static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
7643 {
7644 	struct vcpu_vmx *vmx = to_vmx(vcpu);
7645 	int ret;
7646 
7647 	if (vmx->nested.smm.vmxon) {
7648 		vmx->nested.vmxon = true;
7649 		vmx->nested.smm.vmxon = false;
7650 	}
7651 
7652 	if (vmx->nested.smm.guest_mode) {
7653 		ret = nested_vmx_enter_non_root_mode(vcpu, false);
7654 		if (ret)
7655 			return ret;
7656 
7657 		vmx->nested.smm.guest_mode = false;
7658 	}
7659 	return 0;
7660 }
7661 
7662 static void enable_smi_window(struct kvm_vcpu *vcpu)
7663 {
7664 	/* RSM will cause a vmexit anyway.  */
7665 }
7666 
7667 static bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
7668 {
7669 	return to_vmx(vcpu)->nested.vmxon;
7670 }
7671 
7672 static void vmx_migrate_timers(struct kvm_vcpu *vcpu)
7673 {
7674 	if (is_guest_mode(vcpu)) {
7675 		struct hrtimer *timer = &to_vmx(vcpu)->nested.preemption_timer;
7676 
7677 		if (hrtimer_try_to_cancel(timer) == 1)
7678 			hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
7679 	}
7680 }
7681 
7682 static void hardware_unsetup(void)
7683 {
7684 	if (nested)
7685 		nested_vmx_hardware_unsetup();
7686 
7687 	free_kvm_area();
7688 }
7689 
7690 static bool vmx_check_apicv_inhibit_reasons(ulong bit)
7691 {
7692 	ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
7693 			  BIT(APICV_INHIBIT_REASON_HYPERV);
7694 
7695 	return supported & BIT(bit);
7696 }
7697 
7698 static struct kvm_x86_ops vmx_x86_ops __initdata = {
7699 	.hardware_unsetup = hardware_unsetup,
7700 
7701 	.hardware_enable = hardware_enable,
7702 	.hardware_disable = hardware_disable,
7703 	.cpu_has_accelerated_tpr = report_flexpriority,
7704 	.has_emulated_msr = vmx_has_emulated_msr,
7705 
7706 	.vm_size = sizeof(struct kvm_vmx),
7707 	.vm_init = vmx_vm_init,
7708 
7709 	.vcpu_create = vmx_create_vcpu,
7710 	.vcpu_free = vmx_free_vcpu,
7711 	.vcpu_reset = vmx_vcpu_reset,
7712 
7713 	.prepare_guest_switch = vmx_prepare_switch_to_guest,
7714 	.vcpu_load = vmx_vcpu_load,
7715 	.vcpu_put = vmx_vcpu_put,
7716 
7717 	.update_exception_bitmap = update_exception_bitmap,
7718 	.get_msr_feature = vmx_get_msr_feature,
7719 	.get_msr = vmx_get_msr,
7720 	.set_msr = vmx_set_msr,
7721 	.get_segment_base = vmx_get_segment_base,
7722 	.get_segment = vmx_get_segment,
7723 	.set_segment = vmx_set_segment,
7724 	.get_cpl = vmx_get_cpl,
7725 	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
7726 	.set_cr0 = vmx_set_cr0,
7727 	.set_cr4 = vmx_set_cr4,
7728 	.set_efer = vmx_set_efer,
7729 	.get_idt = vmx_get_idt,
7730 	.set_idt = vmx_set_idt,
7731 	.get_gdt = vmx_get_gdt,
7732 	.set_gdt = vmx_set_gdt,
7733 	.set_dr7 = vmx_set_dr7,
7734 	.sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
7735 	.cache_reg = vmx_cache_reg,
7736 	.get_rflags = vmx_get_rflags,
7737 	.set_rflags = vmx_set_rflags,
7738 
7739 	.tlb_flush_all = vmx_flush_tlb_all,
7740 	.tlb_flush_current = vmx_flush_tlb_current,
7741 	.tlb_flush_gva = vmx_flush_tlb_gva,
7742 	.tlb_flush_guest = vmx_flush_tlb_guest,
7743 
7744 	.run = vmx_vcpu_run,
7745 	.handle_exit = vmx_handle_exit,
7746 	.skip_emulated_instruction = vmx_skip_emulated_instruction,
7747 	.update_emulated_instruction = vmx_update_emulated_instruction,
7748 	.set_interrupt_shadow = vmx_set_interrupt_shadow,
7749 	.get_interrupt_shadow = vmx_get_interrupt_shadow,
7750 	.patch_hypercall = vmx_patch_hypercall,
7751 	.set_irq = vmx_inject_irq,
7752 	.set_nmi = vmx_inject_nmi,
7753 	.queue_exception = vmx_queue_exception,
7754 	.cancel_injection = vmx_cancel_injection,
7755 	.interrupt_allowed = vmx_interrupt_allowed,
7756 	.nmi_allowed = vmx_nmi_allowed,
7757 	.get_nmi_mask = vmx_get_nmi_mask,
7758 	.set_nmi_mask = vmx_set_nmi_mask,
7759 	.enable_nmi_window = enable_nmi_window,
7760 	.enable_irq_window = enable_irq_window,
7761 	.update_cr8_intercept = update_cr8_intercept,
7762 	.set_virtual_apic_mode = vmx_set_virtual_apic_mode,
7763 	.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
7764 	.refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
7765 	.load_eoi_exitmap = vmx_load_eoi_exitmap,
7766 	.apicv_post_state_restore = vmx_apicv_post_state_restore,
7767 	.check_apicv_inhibit_reasons = vmx_check_apicv_inhibit_reasons,
7768 	.hwapic_irr_update = vmx_hwapic_irr_update,
7769 	.hwapic_isr_update = vmx_hwapic_isr_update,
7770 	.guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
7771 	.sync_pir_to_irr = vmx_sync_pir_to_irr,
7772 	.deliver_posted_interrupt = vmx_deliver_posted_interrupt,
7773 	.dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,
7774 
7775 	.set_tss_addr = vmx_set_tss_addr,
7776 	.set_identity_map_addr = vmx_set_identity_map_addr,
7777 	.get_mt_mask = vmx_get_mt_mask,
7778 
7779 	.get_exit_info = vmx_get_exit_info,
7780 
7781 	.vcpu_after_set_cpuid = vmx_vcpu_after_set_cpuid,
7782 
7783 	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
7784 
7785 	.write_l1_tsc_offset = vmx_write_l1_tsc_offset,
7786 
7787 	.load_mmu_pgd = vmx_load_mmu_pgd,
7788 
7789 	.check_intercept = vmx_check_intercept,
7790 	.handle_exit_irqoff = vmx_handle_exit_irqoff,
7791 
7792 	.request_immediate_exit = vmx_request_immediate_exit,
7793 
7794 	.sched_in = vmx_sched_in,
7795 
7796 	.slot_enable_log_dirty = vmx_slot_enable_log_dirty,
7797 	.slot_disable_log_dirty = vmx_slot_disable_log_dirty,
7798 	.flush_log_dirty = vmx_flush_log_dirty,
7799 	.enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
7800 
7801 	.pre_block = vmx_pre_block,
7802 	.post_block = vmx_post_block,
7803 
7804 	.pmu_ops = &intel_pmu_ops,
7805 	.nested_ops = &vmx_nested_ops,
7806 
7807 	.update_pi_irte = pi_update_irte,
7808 
7809 #ifdef CONFIG_X86_64
7810 	.set_hv_timer = vmx_set_hv_timer,
7811 	.cancel_hv_timer = vmx_cancel_hv_timer,
7812 #endif
7813 
7814 	.setup_mce = vmx_setup_mce,
7815 
7816 	.smi_allowed = vmx_smi_allowed,
7817 	.pre_enter_smm = vmx_pre_enter_smm,
7818 	.pre_leave_smm = vmx_pre_leave_smm,
7819 	.enable_smi_window = enable_smi_window,
7820 
7821 	.can_emulate_instruction = vmx_can_emulate_instruction,
7822 	.apic_init_signal_blocked = vmx_apic_init_signal_blocked,
7823 	.migrate_timers = vmx_migrate_timers,
7824 
7825 	.msr_filter_changed = vmx_msr_filter_changed,
7826 };
7827 
7828 static __init int hardware_setup(void)
7829 {
7830 	unsigned long host_bndcfgs;
7831 	struct desc_ptr dt;
7832 	int r, i, ept_lpage_level;
7833 
7834 	store_idt(&dt);
7835 	host_idt_base = dt.address;
7836 
7837 	for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i)
7838 		kvm_define_user_return_msr(i, vmx_uret_msrs_list[i]);
7839 
7840 	if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0)
7841 		return -EIO;
7842 
7843 	if (boot_cpu_has(X86_FEATURE_NX))
7844 		kvm_enable_efer_bits(EFER_NX);
7845 
7846 	if (boot_cpu_has(X86_FEATURE_MPX)) {
7847 		rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs);
7848 		WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost");
7849 	}
7850 
7851 	if (!cpu_has_vmx_mpx())
7852 		supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS |
7853 				    XFEATURE_MASK_BNDCSR);
7854 
7855 	if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
7856 	    !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
7857 		enable_vpid = 0;
7858 
7859 	if (!cpu_has_vmx_ept() ||
7860 	    !cpu_has_vmx_ept_4levels() ||
7861 	    !cpu_has_vmx_ept_mt_wb() ||
7862 	    !cpu_has_vmx_invept_global())
7863 		enable_ept = 0;
7864 
7865 	if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
7866 		enable_ept_ad_bits = 0;
7867 
7868 	if (!cpu_has_vmx_unrestricted_guest() || !enable_ept)
7869 		enable_unrestricted_guest = 0;
7870 
7871 	if (!cpu_has_vmx_flexpriority())
7872 		flexpriority_enabled = 0;
7873 
7874 	if (!cpu_has_virtual_nmis())
7875 		enable_vnmi = 0;
7876 
7877 	/*
7878 	 * set_apic_access_page_addr() is used to reload apic access
7879 	 * page upon invalidation.  No need to do anything if not
7880 	 * using the APIC_ACCESS_ADDR VMCS field.
7881 	 */
7882 	if (!flexpriority_enabled)
7883 		vmx_x86_ops.set_apic_access_page_addr = NULL;
7884 
7885 	if (!cpu_has_vmx_tpr_shadow())
7886 		vmx_x86_ops.update_cr8_intercept = NULL;
7887 
7888 #if IS_ENABLED(CONFIG_HYPERV)
7889 	if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
7890 	    && enable_ept) {
7891 		vmx_x86_ops.tlb_remote_flush = hv_remote_flush_tlb;
7892 		vmx_x86_ops.tlb_remote_flush_with_range =
7893 				hv_remote_flush_tlb_with_range;
7894 	}
7895 #endif
7896 
7897 	if (!cpu_has_vmx_ple()) {
7898 		ple_gap = 0;
7899 		ple_window = 0;
7900 		ple_window_grow = 0;
7901 		ple_window_max = 0;
7902 		ple_window_shrink = 0;
7903 	}
7904 
7905 	if (!cpu_has_vmx_apicv()) {
7906 		enable_apicv = 0;
7907 		vmx_x86_ops.sync_pir_to_irr = NULL;
7908 	}
7909 
7910 	if (cpu_has_vmx_tsc_scaling()) {
7911 		kvm_has_tsc_control = true;
7912 		kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
7913 		kvm_tsc_scaling_ratio_frac_bits = 48;
7914 	}
7915 
7916 	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
7917 
7918 	if (enable_ept)
7919 		vmx_enable_tdp();
7920 
7921 	if (!enable_ept)
7922 		ept_lpage_level = 0;
7923 	else if (cpu_has_vmx_ept_1g_page())
7924 		ept_lpage_level = PG_LEVEL_1G;
7925 	else if (cpu_has_vmx_ept_2m_page())
7926 		ept_lpage_level = PG_LEVEL_2M;
7927 	else
7928 		ept_lpage_level = PG_LEVEL_4K;
7929 	kvm_configure_mmu(enable_ept, vmx_get_max_tdp_level(), ept_lpage_level);
7930 
7931 	/*
7932 	 * Only enable PML when hardware supports PML feature, and both EPT
7933 	 * and EPT A/D bit features are enabled -- PML depends on them to work.
7934 	 */
7935 	if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
7936 		enable_pml = 0;
7937 
7938 	if (!enable_pml) {
7939 		vmx_x86_ops.slot_enable_log_dirty = NULL;
7940 		vmx_x86_ops.slot_disable_log_dirty = NULL;
7941 		vmx_x86_ops.flush_log_dirty = NULL;
7942 		vmx_x86_ops.enable_log_dirty_pt_masked = NULL;
7943 	}
7944 
7945 	if (!cpu_has_vmx_preemption_timer())
7946 		enable_preemption_timer = false;
7947 
7948 	if (enable_preemption_timer) {
7949 		u64 use_timer_freq = 5000ULL * 1000 * 1000;
7950 		u64 vmx_msr;
7951 
7952 		rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
7953 		cpu_preemption_timer_multi =
7954 			vmx_msr & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
7955 
7956 		if (tsc_khz)
7957 			use_timer_freq = (u64)tsc_khz * 1000;
7958 		use_timer_freq >>= cpu_preemption_timer_multi;
7959 
7960 		/*
7961 		 * KVM "disables" the preemption timer by setting it to its max
7962 		 * value.  Don't use the timer if it might cause spurious exits
7963 		 * at a rate faster than 0.1 Hz (of uninterrupted guest time).
7964 		 */
7965 		if (use_timer_freq > 0xffffffffu / 10)
7966 			enable_preemption_timer = false;
7967 	}
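
	/*
	 * Worked example (editorial addition, hypothetical numbers): on a
	 * 2.8 GHz TSC with a timer rate of 5 (one tick per 2^5 cycles),
	 *
	 *	use_timer_freq = 2800000 * 1000 >> 5 = 87500000
	 *
	 * which is far below 0xffffffff / 10 (~429 million), so the
	 * preemption timer stays enabled.
	 */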
7968 
7969 	if (!enable_preemption_timer) {
7970 		vmx_x86_ops.set_hv_timer = NULL;
7971 		vmx_x86_ops.cancel_hv_timer = NULL;
7972 		vmx_x86_ops.request_immediate_exit = __kvm_request_immediate_exit;
7973 	}
7974 
7975 	kvm_set_posted_intr_wakeup_handler(pi_wakeup_handler);
7976 
7977 	kvm_mce_cap_supported |= MCG_LMCE_P;
7978 
7979 	if (pt_mode != PT_MODE_SYSTEM && pt_mode != PT_MODE_HOST_GUEST)
7980 		return -EINVAL;
7981 	if (!enable_ept || !cpu_has_vmx_intel_pt())
7982 		pt_mode = PT_MODE_SYSTEM;
7983 
7984 	if (nested) {
7985 		nested_vmx_setup_ctls_msrs(&vmcs_config.nested,
7986 					   vmx_capability.ept);
7987 
7988 		r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers);
7989 		if (r)
7990 			return r;
7991 	}
7992 
7993 	vmx_set_cpu_caps();
7994 
7995 	r = alloc_kvm_area();
7996 	if (r)
7997 		nested_vmx_hardware_unsetup();
7998 	return r;
7999 }
8000 
8001 static struct kvm_x86_init_ops vmx_init_ops __initdata = {
8002 	.cpu_has_kvm_support = cpu_has_kvm_support,
8003 	.disabled_by_bios = vmx_disabled_by_bios,
8004 	.check_processor_compatibility = vmx_check_processor_compat,
8005 	.hardware_setup = hardware_setup,
8006 
8007 	.runtime_ops = &vmx_x86_ops,
8008 };
8009 
8010 static void vmx_cleanup_l1d_flush(void)
8011 {
8012 	if (vmx_l1d_flush_pages) {
8013 		free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
8014 		vmx_l1d_flush_pages = NULL;
8015 	}
8016 	/* Restore state so sysfs ignores VMX */
8017 	l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
8018 }
8019 
8020 static void vmx_exit(void)
8021 {
8022 #ifdef CONFIG_KEXEC_CORE
8023 	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
8024 	synchronize_rcu();
8025 #endif
8026 
8027 	kvm_exit();
8028 
8029 #if IS_ENABLED(CONFIG_HYPERV)
8030 	if (static_branch_unlikely(&enable_evmcs)) {
8031 		int cpu;
8032 		struct hv_vp_assist_page *vp_ap;
8033 		/*
8034 		 * Reset everything to support using non-enlightened VMCS
8035 		 * access later (e.g. when we reload the module with
8036 		 * enlightened_vmcs=0)
8037 		 */
8038 		for_each_online_cpu(cpu) {
8039 			vp_ap =	hv_get_vp_assist_page(cpu);
8040 
8041 			if (!vp_ap)
8042 				continue;
8043 
8044 			vp_ap->nested_control.features.directhypercall = 0;
8045 			vp_ap->current_nested_vmcs = 0;
8046 			vp_ap->enlighten_vmentry = 0;
8047 		}
8048 
8049 		static_branch_disable(&enable_evmcs);
8050 	}
8051 #endif
8052 	vmx_cleanup_l1d_flush();
8053 }
8054 module_exit(vmx_exit);
8055 
8056 static int __init vmx_init(void)
8057 {
8058 	int r, cpu;
8059 
8060 #if IS_ENABLED(CONFIG_HYPERV)
8061 	/*
8062 	 * Enlightened VMCS usage must be recommended by Hyper-V, and the
8063 	 * host needs to support eVMCS v1 or above. eVMCS support can also
8064 	 * be disabled with the module parameter.
8065 	 */
8066 	if (enlightened_vmcs &&
8067 	    ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED &&
8068 	    (ms_hyperv.nested_features & HV_X64_ENLIGHTENED_VMCS_VERSION) >=
8069 	    KVM_EVMCS_VERSION) {
8070 		int cpu;
8071 
8072 		/* Check that we have assist pages on all online CPUs */
8073 		for_each_online_cpu(cpu) {
8074 			if (!hv_get_vp_assist_page(cpu)) {
8075 				enlightened_vmcs = false;
8076 				break;
8077 			}
8078 		}
8079 
8080 		if (enlightened_vmcs) {
8081 			pr_info("KVM: vmx: using Hyper-V Enlightened VMCS\n");
8082 			static_branch_enable(&enable_evmcs);
8083 		}
8084 
8085 		if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH)
8086 			vmx_x86_ops.enable_direct_tlbflush
8087 				= hv_enable_direct_tlbflush;
8088 
8089 	} else {
8090 		enlightened_vmcs = false;
8091 	}
8092 #endif
8093 
8094 	r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx),
8095 		     __alignof__(struct vcpu_vmx), THIS_MODULE);
8096 	if (r)
8097 		return r;
8098 
8099 	/*
8100 	 * Must be called after kvm_init() so enable_ept is properly set
8101 	 * up. Hand the parameter mitigation value in which was stored in
8102 	 * the pre module init parser. If no parameter was given, it will
8103 	 * contain 'auto' which will be turned into the default 'cond'
8104 	 * mitigation mode.
8105 	 */
8106 	r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
8107 	if (r) {
8108 		vmx_exit();
8109 		return r;
8110 	}
8111 
8112 	vmx_setup_fb_clear_ctrl();
8113 
8114 	for_each_possible_cpu(cpu) {
8115 		INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
8116 
8117 		pi_init_cpu(cpu);
8118 	}
8119 
8120 #ifdef CONFIG_KEXEC_CORE
8121 	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
8122 			   crash_vmclear_local_loaded_vmcss);
8123 #endif
8124 	vmx_check_vmcs12_offsets();
8125 
8126 	/*
8127 	 * Shadow paging doesn't have a (further) performance penalty
8128 	 * from GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable it
8129 	 * by default
8130 	 */
8131 	if (!enable_ept)
8132 		allow_smaller_maxphyaddr = true;
8133 
8134 	return 0;
8135 }
8136 module_init(vmx_init);
8137