• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * derived from drivers/kvm/kvm_main.c
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright (C) 2008 Qumranet, Inc.
9  * Copyright IBM Corporation, 2008
10  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
11  *
12  * Authors:
13  *   Avi Kivity   <avi@qumranet.com>
14  *   Yaniv Kamay  <yaniv@qumranet.com>
15  *   Amit Shah    <amit.shah@qumranet.com>
16  *   Ben-Ami Yassour <benami@il.ibm.com>
17  */
18 
19 #include <linux/kvm_host.h>
20 #include "irq.h"
21 #include "ioapic.h"
22 #include "mmu.h"
23 #include "i8254.h"
24 #include "tss.h"
25 #include "kvm_cache_regs.h"
26 #include "kvm_emulate.h"
27 #include "x86.h"
28 #include "cpuid.h"
29 #include "pmu.h"
30 #include "hyperv.h"
31 #include "lapic.h"
32 
33 #include <linux/clocksource.h>
34 #include <linux/interrupt.h>
35 #include <linux/kvm.h>
36 #include <linux/fs.h>
37 #include <linux/vmalloc.h>
38 #include <linux/export.h>
39 #include <linux/moduleparam.h>
40 #include <linux/mman.h>
41 #include <linux/highmem.h>
42 #include <linux/iommu.h>
43 #include <linux/intel-iommu.h>
44 #include <linux/cpufreq.h>
45 #include <linux/user-return-notifier.h>
46 #include <linux/srcu.h>
47 #include <linux/slab.h>
48 #include <linux/perf_event.h>
49 #include <linux/uaccess.h>
50 #include <linux/hash.h>
51 #include <linux/pci.h>
52 #include <linux/timekeeper_internal.h>
53 #include <linux/pvclock_gtod.h>
54 #include <linux/kvm_irqfd.h>
55 #include <linux/irqbypass.h>
56 #include <linux/sched/stat.h>
57 #include <linux/sched/isolation.h>
58 #include <linux/mem_encrypt.h>
59 #include <linux/entry-kvm.h>
60 
61 #include <trace/events/kvm.h>
62 
63 #include <asm/debugreg.h>
64 #include <asm/msr.h>
65 #include <asm/desc.h>
66 #include <asm/mce.h>
67 #include <linux/kernel_stat.h>
68 #include <asm/fpu/internal.h> /* Ugh! */
69 #include <asm/pvclock.h>
70 #include <asm/div64.h>
71 #include <asm/irq_remapping.h>
72 #include <asm/mshyperv.h>
73 #include <asm/hypervisor.h>
74 #include <asm/tlbflush.h>
75 #include <asm/intel_pt.h>
76 #include <asm/emulate_prefix.h>
77 #include <clocksource/hyperv_timer.h>
78 
79 #define CREATE_TRACE_POINTS
80 #include "trace.h"
81 
82 #define MAX_IO_MSRS 256
83 #define KVM_MAX_MCE_BANKS 32
84 u64 __read_mostly kvm_mce_cap_supported = MCG_CTL_P | MCG_SER_P;
85 EXPORT_SYMBOL_GPL(kvm_mce_cap_supported);
86 
87 #define emul_to_vcpu(ctxt) \
88 	((struct kvm_vcpu *)(ctxt)->vcpu)
89 
90 /* EFER defaults:
91  * - enable syscall per default because its emulated by KVM
92  * - enable LME and LMA per default on 64 bit KVM
93  */
94 #ifdef CONFIG_X86_64
95 static
96 u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
97 #else
98 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
99 #endif
100 
101 static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
102 
103 #define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
104                                     KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
105 
106 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
107 static void process_nmi(struct kvm_vcpu *vcpu);
108 static void process_smi(struct kvm_vcpu *vcpu);
109 static void enter_smm(struct kvm_vcpu *vcpu);
110 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
111 static void store_regs(struct kvm_vcpu *vcpu);
112 static int sync_regs(struct kvm_vcpu *vcpu);
113 
114 struct kvm_x86_ops kvm_x86_ops __read_mostly;
115 EXPORT_SYMBOL_GPL(kvm_x86_ops);
116 
117 static bool __read_mostly ignore_msrs = 0;
118 module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
119 
120 static bool __read_mostly report_ignored_msrs = true;
121 module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);
122 
123 unsigned int min_timer_period_us = 200;
124 module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
125 
126 static bool __read_mostly kvmclock_periodic_sync = true;
127 module_param(kvmclock_periodic_sync, bool, S_IRUGO);
128 
129 bool __read_mostly kvm_has_tsc_control;
130 EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
131 u32  __read_mostly kvm_max_guest_tsc_khz;
132 EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
133 u8   __read_mostly kvm_tsc_scaling_ratio_frac_bits;
134 EXPORT_SYMBOL_GPL(kvm_tsc_scaling_ratio_frac_bits);
135 u64  __read_mostly kvm_max_tsc_scaling_ratio;
136 EXPORT_SYMBOL_GPL(kvm_max_tsc_scaling_ratio);
137 u64 __read_mostly kvm_default_tsc_scaling_ratio;
138 EXPORT_SYMBOL_GPL(kvm_default_tsc_scaling_ratio);
139 
140 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
141 static u32 __read_mostly tsc_tolerance_ppm = 250;
142 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
143 
144 /*
145  * lapic timer advance (tscdeadline mode only) in nanoseconds.  '-1' enables
146  * adaptive tuning starting from default advancment of 1000ns.  '0' disables
147  * advancement entirely.  Any other value is used as-is and disables adaptive
148  * tuning, i.e. allows priveleged userspace to set an exact advancement time.
149  */
150 static int __read_mostly lapic_timer_advance_ns = -1;
151 module_param(lapic_timer_advance_ns, int, S_IRUGO | S_IWUSR);
152 
153 static bool __read_mostly vector_hashing = true;
154 module_param(vector_hashing, bool, S_IRUGO);
155 
156 bool __read_mostly enable_vmware_backdoor = false;
157 module_param(enable_vmware_backdoor, bool, S_IRUGO);
158 EXPORT_SYMBOL_GPL(enable_vmware_backdoor);
159 
160 static bool __read_mostly force_emulation_prefix = false;
161 module_param(force_emulation_prefix, bool, S_IRUGO);
162 
163 int __read_mostly pi_inject_timer = -1;
164 module_param(pi_inject_timer, bint, S_IRUGO | S_IWUSR);
165 
166 /*
167  * Restoring the host value for MSRs that are only consumed when running in
168  * usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU
169  * returns to userspace, i.e. the kernel can run with the guest's value.
170  */
171 #define KVM_MAX_NR_USER_RETURN_MSRS 16
172 
173 struct kvm_user_return_msrs_global {
174 	int nr;
175 	u32 msrs[KVM_MAX_NR_USER_RETURN_MSRS];
176 };
177 
178 struct kvm_user_return_msrs {
179 	struct user_return_notifier urn;
180 	bool registered;
181 	struct kvm_user_return_msr_values {
182 		u64 host;
183 		u64 curr;
184 	} values[KVM_MAX_NR_USER_RETURN_MSRS];
185 };
186 
187 static struct kvm_user_return_msrs_global __read_mostly user_return_msrs_global;
188 static struct kvm_user_return_msrs __percpu *user_return_msrs;
189 
190 #define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
191 				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
192 				| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
193 				| XFEATURE_MASK_PKRU)
194 
195 u64 __read_mostly host_efer;
196 EXPORT_SYMBOL_GPL(host_efer);
197 
198 bool __read_mostly allow_smaller_maxphyaddr = 0;
199 EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);
200 
201 static u64 __read_mostly host_xss;
202 u64 __read_mostly supported_xss;
203 EXPORT_SYMBOL_GPL(supported_xss);
204 
205 struct kvm_stats_debugfs_item debugfs_entries[] = {
206 	VCPU_STAT("pf_fixed", pf_fixed),
207 	VCPU_STAT("pf_guest", pf_guest),
208 	VCPU_STAT("tlb_flush", tlb_flush),
209 	VCPU_STAT("invlpg", invlpg),
210 	VCPU_STAT("exits", exits),
211 	VCPU_STAT("io_exits", io_exits),
212 	VCPU_STAT("mmio_exits", mmio_exits),
213 	VCPU_STAT("signal_exits", signal_exits),
214 	VCPU_STAT("irq_window", irq_window_exits),
215 	VCPU_STAT("nmi_window", nmi_window_exits),
216 	VCPU_STAT("halt_exits", halt_exits),
217 	VCPU_STAT("halt_successful_poll", halt_successful_poll),
218 	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
219 	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
220 	VCPU_STAT("halt_wakeup", halt_wakeup),
221 	VCPU_STAT("hypercalls", hypercalls),
222 	VCPU_STAT("request_irq", request_irq_exits),
223 	VCPU_STAT("irq_exits", irq_exits),
224 	VCPU_STAT("host_state_reload", host_state_reload),
225 	VCPU_STAT("fpu_reload", fpu_reload),
226 	VCPU_STAT("insn_emulation", insn_emulation),
227 	VCPU_STAT("insn_emulation_fail", insn_emulation_fail),
228 	VCPU_STAT("irq_injections", irq_injections),
229 	VCPU_STAT("nmi_injections", nmi_injections),
230 	VCPU_STAT("req_event", req_event),
231 	VCPU_STAT("l1d_flush", l1d_flush),
232 	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
233 	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
234 	VCPU_STAT("preemption_reported", preemption_reported),
235 	VCPU_STAT("preemption_other", preemption_other),
236 	VM_STAT("mmu_shadow_zapped", mmu_shadow_zapped),
237 	VM_STAT("mmu_pte_write", mmu_pte_write),
238 	VM_STAT("mmu_pde_zapped", mmu_pde_zapped),
239 	VM_STAT("mmu_flooded", mmu_flooded),
240 	VM_STAT("mmu_recycled", mmu_recycled),
241 	VM_STAT("mmu_cache_miss", mmu_cache_miss),
242 	VM_STAT("mmu_unsync", mmu_unsync),
243 	VM_STAT("remote_tlb_flush", remote_tlb_flush),
244 	VM_STAT("largepages", lpages, .mode = 0444),
245 	VM_STAT("nx_largepages_splitted", nx_lpage_splits, .mode = 0444),
246 	VM_STAT("max_mmu_page_hash_collisions", max_mmu_page_hash_collisions),
247 	{ NULL }
248 };
249 
250 u64 __read_mostly host_xcr0;
251 u64 __read_mostly supported_xcr0;
252 EXPORT_SYMBOL_GPL(supported_xcr0);
253 
254 static struct kmem_cache *x86_fpu_cache;
255 
256 static struct kmem_cache *x86_emulator_cache;
257 
258 /*
259  * When called, it means the previous get/set msr reached an invalid msr.
260  * Return true if we want to ignore/silent this failed msr access.
261  */
kvm_msr_ignored_check(struct kvm_vcpu * vcpu,u32 msr,u64 data,bool write)262 static bool kvm_msr_ignored_check(struct kvm_vcpu *vcpu, u32 msr,
263 				  u64 data, bool write)
264 {
265 	const char *op = write ? "wrmsr" : "rdmsr";
266 
267 	if (ignore_msrs) {
268 		if (report_ignored_msrs)
269 			kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n",
270 				      op, msr, data);
271 		/* Mask the error */
272 		return true;
273 	} else {
274 		kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n",
275 				      op, msr, data);
276 		return false;
277 	}
278 }
279 
kvm_alloc_emulator_cache(void)280 static struct kmem_cache *kvm_alloc_emulator_cache(void)
281 {
282 	unsigned int useroffset = offsetof(struct x86_emulate_ctxt, src);
283 	unsigned int size = sizeof(struct x86_emulate_ctxt);
284 
285 	return kmem_cache_create_usercopy("x86_emulator", size,
286 					  __alignof__(struct x86_emulate_ctxt),
287 					  SLAB_ACCOUNT, useroffset,
288 					  size - useroffset, NULL);
289 }
290 
291 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
292 
kvm_async_pf_hash_reset(struct kvm_vcpu * vcpu)293 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
294 {
295 	int i;
296 	for (i = 0; i < ASYNC_PF_PER_VCPU; i++)
297 		vcpu->arch.apf.gfns[i] = ~0;
298 }
299 
kvm_on_user_return(struct user_return_notifier * urn)300 static void kvm_on_user_return(struct user_return_notifier *urn)
301 {
302 	unsigned slot;
303 	struct kvm_user_return_msrs *msrs
304 		= container_of(urn, struct kvm_user_return_msrs, urn);
305 	struct kvm_user_return_msr_values *values;
306 	unsigned long flags;
307 
308 	/*
309 	 * Disabling irqs at this point since the following code could be
310 	 * interrupted and executed through kvm_arch_hardware_disable()
311 	 */
312 	local_irq_save(flags);
313 	if (msrs->registered) {
314 		msrs->registered = false;
315 		user_return_notifier_unregister(urn);
316 	}
317 	local_irq_restore(flags);
318 	for (slot = 0; slot < user_return_msrs_global.nr; ++slot) {
319 		values = &msrs->values[slot];
320 		if (values->host != values->curr) {
321 			wrmsrl(user_return_msrs_global.msrs[slot], values->host);
322 			values->curr = values->host;
323 		}
324 	}
325 }
326 
kvm_probe_user_return_msr(u32 msr)327 int kvm_probe_user_return_msr(u32 msr)
328 {
329 	u64 val;
330 	int ret;
331 
332 	preempt_disable();
333 	ret = rdmsrl_safe(msr, &val);
334 	if (ret)
335 		goto out;
336 	ret = wrmsrl_safe(msr, val);
337 out:
338 	preempt_enable();
339 	return ret;
340 }
341 EXPORT_SYMBOL_GPL(kvm_probe_user_return_msr);
342 
kvm_define_user_return_msr(unsigned slot,u32 msr)343 void kvm_define_user_return_msr(unsigned slot, u32 msr)
344 {
345 	BUG_ON(slot >= KVM_MAX_NR_USER_RETURN_MSRS);
346 	user_return_msrs_global.msrs[slot] = msr;
347 	if (slot >= user_return_msrs_global.nr)
348 		user_return_msrs_global.nr = slot + 1;
349 }
350 EXPORT_SYMBOL_GPL(kvm_define_user_return_msr);
351 
kvm_user_return_msr_cpu_online(void)352 static void kvm_user_return_msr_cpu_online(void)
353 {
354 	unsigned int cpu = smp_processor_id();
355 	struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
356 	u64 value;
357 	int i;
358 
359 	for (i = 0; i < user_return_msrs_global.nr; ++i) {
360 		rdmsrl_safe(user_return_msrs_global.msrs[i], &value);
361 		msrs->values[i].host = value;
362 		msrs->values[i].curr = value;
363 	}
364 }
365 
kvm_set_user_return_msr(unsigned slot,u64 value,u64 mask)366 int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
367 {
368 	unsigned int cpu = smp_processor_id();
369 	struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
370 	int err;
371 
372 	value = (value & mask) | (msrs->values[slot].host & ~mask);
373 	if (value == msrs->values[slot].curr)
374 		return 0;
375 	err = wrmsrl_safe(user_return_msrs_global.msrs[slot], value);
376 	if (err)
377 		return 1;
378 
379 	msrs->values[slot].curr = value;
380 	if (!msrs->registered) {
381 		msrs->urn.on_user_return = kvm_on_user_return;
382 		user_return_notifier_register(&msrs->urn);
383 		msrs->registered = true;
384 	}
385 	return 0;
386 }
387 EXPORT_SYMBOL_GPL(kvm_set_user_return_msr);
388 
drop_user_return_notifiers(void)389 static void drop_user_return_notifiers(void)
390 {
391 	unsigned int cpu = smp_processor_id();
392 	struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
393 
394 	if (msrs->registered)
395 		kvm_on_user_return(&msrs->urn);
396 }
397 
kvm_get_apic_base(struct kvm_vcpu * vcpu)398 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
399 {
400 	return vcpu->arch.apic_base;
401 }
402 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
403 
kvm_get_apic_mode(struct kvm_vcpu * vcpu)404 enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
405 {
406 	return kvm_apic_mode(kvm_get_apic_base(vcpu));
407 }
408 EXPORT_SYMBOL_GPL(kvm_get_apic_mode);
409 
kvm_set_apic_base(struct kvm_vcpu * vcpu,struct msr_data * msr_info)410 int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
411 {
412 	enum lapic_mode old_mode = kvm_get_apic_mode(vcpu);
413 	enum lapic_mode new_mode = kvm_apic_mode(msr_info->data);
414 	u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff |
415 		(guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);
416 
417 	if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID)
418 		return 1;
419 	if (!msr_info->host_initiated) {
420 		if (old_mode == LAPIC_MODE_X2APIC && new_mode == LAPIC_MODE_XAPIC)
421 			return 1;
422 		if (old_mode == LAPIC_MODE_DISABLED && new_mode == LAPIC_MODE_X2APIC)
423 			return 1;
424 	}
425 
426 	kvm_lapic_set_base(vcpu, msr_info->data);
427 	kvm_recalculate_apic_map(vcpu->kvm);
428 	return 0;
429 }
430 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
431 
kvm_spurious_fault(void)432 asmlinkage __visible noinstr void kvm_spurious_fault(void)
433 {
434 	/* Fault while not rebooting.  We want the trace. */
435 	BUG_ON(!kvm_rebooting);
436 }
437 EXPORT_SYMBOL_GPL(kvm_spurious_fault);
438 
439 #define EXCPT_BENIGN		0
440 #define EXCPT_CONTRIBUTORY	1
441 #define EXCPT_PF		2
442 
exception_class(int vector)443 static int exception_class(int vector)
444 {
445 	switch (vector) {
446 	case PF_VECTOR:
447 		return EXCPT_PF;
448 	case DE_VECTOR:
449 	case TS_VECTOR:
450 	case NP_VECTOR:
451 	case SS_VECTOR:
452 	case GP_VECTOR:
453 		return EXCPT_CONTRIBUTORY;
454 	default:
455 		break;
456 	}
457 	return EXCPT_BENIGN;
458 }
459 
460 #define EXCPT_FAULT		0
461 #define EXCPT_TRAP		1
462 #define EXCPT_ABORT		2
463 #define EXCPT_INTERRUPT		3
464 
exception_type(int vector)465 static int exception_type(int vector)
466 {
467 	unsigned int mask;
468 
469 	if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
470 		return EXCPT_INTERRUPT;
471 
472 	mask = 1 << vector;
473 
474 	/* #DB is trap, as instruction watchpoints are handled elsewhere */
475 	if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR)))
476 		return EXCPT_TRAP;
477 
478 	if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
479 		return EXCPT_ABORT;
480 
481 	/* Reserved exceptions will result in fault */
482 	return EXCPT_FAULT;
483 }
484 
kvm_deliver_exception_payload(struct kvm_vcpu * vcpu)485 void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu)
486 {
487 	unsigned nr = vcpu->arch.exception.nr;
488 	bool has_payload = vcpu->arch.exception.has_payload;
489 	unsigned long payload = vcpu->arch.exception.payload;
490 
491 	if (!has_payload)
492 		return;
493 
494 	switch (nr) {
495 	case DB_VECTOR:
496 		/*
497 		 * "Certain debug exceptions may clear bit 0-3.  The
498 		 * remaining contents of the DR6 register are never
499 		 * cleared by the processor".
500 		 */
501 		vcpu->arch.dr6 &= ~DR_TRAP_BITS;
502 		/*
503 		 * DR6.RTM is set by all #DB exceptions that don't clear it.
504 		 */
505 		vcpu->arch.dr6 |= DR6_RTM;
506 		vcpu->arch.dr6 |= payload;
507 		/*
508 		 * Bit 16 should be set in the payload whenever the #DB
509 		 * exception should clear DR6.RTM. This makes the payload
510 		 * compatible with the pending debug exceptions under VMX.
511 		 * Though not currently documented in the SDM, this also
512 		 * makes the payload compatible with the exit qualification
513 		 * for #DB exceptions under VMX.
514 		 */
515 		vcpu->arch.dr6 ^= payload & DR6_RTM;
516 
517 		/*
518 		 * The #DB payload is defined as compatible with the 'pending
519 		 * debug exceptions' field under VMX, not DR6. While bit 12 is
520 		 * defined in the 'pending debug exceptions' field (enabled
521 		 * breakpoint), it is reserved and must be zero in DR6.
522 		 */
523 		vcpu->arch.dr6 &= ~BIT(12);
524 		break;
525 	case PF_VECTOR:
526 		vcpu->arch.cr2 = payload;
527 		break;
528 	}
529 
530 	vcpu->arch.exception.has_payload = false;
531 	vcpu->arch.exception.payload = 0;
532 }
533 EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload);
534 
kvm_multiple_exception(struct kvm_vcpu * vcpu,unsigned nr,bool has_error,u32 error_code,bool has_payload,unsigned long payload,bool reinject)535 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
536 		unsigned nr, bool has_error, u32 error_code,
537 	        bool has_payload, unsigned long payload, bool reinject)
538 {
539 	u32 prev_nr;
540 	int class1, class2;
541 
542 	kvm_make_request(KVM_REQ_EVENT, vcpu);
543 
544 	if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
545 	queue:
546 		if (reinject) {
547 			/*
548 			 * On vmentry, vcpu->arch.exception.pending is only
549 			 * true if an event injection was blocked by
550 			 * nested_run_pending.  In that case, however,
551 			 * vcpu_enter_guest requests an immediate exit,
552 			 * and the guest shouldn't proceed far enough to
553 			 * need reinjection.
554 			 */
555 			WARN_ON_ONCE(vcpu->arch.exception.pending);
556 			vcpu->arch.exception.injected = true;
557 			if (WARN_ON_ONCE(has_payload)) {
558 				/*
559 				 * A reinjected event has already
560 				 * delivered its payload.
561 				 */
562 				has_payload = false;
563 				payload = 0;
564 			}
565 		} else {
566 			vcpu->arch.exception.pending = true;
567 			vcpu->arch.exception.injected = false;
568 		}
569 		vcpu->arch.exception.has_error_code = has_error;
570 		vcpu->arch.exception.nr = nr;
571 		vcpu->arch.exception.error_code = error_code;
572 		vcpu->arch.exception.has_payload = has_payload;
573 		vcpu->arch.exception.payload = payload;
574 		if (!is_guest_mode(vcpu))
575 			kvm_deliver_exception_payload(vcpu);
576 		return;
577 	}
578 
579 	/* to check exception */
580 	prev_nr = vcpu->arch.exception.nr;
581 	if (prev_nr == DF_VECTOR) {
582 		/* triple fault -> shutdown */
583 		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
584 		return;
585 	}
586 	class1 = exception_class(prev_nr);
587 	class2 = exception_class(nr);
588 	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
589 		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
590 		/*
591 		 * Generate double fault per SDM Table 5-5.  Set
592 		 * exception.pending = true so that the double fault
593 		 * can trigger a nested vmexit.
594 		 */
595 		vcpu->arch.exception.pending = true;
596 		vcpu->arch.exception.injected = false;
597 		vcpu->arch.exception.has_error_code = true;
598 		vcpu->arch.exception.nr = DF_VECTOR;
599 		vcpu->arch.exception.error_code = 0;
600 		vcpu->arch.exception.has_payload = false;
601 		vcpu->arch.exception.payload = 0;
602 	} else
603 		/* replace previous exception with a new one in a hope
604 		   that instruction re-execution will regenerate lost
605 		   exception */
606 		goto queue;
607 }
608 
kvm_queue_exception(struct kvm_vcpu * vcpu,unsigned nr)609 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
610 {
611 	kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false);
612 }
613 EXPORT_SYMBOL_GPL(kvm_queue_exception);
614 
kvm_requeue_exception(struct kvm_vcpu * vcpu,unsigned nr)615 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
616 {
617 	kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true);
618 }
619 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
620 
kvm_queue_exception_p(struct kvm_vcpu * vcpu,unsigned nr,unsigned long payload)621 void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
622 			   unsigned long payload)
623 {
624 	kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
625 }
626 EXPORT_SYMBOL_GPL(kvm_queue_exception_p);
627 
kvm_queue_exception_e_p(struct kvm_vcpu * vcpu,unsigned nr,u32 error_code,unsigned long payload)628 static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
629 				    u32 error_code, unsigned long payload)
630 {
631 	kvm_multiple_exception(vcpu, nr, true, error_code,
632 			       true, payload, false);
633 }
634 
kvm_complete_insn_gp(struct kvm_vcpu * vcpu,int err)635 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
636 {
637 	if (err)
638 		kvm_inject_gp(vcpu, 0);
639 	else
640 		return kvm_skip_emulated_instruction(vcpu);
641 
642 	return 1;
643 }
644 EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
645 
kvm_inject_page_fault(struct kvm_vcpu * vcpu,struct x86_exception * fault)646 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
647 {
648 	++vcpu->stat.pf_guest;
649 	vcpu->arch.exception.nested_apf =
650 		is_guest_mode(vcpu) && fault->async_page_fault;
651 	if (vcpu->arch.exception.nested_apf) {
652 		vcpu->arch.apf.nested_apf_token = fault->address;
653 		kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
654 	} else {
655 		kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code,
656 					fault->address);
657 	}
658 }
659 EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
660 
kvm_inject_emulated_page_fault(struct kvm_vcpu * vcpu,struct x86_exception * fault)661 bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
662 				    struct x86_exception *fault)
663 {
664 	struct kvm_mmu *fault_mmu;
665 	WARN_ON_ONCE(fault->vector != PF_VECTOR);
666 
667 	fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
668 					       vcpu->arch.walk_mmu;
669 
670 	/*
671 	 * Invalidate the TLB entry for the faulting address, if it exists,
672 	 * else the access will fault indefinitely (and to emulate hardware).
673 	 */
674 	if ((fault->error_code & PFERR_PRESENT_MASK) &&
675 	    !(fault->error_code & PFERR_RSVD_MASK))
676 		kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address,
677 				       fault_mmu->root_hpa);
678 
679 	fault_mmu->inject_page_fault(vcpu, fault);
680 	return fault->nested_page_fault;
681 }
682 EXPORT_SYMBOL_GPL(kvm_inject_emulated_page_fault);
683 
kvm_inject_nmi(struct kvm_vcpu * vcpu)684 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
685 {
686 	atomic_inc(&vcpu->arch.nmi_queued);
687 	kvm_make_request(KVM_REQ_NMI, vcpu);
688 }
689 EXPORT_SYMBOL_GPL(kvm_inject_nmi);
690 
kvm_queue_exception_e(struct kvm_vcpu * vcpu,unsigned nr,u32 error_code)691 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
692 {
693 	kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false);
694 }
695 EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
696 
kvm_requeue_exception_e(struct kvm_vcpu * vcpu,unsigned nr,u32 error_code)697 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
698 {
699 	kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true);
700 }
701 EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
702 
703 /*
704  * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
705  * a #GP and return false.
706  */
kvm_require_cpl(struct kvm_vcpu * vcpu,int required_cpl)707 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
708 {
709 	if (kvm_x86_ops.get_cpl(vcpu) <= required_cpl)
710 		return true;
711 	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
712 	return false;
713 }
714 EXPORT_SYMBOL_GPL(kvm_require_cpl);
715 
kvm_require_dr(struct kvm_vcpu * vcpu,int dr)716 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
717 {
718 	if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
719 		return true;
720 
721 	kvm_queue_exception(vcpu, UD_VECTOR);
722 	return false;
723 }
724 EXPORT_SYMBOL_GPL(kvm_require_dr);
725 
726 /*
727  * This function will be used to read from the physical memory of the currently
728  * running guest. The difference to kvm_vcpu_read_guest_page is that this function
729  * can read from guest physical or from the guest's guest physical memory.
730  */
kvm_read_guest_page_mmu(struct kvm_vcpu * vcpu,struct kvm_mmu * mmu,gfn_t ngfn,void * data,int offset,int len,u32 access)731 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
732 			    gfn_t ngfn, void *data, int offset, int len,
733 			    u32 access)
734 {
735 	struct x86_exception exception;
736 	gfn_t real_gfn;
737 	gpa_t ngpa;
738 
739 	ngpa     = gfn_to_gpa(ngfn);
740 	real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
741 	if (real_gfn == UNMAPPED_GVA)
742 		return -EFAULT;
743 
744 	real_gfn = gpa_to_gfn(real_gfn);
745 
746 	return kvm_vcpu_read_guest_page(vcpu, real_gfn, data, offset, len);
747 }
748 EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
749 
kvm_read_nested_guest_page(struct kvm_vcpu * vcpu,gfn_t gfn,void * data,int offset,int len,u32 access)750 static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
751 			       void *data, int offset, int len, u32 access)
752 {
753 	return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
754 				       data, offset, len, access);
755 }
756 
pdptr_rsvd_bits(struct kvm_vcpu * vcpu)757 static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
758 {
759 	return rsvd_bits(cpuid_maxphyaddr(vcpu), 63) | rsvd_bits(5, 8) |
760 	       rsvd_bits(1, 2);
761 }
762 
763 /*
764  * Load the pae pdptrs.  Return 1 if they are all valid, 0 otherwise.
765  */
load_pdptrs(struct kvm_vcpu * vcpu,struct kvm_mmu * mmu,unsigned long cr3)766 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
767 {
768 	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
769 	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
770 	int i;
771 	int ret;
772 	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
773 
774 	ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
775 				      offset * sizeof(u64), sizeof(pdpte),
776 				      PFERR_USER_MASK|PFERR_WRITE_MASK);
777 	if (ret < 0) {
778 		ret = 0;
779 		goto out;
780 	}
781 	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
782 		if ((pdpte[i] & PT_PRESENT_MASK) &&
783 		    (pdpte[i] & pdptr_rsvd_bits(vcpu))) {
784 			ret = 0;
785 			goto out;
786 		}
787 	}
788 	ret = 1;
789 
790 	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
791 	kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
792 
793 out:
794 
795 	return ret;
796 }
797 EXPORT_SYMBOL_GPL(load_pdptrs);
798 
pdptrs_changed(struct kvm_vcpu * vcpu)799 bool pdptrs_changed(struct kvm_vcpu *vcpu)
800 {
801 	u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
802 	int offset;
803 	gfn_t gfn;
804 	int r;
805 
806 	if (!is_pae_paging(vcpu))
807 		return false;
808 
809 	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
810 		return true;
811 
812 	gfn = (kvm_read_cr3(vcpu) & 0xffffffe0ul) >> PAGE_SHIFT;
813 	offset = (kvm_read_cr3(vcpu) & 0xffffffe0ul) & (PAGE_SIZE - 1);
814 	r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
815 				       PFERR_USER_MASK | PFERR_WRITE_MASK);
816 	if (r < 0)
817 		return true;
818 
819 	return memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
820 }
821 EXPORT_SYMBOL_GPL(pdptrs_changed);
822 
kvm_set_cr0(struct kvm_vcpu * vcpu,unsigned long cr0)823 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
824 {
825 	unsigned long old_cr0 = kvm_read_cr0(vcpu);
826 	unsigned long pdptr_bits = X86_CR0_CD | X86_CR0_NW | X86_CR0_PG;
827 	unsigned long update_bits = X86_CR0_PG | X86_CR0_WP;
828 
829 	cr0 |= X86_CR0_ET;
830 
831 #ifdef CONFIG_X86_64
832 	if (cr0 & 0xffffffff00000000UL)
833 		return 1;
834 #endif
835 
836 	cr0 &= ~CR0_RESERVED_BITS;
837 
838 	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
839 		return 1;
840 
841 	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
842 		return 1;
843 
844 #ifdef CONFIG_X86_64
845 	if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) &&
846 	    (cr0 & X86_CR0_PG)) {
847 		int cs_db, cs_l;
848 
849 		if (!is_pae(vcpu))
850 			return 1;
851 		kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
852 		if (cs_l)
853 			return 1;
854 	}
855 #endif
856 	if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) &&
857 	    is_pae(vcpu) && ((cr0 ^ old_cr0) & pdptr_bits) &&
858 	    !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)))
859 		return 1;
860 
861 	if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
862 		return 1;
863 
864 	kvm_x86_ops.set_cr0(vcpu, cr0);
865 
866 	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
867 		kvm_clear_async_pf_completion_queue(vcpu);
868 		kvm_async_pf_hash_reset(vcpu);
869 	}
870 
871 	if ((cr0 ^ old_cr0) & update_bits)
872 		kvm_mmu_reset_context(vcpu);
873 
874 	if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
875 	    kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
876 	    !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
877 		kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
878 
879 	return 0;
880 }
881 EXPORT_SYMBOL_GPL(kvm_set_cr0);
882 
kvm_lmsw(struct kvm_vcpu * vcpu,unsigned long msw)883 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
884 {
885 	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
886 }
887 EXPORT_SYMBOL_GPL(kvm_lmsw);
888 
kvm_load_guest_xsave_state(struct kvm_vcpu * vcpu)889 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
890 {
891 	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
892 
893 		if (vcpu->arch.xcr0 != host_xcr0)
894 			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
895 
896 		if (vcpu->arch.xsaves_enabled &&
897 		    vcpu->arch.ia32_xss != host_xss)
898 			wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
899 	}
900 
901 	if (static_cpu_has(X86_FEATURE_PKU) &&
902 	    (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) ||
903 	     (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU)) &&
904 	    vcpu->arch.pkru != vcpu->arch.host_pkru)
905 		__write_pkru(vcpu->arch.pkru);
906 }
907 EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);
908 
kvm_load_host_xsave_state(struct kvm_vcpu * vcpu)909 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
910 {
911 	if (static_cpu_has(X86_FEATURE_PKU) &&
912 	    (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) ||
913 	     (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU))) {
914 		vcpu->arch.pkru = rdpkru();
915 		if (vcpu->arch.pkru != vcpu->arch.host_pkru)
916 			__write_pkru(vcpu->arch.host_pkru);
917 	}
918 
919 	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
920 
921 		if (vcpu->arch.xcr0 != host_xcr0)
922 			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
923 
924 		if (vcpu->arch.xsaves_enabled &&
925 		    vcpu->arch.ia32_xss != host_xss)
926 			wrmsrl(MSR_IA32_XSS, host_xss);
927 	}
928 
929 }
930 EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);
931 
__kvm_set_xcr(struct kvm_vcpu * vcpu,u32 index,u64 xcr)932 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
933 {
934 	u64 xcr0 = xcr;
935 	u64 old_xcr0 = vcpu->arch.xcr0;
936 	u64 valid_bits;
937 
938 	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
939 	if (index != XCR_XFEATURE_ENABLED_MASK)
940 		return 1;
941 	if (!(xcr0 & XFEATURE_MASK_FP))
942 		return 1;
943 	if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
944 		return 1;
945 
946 	/*
947 	 * Do not allow the guest to set bits that we do not support
948 	 * saving.  However, xcr0 bit 0 is always set, even if the
949 	 * emulated CPU does not support XSAVE (see fx_init).
950 	 */
951 	valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
952 	if (xcr0 & ~valid_bits)
953 		return 1;
954 
955 	if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
956 	    (!(xcr0 & XFEATURE_MASK_BNDCSR)))
957 		return 1;
958 
959 	if (xcr0 & XFEATURE_MASK_AVX512) {
960 		if (!(xcr0 & XFEATURE_MASK_YMM))
961 			return 1;
962 		if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
963 			return 1;
964 	}
965 	vcpu->arch.xcr0 = xcr0;
966 
967 	if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
968 		kvm_update_cpuid_runtime(vcpu);
969 	return 0;
970 }
971 
kvm_set_xcr(struct kvm_vcpu * vcpu,u32 index,u64 xcr)972 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
973 {
974 	if (kvm_x86_ops.get_cpl(vcpu) != 0 ||
975 	    __kvm_set_xcr(vcpu, index, xcr)) {
976 		kvm_inject_gp(vcpu, 0);
977 		return 1;
978 	}
979 	return 0;
980 }
981 EXPORT_SYMBOL_GPL(kvm_set_xcr);
982 
kvm_valid_cr4(struct kvm_vcpu * vcpu,unsigned long cr4)983 int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
984 {
985 	if (cr4 & cr4_reserved_bits)
986 		return -EINVAL;
987 
988 	if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
989 		return -EINVAL;
990 
991 	return 0;
992 }
993 EXPORT_SYMBOL_GPL(kvm_valid_cr4);
994 
kvm_set_cr4(struct kvm_vcpu * vcpu,unsigned long cr4)995 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
996 {
997 	unsigned long old_cr4 = kvm_read_cr4(vcpu);
998 	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
999 				   X86_CR4_SMEP;
1000 	unsigned long mmu_role_bits = pdptr_bits | X86_CR4_SMAP | X86_CR4_PKE;
1001 
1002 	if (kvm_valid_cr4(vcpu, cr4))
1003 		return 1;
1004 
1005 	if (is_long_mode(vcpu)) {
1006 		if (!(cr4 & X86_CR4_PAE))
1007 			return 1;
1008 		if ((cr4 ^ old_cr4) & X86_CR4_LA57)
1009 			return 1;
1010 	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
1011 		   && ((cr4 ^ old_cr4) & pdptr_bits)
1012 		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
1013 				   kvm_read_cr3(vcpu)))
1014 		return 1;
1015 
1016 	if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
1017 		if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID))
1018 			return 1;
1019 
1020 		/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
1021 		if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
1022 			return 1;
1023 	}
1024 
1025 	if (kvm_x86_ops.set_cr4(vcpu, cr4))
1026 		return 1;
1027 
1028 	if (((cr4 ^ old_cr4) & mmu_role_bits) ||
1029 	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
1030 		kvm_mmu_reset_context(vcpu);
1031 
1032 	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
1033 		kvm_update_cpuid_runtime(vcpu);
1034 
1035 	return 0;
1036 }
1037 EXPORT_SYMBOL_GPL(kvm_set_cr4);
1038 
kvm_set_cr3(struct kvm_vcpu * vcpu,unsigned long cr3)1039 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
1040 {
1041 	bool skip_tlb_flush = false;
1042 #ifdef CONFIG_X86_64
1043 	bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
1044 
1045 	if (pcid_enabled) {
1046 		skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
1047 		cr3 &= ~X86_CR3_PCID_NOFLUSH;
1048 	}
1049 #endif
1050 
1051 	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
1052 		if (!skip_tlb_flush) {
1053 			kvm_mmu_sync_roots(vcpu);
1054 			kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
1055 		}
1056 		return 0;
1057 	}
1058 
1059 	if (is_long_mode(vcpu) &&
1060 	    (cr3 & vcpu->arch.cr3_lm_rsvd_bits))
1061 		return 1;
1062 	else if (is_pae_paging(vcpu) &&
1063 		 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
1064 		return 1;
1065 
1066 	kvm_mmu_new_pgd(vcpu, cr3, skip_tlb_flush, skip_tlb_flush);
1067 	vcpu->arch.cr3 = cr3;
1068 	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
1069 
1070 	return 0;
1071 }
1072 EXPORT_SYMBOL_GPL(kvm_set_cr3);
1073 
kvm_set_cr8(struct kvm_vcpu * vcpu,unsigned long cr8)1074 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
1075 {
1076 	if (cr8 & CR8_RESERVED_BITS)
1077 		return 1;
1078 	if (lapic_in_kernel(vcpu))
1079 		kvm_lapic_set_tpr(vcpu, cr8);
1080 	else
1081 		vcpu->arch.cr8 = cr8;
1082 	return 0;
1083 }
1084 EXPORT_SYMBOL_GPL(kvm_set_cr8);
1085 
kvm_get_cr8(struct kvm_vcpu * vcpu)1086 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
1087 {
1088 	if (lapic_in_kernel(vcpu))
1089 		return kvm_lapic_get_cr8(vcpu);
1090 	else
1091 		return vcpu->arch.cr8;
1092 }
1093 EXPORT_SYMBOL_GPL(kvm_get_cr8);
1094 
kvm_update_dr0123(struct kvm_vcpu * vcpu)1095 static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
1096 {
1097 	int i;
1098 
1099 	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
1100 		for (i = 0; i < KVM_NR_DB_REGS; i++)
1101 			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
1102 		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
1103 	}
1104 }
1105 
kvm_update_dr7(struct kvm_vcpu * vcpu)1106 void kvm_update_dr7(struct kvm_vcpu *vcpu)
1107 {
1108 	unsigned long dr7;
1109 
1110 	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
1111 		dr7 = vcpu->arch.guest_debug_dr7;
1112 	else
1113 		dr7 = vcpu->arch.dr7;
1114 	kvm_x86_ops.set_dr7(vcpu, dr7);
1115 	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
1116 	if (dr7 & DR7_BP_EN_MASK)
1117 		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
1118 }
1119 EXPORT_SYMBOL_GPL(kvm_update_dr7);
1120 
kvm_dr6_fixed(struct kvm_vcpu * vcpu)1121 static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
1122 {
1123 	u64 fixed = DR6_FIXED_1;
1124 
1125 	if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
1126 		fixed |= DR6_RTM;
1127 	return fixed;
1128 }
1129 
__kvm_set_dr(struct kvm_vcpu * vcpu,int dr,unsigned long val)1130 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
1131 {
1132 	size_t size = ARRAY_SIZE(vcpu->arch.db);
1133 
1134 	switch (dr) {
1135 	case 0 ... 3:
1136 		vcpu->arch.db[array_index_nospec(dr, size)] = val;
1137 		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
1138 			vcpu->arch.eff_db[dr] = val;
1139 		break;
1140 	case 4:
1141 	case 6:
1142 		if (!kvm_dr6_valid(val))
1143 			return -1; /* #GP */
1144 		vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
1145 		break;
1146 	case 5:
1147 	default: /* 7 */
1148 		if (!kvm_dr7_valid(val))
1149 			return -1; /* #GP */
1150 		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
1151 		kvm_update_dr7(vcpu);
1152 		break;
1153 	}
1154 
1155 	return 0;
1156 }
1157 
kvm_set_dr(struct kvm_vcpu * vcpu,int dr,unsigned long val)1158 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
1159 {
1160 	if (__kvm_set_dr(vcpu, dr, val)) {
1161 		kvm_inject_gp(vcpu, 0);
1162 		return 1;
1163 	}
1164 	return 0;
1165 }
1166 EXPORT_SYMBOL_GPL(kvm_set_dr);
1167 
kvm_get_dr(struct kvm_vcpu * vcpu,int dr,unsigned long * val)1168 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
1169 {
1170 	size_t size = ARRAY_SIZE(vcpu->arch.db);
1171 
1172 	switch (dr) {
1173 	case 0 ... 3:
1174 		*val = vcpu->arch.db[array_index_nospec(dr, size)];
1175 		break;
1176 	case 4:
1177 	case 6:
1178 		*val = vcpu->arch.dr6;
1179 		break;
1180 	case 5:
1181 	default: /* 7 */
1182 		*val = vcpu->arch.dr7;
1183 		break;
1184 	}
1185 	return 0;
1186 }
1187 EXPORT_SYMBOL_GPL(kvm_get_dr);
1188 
kvm_rdpmc(struct kvm_vcpu * vcpu)1189 bool kvm_rdpmc(struct kvm_vcpu *vcpu)
1190 {
1191 	u32 ecx = kvm_rcx_read(vcpu);
1192 	u64 data;
1193 	int err;
1194 
1195 	err = kvm_pmu_rdpmc(vcpu, ecx, &data);
1196 	if (err)
1197 		return err;
1198 	kvm_rax_write(vcpu, (u32)data);
1199 	kvm_rdx_write(vcpu, data >> 32);
1200 	return err;
1201 }
1202 EXPORT_SYMBOL_GPL(kvm_rdpmc);
1203 
1204 /*
1205  * List of msr numbers which we expose to userspace through KVM_GET_MSRS
1206  * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
1207  *
1208  * The three MSR lists(msrs_to_save, emulated_msrs, msr_based_features)
1209  * extract the supported MSRs from the related const lists.
1210  * msrs_to_save is selected from the msrs_to_save_all to reflect the
1211  * capabilities of the host cpu. This capabilities test skips MSRs that are
1212  * kvm-specific. Those are put in emulated_msrs_all; filtering of emulated_msrs
1213  * may depend on host virtualization features rather than host cpu features.
1214  */
1215 
1216 static const u32 msrs_to_save_all[] = {
1217 	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
1218 	MSR_STAR,
1219 #ifdef CONFIG_X86_64
1220 	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
1221 #endif
1222 	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
1223 	MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
1224 	MSR_IA32_SPEC_CTRL,
1225 	MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
1226 	MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
1227 	MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
1228 	MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
1229 	MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
1230 	MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
1231 	MSR_IA32_UMWAIT_CONTROL,
1232 
1233 	MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
1234 	MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
1235 	MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
1236 	MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
1237 	MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
1238 	MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3,
1239 	MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5,
1240 	MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7,
1241 	MSR_ARCH_PERFMON_PERFCTR0 + 8, MSR_ARCH_PERFMON_PERFCTR0 + 9,
1242 	MSR_ARCH_PERFMON_PERFCTR0 + 10, MSR_ARCH_PERFMON_PERFCTR0 + 11,
1243 	MSR_ARCH_PERFMON_PERFCTR0 + 12, MSR_ARCH_PERFMON_PERFCTR0 + 13,
1244 	MSR_ARCH_PERFMON_PERFCTR0 + 14, MSR_ARCH_PERFMON_PERFCTR0 + 15,
1245 	MSR_ARCH_PERFMON_PERFCTR0 + 16, MSR_ARCH_PERFMON_PERFCTR0 + 17,
1246 	MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1,
1247 	MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
1248 	MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
1249 	MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7,
1250 	MSR_ARCH_PERFMON_EVENTSEL0 + 8, MSR_ARCH_PERFMON_EVENTSEL0 + 9,
1251 	MSR_ARCH_PERFMON_EVENTSEL0 + 10, MSR_ARCH_PERFMON_EVENTSEL0 + 11,
1252 	MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13,
1253 	MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15,
1254 	MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
1255 
1256 	MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
1257 	MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
1258 	MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
1259 	MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
1260 	MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
1261 	MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
1262 };
1263 
1264 static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
1265 static unsigned num_msrs_to_save;
1266 
1267 static const u32 emulated_msrs_all[] = {
1268 	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
1269 	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
1270 	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
1271 	HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
1272 	HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
1273 	HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
1274 	HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
1275 	HV_X64_MSR_RESET,
1276 	HV_X64_MSR_VP_INDEX,
1277 	HV_X64_MSR_VP_RUNTIME,
1278 	HV_X64_MSR_SCONTROL,
1279 	HV_X64_MSR_STIMER0_CONFIG,
1280 	HV_X64_MSR_VP_ASSIST_PAGE,
1281 	HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL,
1282 	HV_X64_MSR_TSC_EMULATION_STATUS,
1283 	HV_X64_MSR_SYNDBG_OPTIONS,
1284 	HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS,
1285 	HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER,
1286 	HV_X64_MSR_SYNDBG_PENDING_BUFFER,
1287 
1288 	MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
1289 	MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK,
1290 
1291 	MSR_IA32_TSC_ADJUST,
1292 	MSR_IA32_TSCDEADLINE,
1293 	MSR_IA32_ARCH_CAPABILITIES,
1294 	MSR_IA32_PERF_CAPABILITIES,
1295 	MSR_IA32_MISC_ENABLE,
1296 	MSR_IA32_MCG_STATUS,
1297 	MSR_IA32_MCG_CTL,
1298 	MSR_IA32_MCG_EXT_CTL,
1299 	MSR_IA32_SMBASE,
1300 	MSR_SMI_COUNT,
1301 	MSR_PLATFORM_INFO,
1302 	MSR_MISC_FEATURES_ENABLES,
1303 	MSR_AMD64_VIRT_SPEC_CTRL,
1304 	MSR_IA32_POWER_CTL,
1305 	MSR_IA32_UCODE_REV,
1306 
1307 	/*
1308 	 * The following list leaves out MSRs whose values are determined
1309 	 * by arch/x86/kvm/vmx/nested.c based on CPUID or other MSRs.
1310 	 * We always support the "true" VMX control MSRs, even if the host
1311 	 * processor does not, so I am putting these registers here rather
1312 	 * than in msrs_to_save_all.
1313 	 */
1314 	MSR_IA32_VMX_BASIC,
1315 	MSR_IA32_VMX_TRUE_PINBASED_CTLS,
1316 	MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
1317 	MSR_IA32_VMX_TRUE_EXIT_CTLS,
1318 	MSR_IA32_VMX_TRUE_ENTRY_CTLS,
1319 	MSR_IA32_VMX_MISC,
1320 	MSR_IA32_VMX_CR0_FIXED0,
1321 	MSR_IA32_VMX_CR4_FIXED0,
1322 	MSR_IA32_VMX_VMCS_ENUM,
1323 	MSR_IA32_VMX_PROCBASED_CTLS2,
1324 	MSR_IA32_VMX_EPT_VPID_CAP,
1325 	MSR_IA32_VMX_VMFUNC,
1326 
1327 	MSR_K7_HWCR,
1328 	MSR_KVM_POLL_CONTROL,
1329 };
1330 
1331 static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
1332 static unsigned num_emulated_msrs;
1333 
1334 /*
1335  * List of msr numbers which are used to expose MSR-based features that
1336  * can be used by a hypervisor to validate requested CPU features.
1337  */
1338 static const u32 msr_based_features_all[] = {
1339 	MSR_IA32_VMX_BASIC,
1340 	MSR_IA32_VMX_TRUE_PINBASED_CTLS,
1341 	MSR_IA32_VMX_PINBASED_CTLS,
1342 	MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
1343 	MSR_IA32_VMX_PROCBASED_CTLS,
1344 	MSR_IA32_VMX_TRUE_EXIT_CTLS,
1345 	MSR_IA32_VMX_EXIT_CTLS,
1346 	MSR_IA32_VMX_TRUE_ENTRY_CTLS,
1347 	MSR_IA32_VMX_ENTRY_CTLS,
1348 	MSR_IA32_VMX_MISC,
1349 	MSR_IA32_VMX_CR0_FIXED0,
1350 	MSR_IA32_VMX_CR0_FIXED1,
1351 	MSR_IA32_VMX_CR4_FIXED0,
1352 	MSR_IA32_VMX_CR4_FIXED1,
1353 	MSR_IA32_VMX_VMCS_ENUM,
1354 	MSR_IA32_VMX_PROCBASED_CTLS2,
1355 	MSR_IA32_VMX_EPT_VPID_CAP,
1356 	MSR_IA32_VMX_VMFUNC,
1357 
1358 	MSR_F10H_DECFG,
1359 	MSR_IA32_UCODE_REV,
1360 	MSR_IA32_ARCH_CAPABILITIES,
1361 	MSR_IA32_PERF_CAPABILITIES,
1362 };
1363 
1364 static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all)];
1365 static unsigned int num_msr_based_features;
1366 
kvm_get_arch_capabilities(void)1367 static u64 kvm_get_arch_capabilities(void)
1368 {
1369 	u64 data = 0;
1370 
1371 	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
1372 		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, data);
1373 
1374 	/*
1375 	 * If nx_huge_pages is enabled, KVM's shadow paging will ensure that
1376 	 * the nested hypervisor runs with NX huge pages.  If it is not,
1377 	 * L1 is anyway vulnerable to ITLB_MULTIHIT explots from other
1378 	 * L1 guests, so it need not worry about its own (L2) guests.
1379 	 */
1380 	data |= ARCH_CAP_PSCHANGE_MC_NO;
1381 
1382 	/*
1383 	 * If we're doing cache flushes (either "always" or "cond")
1384 	 * we will do one whenever the guest does a vmlaunch/vmresume.
1385 	 * If an outer hypervisor is doing the cache flush for us
1386 	 * (VMENTER_L1D_FLUSH_NESTED_VM), we can safely pass that
1387 	 * capability to the guest too, and if EPT is disabled we're not
1388 	 * vulnerable.  Overall, only VMENTER_L1D_FLUSH_NEVER will
1389 	 * require a nested hypervisor to do a flush of its own.
1390 	 */
1391 	if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER)
1392 		data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;
1393 
1394 	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
1395 		data |= ARCH_CAP_RDCL_NO;
1396 	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1397 		data |= ARCH_CAP_SSB_NO;
1398 	if (!boot_cpu_has_bug(X86_BUG_MDS))
1399 		data |= ARCH_CAP_MDS_NO;
1400 
1401 	if (!boot_cpu_has(X86_FEATURE_RTM)) {
1402 		/*
1403 		 * If RTM=0 because the kernel has disabled TSX, the host might
1404 		 * have TAA_NO or TSX_CTRL.  Clear TAA_NO (the guest sees RTM=0
1405 		 * and therefore knows that there cannot be TAA) but keep
1406 		 * TSX_CTRL: some buggy userspaces leave it set on tsx=on hosts,
1407 		 * and we want to allow migrating those guests to tsx=off hosts.
1408 		 */
1409 		data &= ~ARCH_CAP_TAA_NO;
1410 	} else if (!boot_cpu_has_bug(X86_BUG_TAA)) {
1411 		data |= ARCH_CAP_TAA_NO;
1412 	} else {
1413 		/*
1414 		 * Nothing to do here; we emulate TSX_CTRL if present on the
1415 		 * host so the guest can choose between disabling TSX or
1416 		 * using VERW to clear CPU buffers.
1417 		 */
1418 	}
1419 
1420 	/* Guests don't need to know "Fill buffer clear control" exists */
1421 	data &= ~ARCH_CAP_FB_CLEAR_CTRL;
1422 
1423 	return data;
1424 }
1425 
kvm_get_msr_feature(struct kvm_msr_entry * msr)1426 static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
1427 {
1428 	switch (msr->index) {
1429 	case MSR_IA32_ARCH_CAPABILITIES:
1430 		msr->data = kvm_get_arch_capabilities();
1431 		break;
1432 	case MSR_IA32_UCODE_REV:
1433 		rdmsrl_safe(msr->index, &msr->data);
1434 		break;
1435 	default:
1436 		return kvm_x86_ops.get_msr_feature(msr);
1437 	}
1438 	return 0;
1439 }
1440 
do_get_msr_feature(struct kvm_vcpu * vcpu,unsigned index,u64 * data)1441 static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1442 {
1443 	struct kvm_msr_entry msr;
1444 	int r;
1445 
1446 	msr.index = index;
1447 	r = kvm_get_msr_feature(&msr);
1448 
1449 	if (r == KVM_MSR_RET_INVALID) {
1450 		/* Unconditionally clear the output for simplicity */
1451 		*data = 0;
1452 		if (kvm_msr_ignored_check(vcpu, index, 0, false))
1453 			r = 0;
1454 	}
1455 
1456 	if (r)
1457 		return r;
1458 
1459 	*data = msr.data;
1460 
1461 	return 0;
1462 }
1463 
__kvm_valid_efer(struct kvm_vcpu * vcpu,u64 efer)1464 static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
1465 {
1466 	if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
1467 		return false;
1468 
1469 	if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
1470 		return false;
1471 
1472 	if (efer & (EFER_LME | EFER_LMA) &&
1473 	    !guest_cpuid_has(vcpu, X86_FEATURE_LM))
1474 		return false;
1475 
1476 	if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX))
1477 		return false;
1478 
1479 	return true;
1480 
1481 }
kvm_valid_efer(struct kvm_vcpu * vcpu,u64 efer)1482 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
1483 {
1484 	if (efer & efer_reserved_bits)
1485 		return false;
1486 
1487 	return __kvm_valid_efer(vcpu, efer);
1488 }
1489 EXPORT_SYMBOL_GPL(kvm_valid_efer);
1490 
set_efer(struct kvm_vcpu * vcpu,struct msr_data * msr_info)1491 static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1492 {
1493 	u64 old_efer = vcpu->arch.efer;
1494 	u64 efer = msr_info->data;
1495 	int r;
1496 
1497 	if (efer & efer_reserved_bits)
1498 		return 1;
1499 
1500 	if (!msr_info->host_initiated) {
1501 		if (!__kvm_valid_efer(vcpu, efer))
1502 			return 1;
1503 
1504 		if (is_paging(vcpu) &&
1505 		    (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
1506 			return 1;
1507 	}
1508 
1509 	efer &= ~EFER_LMA;
1510 	efer |= vcpu->arch.efer & EFER_LMA;
1511 
1512 	r = kvm_x86_ops.set_efer(vcpu, efer);
1513 	if (r) {
1514 		WARN_ON(r > 0);
1515 		return r;
1516 	}
1517 
1518 	/* Update reserved bits */
1519 	if ((efer ^ old_efer) & EFER_NX)
1520 		kvm_mmu_reset_context(vcpu);
1521 
1522 	return 0;
1523 }
1524 
kvm_enable_efer_bits(u64 mask)1525 void kvm_enable_efer_bits(u64 mask)
1526 {
1527        efer_reserved_bits &= ~mask;
1528 }
1529 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
1530 
kvm_msr_allowed(struct kvm_vcpu * vcpu,u32 index,u32 type)1531 bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
1532 {
1533 	struct kvm_x86_msr_filter *msr_filter;
1534 	struct msr_bitmap_range *ranges;
1535 	struct kvm *kvm = vcpu->kvm;
1536 	bool allowed;
1537 	int idx;
1538 	u32 i;
1539 
1540 	/* x2APIC MSRs do not support filtering. */
1541 	if (index >= 0x800 && index <= 0x8ff)
1542 		return true;
1543 
1544 	idx = srcu_read_lock(&kvm->srcu);
1545 
1546 	msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu);
1547 	if (!msr_filter) {
1548 		allowed = true;
1549 		goto out;
1550 	}
1551 
1552 	allowed = msr_filter->default_allow;
1553 	ranges = msr_filter->ranges;
1554 
1555 	for (i = 0; i < msr_filter->count; i++) {
1556 		u32 start = ranges[i].base;
1557 		u32 end = start + ranges[i].nmsrs;
1558 		u32 flags = ranges[i].flags;
1559 		unsigned long *bitmap = ranges[i].bitmap;
1560 
1561 		if ((index >= start) && (index < end) && (flags & type)) {
1562 			allowed = !!test_bit(index - start, bitmap);
1563 			break;
1564 		}
1565 	}
1566 
1567 out:
1568 	srcu_read_unlock(&kvm->srcu, idx);
1569 
1570 	return allowed;
1571 }
1572 EXPORT_SYMBOL_GPL(kvm_msr_allowed);
1573 
1574 /*
1575  * Write @data into the MSR specified by @index.  Select MSR specific fault
1576  * checks are bypassed if @host_initiated is %true.
1577  * Returns 0 on success, non-0 otherwise.
1578  * Assumes vcpu_load() was already called.
1579  */
__kvm_set_msr(struct kvm_vcpu * vcpu,u32 index,u64 data,bool host_initiated)1580 static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
1581 			 bool host_initiated)
1582 {
1583 	struct msr_data msr;
1584 
1585 	if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
1586 		return KVM_MSR_RET_FILTERED;
1587 
1588 	switch (index) {
1589 	case MSR_FS_BASE:
1590 	case MSR_GS_BASE:
1591 	case MSR_KERNEL_GS_BASE:
1592 	case MSR_CSTAR:
1593 	case MSR_LSTAR:
1594 		if (is_noncanonical_address(data, vcpu))
1595 			return 1;
1596 		break;
1597 	case MSR_IA32_SYSENTER_EIP:
1598 	case MSR_IA32_SYSENTER_ESP:
1599 		/*
1600 		 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
1601 		 * non-canonical address is written on Intel but not on
1602 		 * AMD (which ignores the top 32-bits, because it does
1603 		 * not implement 64-bit SYSENTER).
1604 		 *
1605 		 * 64-bit code should hence be able to write a non-canonical
1606 		 * value on AMD.  Making the address canonical ensures that
1607 		 * vmentry does not fail on Intel after writing a non-canonical
1608 		 * value, and that something deterministic happens if the guest
1609 		 * invokes 64-bit SYSENTER.
1610 		 */
1611 		data = get_canonical(data, vcpu_virt_addr_bits(vcpu));
1612 	}
1613 
1614 	msr.data = data;
1615 	msr.index = index;
1616 	msr.host_initiated = host_initiated;
1617 
1618 	return kvm_x86_ops.set_msr(vcpu, &msr);
1619 }
1620 
kvm_set_msr_ignored_check(struct kvm_vcpu * vcpu,u32 index,u64 data,bool host_initiated)1621 static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
1622 				     u32 index, u64 data, bool host_initiated)
1623 {
1624 	int ret = __kvm_set_msr(vcpu, index, data, host_initiated);
1625 
1626 	if (ret == KVM_MSR_RET_INVALID)
1627 		if (kvm_msr_ignored_check(vcpu, index, data, true))
1628 			ret = 0;
1629 
1630 	return ret;
1631 }
1632 
1633 /*
1634  * Read the MSR specified by @index into @data.  Select MSR specific fault
1635  * checks are bypassed if @host_initiated is %true.
1636  * Returns 0 on success, non-0 otherwise.
1637  * Assumes vcpu_load() was already called.
1638  */
__kvm_get_msr(struct kvm_vcpu * vcpu,u32 index,u64 * data,bool host_initiated)1639 int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
1640 		  bool host_initiated)
1641 {
1642 	struct msr_data msr;
1643 	int ret;
1644 
1645 	if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
1646 		return KVM_MSR_RET_FILTERED;
1647 
1648 	msr.index = index;
1649 	msr.host_initiated = host_initiated;
1650 
1651 	ret = kvm_x86_ops.get_msr(vcpu, &msr);
1652 	if (!ret)
1653 		*data = msr.data;
1654 	return ret;
1655 }
1656 
kvm_get_msr_ignored_check(struct kvm_vcpu * vcpu,u32 index,u64 * data,bool host_initiated)1657 static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
1658 				     u32 index, u64 *data, bool host_initiated)
1659 {
1660 	int ret = __kvm_get_msr(vcpu, index, data, host_initiated);
1661 
1662 	if (ret == KVM_MSR_RET_INVALID) {
1663 		/* Unconditionally clear *data for simplicity */
1664 		*data = 0;
1665 		if (kvm_msr_ignored_check(vcpu, index, 0, false))
1666 			ret = 0;
1667 	}
1668 
1669 	return ret;
1670 }
1671 
kvm_get_msr(struct kvm_vcpu * vcpu,u32 index,u64 * data)1672 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
1673 {
1674 	return kvm_get_msr_ignored_check(vcpu, index, data, false);
1675 }
1676 EXPORT_SYMBOL_GPL(kvm_get_msr);
1677 
kvm_set_msr(struct kvm_vcpu * vcpu,u32 index,u64 data)1678 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
1679 {
1680 	return kvm_set_msr_ignored_check(vcpu, index, data, false);
1681 }
1682 EXPORT_SYMBOL_GPL(kvm_set_msr);
1683 
complete_emulated_msr(struct kvm_vcpu * vcpu,bool is_read)1684 static int complete_emulated_msr(struct kvm_vcpu *vcpu, bool is_read)
1685 {
1686 	if (vcpu->run->msr.error) {
1687 		kvm_inject_gp(vcpu, 0);
1688 		return 1;
1689 	} else if (is_read) {
1690 		kvm_rax_write(vcpu, (u32)vcpu->run->msr.data);
1691 		kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32);
1692 	}
1693 
1694 	return kvm_skip_emulated_instruction(vcpu);
1695 }
1696 
complete_emulated_rdmsr(struct kvm_vcpu * vcpu)1697 static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu)
1698 {
1699 	return complete_emulated_msr(vcpu, true);
1700 }
1701 
complete_emulated_wrmsr(struct kvm_vcpu * vcpu)1702 static int complete_emulated_wrmsr(struct kvm_vcpu *vcpu)
1703 {
1704 	return complete_emulated_msr(vcpu, false);
1705 }
1706 
kvm_msr_reason(int r)1707 static u64 kvm_msr_reason(int r)
1708 {
1709 	switch (r) {
1710 	case KVM_MSR_RET_INVALID:
1711 		return KVM_MSR_EXIT_REASON_UNKNOWN;
1712 	case KVM_MSR_RET_FILTERED:
1713 		return KVM_MSR_EXIT_REASON_FILTER;
1714 	default:
1715 		return KVM_MSR_EXIT_REASON_INVAL;
1716 	}
1717 }
1718 
kvm_msr_user_space(struct kvm_vcpu * vcpu,u32 index,u32 exit_reason,u64 data,int (* completion)(struct kvm_vcpu * vcpu),int r)1719 static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index,
1720 			      u32 exit_reason, u64 data,
1721 			      int (*completion)(struct kvm_vcpu *vcpu),
1722 			      int r)
1723 {
1724 	u64 msr_reason = kvm_msr_reason(r);
1725 
1726 	/* Check if the user wanted to know about this MSR fault */
1727 	if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason))
1728 		return 0;
1729 
1730 	vcpu->run->exit_reason = exit_reason;
1731 	vcpu->run->msr.error = 0;
1732 	memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad));
1733 	vcpu->run->msr.reason = msr_reason;
1734 	vcpu->run->msr.index = index;
1735 	vcpu->run->msr.data = data;
1736 	vcpu->arch.complete_userspace_io = completion;
1737 
1738 	return 1;
1739 }
1740 
kvm_get_msr_user_space(struct kvm_vcpu * vcpu,u32 index,int r)1741 static int kvm_get_msr_user_space(struct kvm_vcpu *vcpu, u32 index, int r)
1742 {
1743 	return kvm_msr_user_space(vcpu, index, KVM_EXIT_X86_RDMSR, 0,
1744 				   complete_emulated_rdmsr, r);
1745 }
1746 
kvm_set_msr_user_space(struct kvm_vcpu * vcpu,u32 index,u64 data,int r)1747 static int kvm_set_msr_user_space(struct kvm_vcpu *vcpu, u32 index, u64 data, int r)
1748 {
1749 	return kvm_msr_user_space(vcpu, index, KVM_EXIT_X86_WRMSR, data,
1750 				   complete_emulated_wrmsr, r);
1751 }
1752 
kvm_emulate_rdmsr(struct kvm_vcpu * vcpu)1753 int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
1754 {
1755 	u32 ecx = kvm_rcx_read(vcpu);
1756 	u64 data;
1757 	int r;
1758 
1759 	r = kvm_get_msr(vcpu, ecx, &data);
1760 
1761 	/* MSR read failed? See if we should ask user space */
1762 	if (r && kvm_get_msr_user_space(vcpu, ecx, r)) {
1763 		/* Bounce to user space */
1764 		return 0;
1765 	}
1766 
1767 	/* MSR read failed? Inject a #GP */
1768 	if (r) {
1769 		trace_kvm_msr_read_ex(ecx);
1770 		kvm_inject_gp(vcpu, 0);
1771 		return 1;
1772 	}
1773 
1774 	trace_kvm_msr_read(ecx, data);
1775 
1776 	kvm_rax_write(vcpu, data & -1u);
1777 	kvm_rdx_write(vcpu, (data >> 32) & -1u);
1778 	return kvm_skip_emulated_instruction(vcpu);
1779 }
1780 EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr);
1781 
kvm_emulate_wrmsr(struct kvm_vcpu * vcpu)1782 int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
1783 {
1784 	u32 ecx = kvm_rcx_read(vcpu);
1785 	u64 data = kvm_read_edx_eax(vcpu);
1786 	int r;
1787 
1788 	r = kvm_set_msr(vcpu, ecx, data);
1789 
1790 	/* MSR write failed? See if we should ask user space */
1791 	if (r && kvm_set_msr_user_space(vcpu, ecx, data, r))
1792 		/* Bounce to user space */
1793 		return 0;
1794 
1795 	/* Signal all other negative errors to userspace */
1796 	if (r < 0)
1797 		return r;
1798 
1799 	/* MSR write failed? Inject a #GP */
1800 	if (r > 0) {
1801 		trace_kvm_msr_write_ex(ecx, data);
1802 		kvm_inject_gp(vcpu, 0);
1803 		return 1;
1804 	}
1805 
1806 	trace_kvm_msr_write(ecx, data);
1807 	return kvm_skip_emulated_instruction(vcpu);
1808 }
1809 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
1810 
kvm_vcpu_exit_request(struct kvm_vcpu * vcpu)1811 bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
1812 {
1813 	return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) ||
1814 		xfer_to_guest_mode_work_pending();
1815 }
1816 EXPORT_SYMBOL_GPL(kvm_vcpu_exit_request);
1817 
1818 /*
1819  * The fast path for frequent and performance sensitive wrmsr emulation,
1820  * i.e. the sending of IPI, sending IPI early in the VM-Exit flow reduces
1821  * the latency of virtual IPI by avoiding the expensive bits of transitioning
1822  * from guest to host, e.g. reacquiring KVM's SRCU lock. In contrast to the
1823  * other cases which must be called after interrupts are enabled on the host.
1824  */
handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu * vcpu,u64 data)1825 static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data)
1826 {
1827 	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic))
1828 		return 1;
1829 
1830 	if (((data & APIC_SHORT_MASK) == APIC_DEST_NOSHORT) &&
1831 		((data & APIC_DEST_MASK) == APIC_DEST_PHYSICAL) &&
1832 		((data & APIC_MODE_MASK) == APIC_DM_FIXED) &&
1833 		((u32)(data >> 32) != X2APIC_BROADCAST)) {
1834 
1835 		data &= ~(1 << 12);
1836 		kvm_apic_send_ipi(vcpu->arch.apic, (u32)data, (u32)(data >> 32));
1837 		kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR2, (u32)(data >> 32));
1838 		kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR, (u32)data);
1839 		trace_kvm_apic_write(APIC_ICR, (u32)data);
1840 		return 0;
1841 	}
1842 
1843 	return 1;
1844 }
1845 
handle_fastpath_set_tscdeadline(struct kvm_vcpu * vcpu,u64 data)1846 static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data)
1847 {
1848 	if (!kvm_can_use_hv_timer(vcpu))
1849 		return 1;
1850 
1851 	kvm_set_lapic_tscdeadline_msr(vcpu, data);
1852 	return 0;
1853 }
1854 
handle_fastpath_set_msr_irqoff(struct kvm_vcpu * vcpu)1855 fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
1856 {
1857 	u32 msr = kvm_rcx_read(vcpu);
1858 	u64 data;
1859 	fastpath_t ret = EXIT_FASTPATH_NONE;
1860 
1861 	switch (msr) {
1862 	case APIC_BASE_MSR + (APIC_ICR >> 4):
1863 		data = kvm_read_edx_eax(vcpu);
1864 		if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data)) {
1865 			kvm_skip_emulated_instruction(vcpu);
1866 			ret = EXIT_FASTPATH_EXIT_HANDLED;
1867 		}
1868 		break;
1869 	case MSR_IA32_TSCDEADLINE:
1870 		data = kvm_read_edx_eax(vcpu);
1871 		if (!handle_fastpath_set_tscdeadline(vcpu, data)) {
1872 			kvm_skip_emulated_instruction(vcpu);
1873 			ret = EXIT_FASTPATH_REENTER_GUEST;
1874 		}
1875 		break;
1876 	default:
1877 		break;
1878 	}
1879 
1880 	if (ret != EXIT_FASTPATH_NONE)
1881 		trace_kvm_msr_write(msr, data);
1882 
1883 	return ret;
1884 }
1885 EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);
1886 
1887 /*
1888  * Adapt set_msr() to msr_io()'s calling convention
1889  */
do_get_msr(struct kvm_vcpu * vcpu,unsigned index,u64 * data)1890 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1891 {
1892 	return kvm_get_msr_ignored_check(vcpu, index, data, true);
1893 }
1894 
do_set_msr(struct kvm_vcpu * vcpu,unsigned index,u64 * data)1895 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1896 {
1897 	return kvm_set_msr_ignored_check(vcpu, index, *data, true);
1898 }
1899 
1900 #ifdef CONFIG_X86_64
1901 struct pvclock_clock {
1902 	int vclock_mode;
1903 	u64 cycle_last;
1904 	u64 mask;
1905 	u32 mult;
1906 	u32 shift;
1907 	u64 base_cycles;
1908 	u64 offset;
1909 };
1910 
1911 struct pvclock_gtod_data {
1912 	seqcount_t	seq;
1913 
1914 	struct pvclock_clock clock; /* extract of a clocksource struct */
1915 	struct pvclock_clock raw_clock; /* extract of a clocksource struct */
1916 
1917 	ktime_t		offs_boot;
1918 	u64		wall_time_sec;
1919 };
1920 
1921 static struct pvclock_gtod_data pvclock_gtod_data;
1922 
update_pvclock_gtod(struct timekeeper * tk)1923 static void update_pvclock_gtod(struct timekeeper *tk)
1924 {
1925 	struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
1926 
1927 	write_seqcount_begin(&vdata->seq);
1928 
1929 	/* copy pvclock gtod data */
1930 	vdata->clock.vclock_mode	= tk->tkr_mono.clock->vdso_clock_mode;
1931 	vdata->clock.cycle_last		= tk->tkr_mono.cycle_last;
1932 	vdata->clock.mask		= tk->tkr_mono.mask;
1933 	vdata->clock.mult		= tk->tkr_mono.mult;
1934 	vdata->clock.shift		= tk->tkr_mono.shift;
1935 	vdata->clock.base_cycles	= tk->tkr_mono.xtime_nsec;
1936 	vdata->clock.offset		= tk->tkr_mono.base;
1937 
1938 	vdata->raw_clock.vclock_mode	= tk->tkr_raw.clock->vdso_clock_mode;
1939 	vdata->raw_clock.cycle_last	= tk->tkr_raw.cycle_last;
1940 	vdata->raw_clock.mask		= tk->tkr_raw.mask;
1941 	vdata->raw_clock.mult		= tk->tkr_raw.mult;
1942 	vdata->raw_clock.shift		= tk->tkr_raw.shift;
1943 	vdata->raw_clock.base_cycles	= tk->tkr_raw.xtime_nsec;
1944 	vdata->raw_clock.offset		= tk->tkr_raw.base;
1945 
1946 	vdata->wall_time_sec            = tk->xtime_sec;
1947 
1948 	vdata->offs_boot		= tk->offs_boot;
1949 
1950 	write_seqcount_end(&vdata->seq);
1951 }
1952 
get_kvmclock_base_ns(void)1953 static s64 get_kvmclock_base_ns(void)
1954 {
1955 	/* Count up from boot time, but with the frequency of the raw clock.  */
1956 	return ktime_to_ns(ktime_add(ktime_get_raw(), pvclock_gtod_data.offs_boot));
1957 }
1958 #else
get_kvmclock_base_ns(void)1959 static s64 get_kvmclock_base_ns(void)
1960 {
1961 	/* Master clock not used, so we can just use CLOCK_BOOTTIME.  */
1962 	return ktime_get_boottime_ns();
1963 }
1964 #endif
1965 
kvm_write_wall_clock(struct kvm * kvm,gpa_t wall_clock)1966 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
1967 {
1968 	int version;
1969 	int r;
1970 	struct pvclock_wall_clock wc;
1971 	u64 wall_nsec;
1972 
1973 	kvm->arch.wall_clock = wall_clock;
1974 
1975 	if (!wall_clock)
1976 		return;
1977 
1978 	r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
1979 	if (r)
1980 		return;
1981 
1982 	if (version & 1)
1983 		++version;  /* first time write, random junk */
1984 
1985 	++version;
1986 
1987 	if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version)))
1988 		return;
1989 
1990 	/*
1991 	 * The guest calculates current wall clock time by adding
1992 	 * system time (updated by kvm_guest_time_update below) to the
1993 	 * wall clock specified here.  We do the reverse here.
1994 	 */
1995 	wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm);
1996 
1997 	wc.nsec = do_div(wall_nsec, 1000000000);
1998 	wc.sec = (u32)wall_nsec; /* overflow in 2106 guest time */
1999 	wc.version = version;
2000 
2001 	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
2002 
2003 	version++;
2004 	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
2005 }
2006 
kvm_write_system_time(struct kvm_vcpu * vcpu,gpa_t system_time,bool old_msr,bool host_initiated)2007 static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time,
2008 				  bool old_msr, bool host_initiated)
2009 {
2010 	struct kvm_arch *ka = &vcpu->kvm->arch;
2011 
2012 	if (vcpu->vcpu_id == 0 && !host_initiated) {
2013 		if (ka->boot_vcpu_runs_old_kvmclock != old_msr)
2014 			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
2015 
2016 		ka->boot_vcpu_runs_old_kvmclock = old_msr;
2017 	}
2018 
2019 	vcpu->arch.time = system_time;
2020 	kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
2021 
2022 	/* we verify if the enable bit is set... */
2023 	vcpu->arch.pv_time_enabled = false;
2024 	if (!(system_time & 1))
2025 		return;
2026 
2027 	if (!kvm_gfn_to_hva_cache_init(vcpu->kvm,
2028 				       &vcpu->arch.pv_time, system_time & ~1ULL,
2029 				       sizeof(struct pvclock_vcpu_time_info)))
2030 		vcpu->arch.pv_time_enabled = true;
2031 
2032 	return;
2033 }
2034 
div_frac(uint32_t dividend,uint32_t divisor)2035 static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
2036 {
2037 	do_shl32_div32(dividend, divisor);
2038 	return dividend;
2039 }
2040 
kvm_get_time_scale(uint64_t scaled_hz,uint64_t base_hz,s8 * pshift,u32 * pmultiplier)2041 static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz,
2042 			       s8 *pshift, u32 *pmultiplier)
2043 {
2044 	uint64_t scaled64;
2045 	int32_t  shift = 0;
2046 	uint64_t tps64;
2047 	uint32_t tps32;
2048 
2049 	tps64 = base_hz;
2050 	scaled64 = scaled_hz;
2051 	while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
2052 		tps64 >>= 1;
2053 		shift--;
2054 	}
2055 
2056 	tps32 = (uint32_t)tps64;
2057 	while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
2058 		if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
2059 			scaled64 >>= 1;
2060 		else
2061 			tps32 <<= 1;
2062 		shift++;
2063 	}
2064 
2065 	*pshift = shift;
2066 	*pmultiplier = div_frac(scaled64, tps32);
2067 }
2068 
2069 #ifdef CONFIG_X86_64
2070 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
2071 #endif
2072 
2073 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
2074 static unsigned long max_tsc_khz;
2075 
adjust_tsc_khz(u32 khz,s32 ppm)2076 static u32 adjust_tsc_khz(u32 khz, s32 ppm)
2077 {
2078 	u64 v = (u64)khz * (1000000 + ppm);
2079 	do_div(v, 1000000);
2080 	return v;
2081 }
2082 
set_tsc_khz(struct kvm_vcpu * vcpu,u32 user_tsc_khz,bool scale)2083 static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
2084 {
2085 	u64 ratio;
2086 
2087 	/* Guest TSC same frequency as host TSC? */
2088 	if (!scale) {
2089 		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
2090 		return 0;
2091 	}
2092 
2093 	/* TSC scaling supported? */
2094 	if (!kvm_has_tsc_control) {
2095 		if (user_tsc_khz > tsc_khz) {
2096 			vcpu->arch.tsc_catchup = 1;
2097 			vcpu->arch.tsc_always_catchup = 1;
2098 			return 0;
2099 		} else {
2100 			pr_warn_ratelimited("user requested TSC rate below hardware speed\n");
2101 			return -1;
2102 		}
2103 	}
2104 
2105 	/* TSC scaling required  - calculate ratio */
2106 	ratio = mul_u64_u32_div(1ULL << kvm_tsc_scaling_ratio_frac_bits,
2107 				user_tsc_khz, tsc_khz);
2108 
2109 	if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) {
2110 		pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
2111 			            user_tsc_khz);
2112 		return -1;
2113 	}
2114 
2115 	vcpu->arch.tsc_scaling_ratio = ratio;
2116 	return 0;
2117 }
2118 
kvm_set_tsc_khz(struct kvm_vcpu * vcpu,u32 user_tsc_khz)2119 static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
2120 {
2121 	u32 thresh_lo, thresh_hi;
2122 	int use_scaling = 0;
2123 
2124 	/* tsc_khz can be zero if TSC calibration fails */
2125 	if (user_tsc_khz == 0) {
2126 		/* set tsc_scaling_ratio to a safe value */
2127 		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
2128 		return -1;
2129 	}
2130 
2131 	/* Compute a scale to convert nanoseconds in TSC cycles */
2132 	kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC,
2133 			   &vcpu->arch.virtual_tsc_shift,
2134 			   &vcpu->arch.virtual_tsc_mult);
2135 	vcpu->arch.virtual_tsc_khz = user_tsc_khz;
2136 
2137 	/*
2138 	 * Compute the variation in TSC rate which is acceptable
2139 	 * within the range of tolerance and decide if the
2140 	 * rate being applied is within that bounds of the hardware
2141 	 * rate.  If so, no scaling or compensation need be done.
2142 	 */
2143 	thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
2144 	thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
2145 	if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) {
2146 		pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", user_tsc_khz, thresh_lo, thresh_hi);
2147 		use_scaling = 1;
2148 	}
2149 	return set_tsc_khz(vcpu, user_tsc_khz, use_scaling);
2150 }
2151 
compute_guest_tsc(struct kvm_vcpu * vcpu,s64 kernel_ns)2152 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
2153 {
2154 	u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
2155 				      vcpu->arch.virtual_tsc_mult,
2156 				      vcpu->arch.virtual_tsc_shift);
2157 	tsc += vcpu->arch.this_tsc_write;
2158 	return tsc;
2159 }
2160 
gtod_is_based_on_tsc(int mode)2161 static inline int gtod_is_based_on_tsc(int mode)
2162 {
2163 	return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK;
2164 }
2165 
kvm_track_tsc_matching(struct kvm_vcpu * vcpu)2166 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
2167 {
2168 #ifdef CONFIG_X86_64
2169 	bool vcpus_matched;
2170 	struct kvm_arch *ka = &vcpu->kvm->arch;
2171 	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2172 
2173 	vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
2174 			 atomic_read(&vcpu->kvm->online_vcpus));
2175 
2176 	/*
2177 	 * Once the masterclock is enabled, always perform request in
2178 	 * order to update it.
2179 	 *
2180 	 * In order to enable masterclock, the host clocksource must be TSC
2181 	 * and the vcpus need to have matched TSCs.  When that happens,
2182 	 * perform request to enable masterclock.
2183 	 */
2184 	if (ka->use_master_clock ||
2185 	    (gtod_is_based_on_tsc(gtod->clock.vclock_mode) && vcpus_matched))
2186 		kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
2187 
2188 	trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
2189 			    atomic_read(&vcpu->kvm->online_vcpus),
2190 		            ka->use_master_clock, gtod->clock.vclock_mode);
2191 #endif
2192 }
2193 
2194 /*
2195  * Multiply tsc by a fixed point number represented by ratio.
2196  *
2197  * The most significant 64-N bits (mult) of ratio represent the
2198  * integral part of the fixed point number; the remaining N bits
2199  * (frac) represent the fractional part, ie. ratio represents a fixed
2200  * point number (mult + frac * 2^(-N)).
2201  *
2202  * N equals to kvm_tsc_scaling_ratio_frac_bits.
2203  */
__scale_tsc(u64 ratio,u64 tsc)2204 static inline u64 __scale_tsc(u64 ratio, u64 tsc)
2205 {
2206 	return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits);
2207 }
2208 
kvm_scale_tsc(struct kvm_vcpu * vcpu,u64 tsc)2209 u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
2210 {
2211 	u64 _tsc = tsc;
2212 	u64 ratio = vcpu->arch.tsc_scaling_ratio;
2213 
2214 	if (ratio != kvm_default_tsc_scaling_ratio)
2215 		_tsc = __scale_tsc(ratio, tsc);
2216 
2217 	return _tsc;
2218 }
2219 EXPORT_SYMBOL_GPL(kvm_scale_tsc);
2220 
kvm_compute_tsc_offset(struct kvm_vcpu * vcpu,u64 target_tsc)2221 static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
2222 {
2223 	u64 tsc;
2224 
2225 	tsc = kvm_scale_tsc(vcpu, rdtsc());
2226 
2227 	return target_tsc - tsc;
2228 }
2229 
kvm_read_l1_tsc(struct kvm_vcpu * vcpu,u64 host_tsc)2230 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
2231 {
2232 	return vcpu->arch.l1_tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
2233 }
2234 EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
2235 
kvm_vcpu_write_tsc_offset(struct kvm_vcpu * vcpu,u64 offset)2236 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
2237 {
2238 	vcpu->arch.l1_tsc_offset = offset;
2239 	vcpu->arch.tsc_offset = kvm_x86_ops.write_l1_tsc_offset(vcpu, offset);
2240 }
2241 
kvm_check_tsc_unstable(void)2242 static inline bool kvm_check_tsc_unstable(void)
2243 {
2244 #ifdef CONFIG_X86_64
2245 	/*
2246 	 * TSC is marked unstable when we're running on Hyper-V,
2247 	 * 'TSC page' clocksource is good.
2248 	 */
2249 	if (pvclock_gtod_data.clock.vclock_mode == VDSO_CLOCKMODE_HVCLOCK)
2250 		return false;
2251 #endif
2252 	return check_tsc_unstable();
2253 }
2254 
kvm_synchronize_tsc(struct kvm_vcpu * vcpu,u64 data)2255 static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
2256 {
2257 	struct kvm *kvm = vcpu->kvm;
2258 	u64 offset, ns, elapsed;
2259 	unsigned long flags;
2260 	bool matched;
2261 	bool already_matched;
2262 	bool synchronizing = false;
2263 
2264 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
2265 	offset = kvm_compute_tsc_offset(vcpu, data);
2266 	ns = get_kvmclock_base_ns();
2267 	elapsed = ns - kvm->arch.last_tsc_nsec;
2268 
2269 	if (vcpu->arch.virtual_tsc_khz) {
2270 		if (data == 0) {
2271 			/*
2272 			 * detection of vcpu initialization -- need to sync
2273 			 * with other vCPUs. This particularly helps to keep
2274 			 * kvm_clock stable after CPU hotplug
2275 			 */
2276 			synchronizing = true;
2277 		} else {
2278 			u64 tsc_exp = kvm->arch.last_tsc_write +
2279 						nsec_to_cycles(vcpu, elapsed);
2280 			u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
2281 			/*
2282 			 * Special case: TSC write with a small delta (1 second)
2283 			 * of virtual cycle time against real time is
2284 			 * interpreted as an attempt to synchronize the CPU.
2285 			 */
2286 			synchronizing = data < tsc_exp + tsc_hz &&
2287 					data + tsc_hz > tsc_exp;
2288 		}
2289 	}
2290 
2291 	/*
2292 	 * For a reliable TSC, we can match TSC offsets, and for an unstable
2293 	 * TSC, we add elapsed time in this computation.  We could let the
2294 	 * compensation code attempt to catch up if we fall behind, but
2295 	 * it's better to try to match offsets from the beginning.
2296          */
2297 	if (synchronizing &&
2298 	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
2299 		if (!kvm_check_tsc_unstable()) {
2300 			offset = kvm->arch.cur_tsc_offset;
2301 		} else {
2302 			u64 delta = nsec_to_cycles(vcpu, elapsed);
2303 			data += delta;
2304 			offset = kvm_compute_tsc_offset(vcpu, data);
2305 		}
2306 		matched = true;
2307 		already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
2308 	} else {
2309 		/*
2310 		 * We split periods of matched TSC writes into generations.
2311 		 * For each generation, we track the original measured
2312 		 * nanosecond time, offset, and write, so if TSCs are in
2313 		 * sync, we can match exact offset, and if not, we can match
2314 		 * exact software computation in compute_guest_tsc()
2315 		 *
2316 		 * These values are tracked in kvm->arch.cur_xxx variables.
2317 		 */
2318 		kvm->arch.cur_tsc_generation++;
2319 		kvm->arch.cur_tsc_nsec = ns;
2320 		kvm->arch.cur_tsc_write = data;
2321 		kvm->arch.cur_tsc_offset = offset;
2322 		matched = false;
2323 	}
2324 
2325 	/*
2326 	 * We also track th most recent recorded KHZ, write and time to
2327 	 * allow the matching interval to be extended at each write.
2328 	 */
2329 	kvm->arch.last_tsc_nsec = ns;
2330 	kvm->arch.last_tsc_write = data;
2331 	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
2332 
2333 	vcpu->arch.last_guest_tsc = data;
2334 
2335 	/* Keep track of which generation this VCPU has synchronized to */
2336 	vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
2337 	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
2338 	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
2339 
2340 	kvm_vcpu_write_tsc_offset(vcpu, offset);
2341 	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
2342 
2343 	spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
2344 	if (!matched) {
2345 		kvm->arch.nr_vcpus_matched_tsc = 0;
2346 	} else if (!already_matched) {
2347 		kvm->arch.nr_vcpus_matched_tsc++;
2348 	}
2349 
2350 	kvm_track_tsc_matching(vcpu);
2351 	spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
2352 }
2353 
adjust_tsc_offset_guest(struct kvm_vcpu * vcpu,s64 adjustment)2354 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
2355 					   s64 adjustment)
2356 {
2357 	u64 tsc_offset = vcpu->arch.l1_tsc_offset;
2358 	kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment);
2359 }
2360 
adjust_tsc_offset_host(struct kvm_vcpu * vcpu,s64 adjustment)2361 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
2362 {
2363 	if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
2364 		WARN_ON(adjustment < 0);
2365 	adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
2366 	adjust_tsc_offset_guest(vcpu, adjustment);
2367 }
2368 
2369 #ifdef CONFIG_X86_64
2370 
read_tsc(void)2371 static u64 read_tsc(void)
2372 {
2373 	u64 ret = (u64)rdtsc_ordered();
2374 	u64 last = pvclock_gtod_data.clock.cycle_last;
2375 
2376 	if (likely(ret >= last))
2377 		return ret;
2378 
2379 	/*
2380 	 * GCC likes to generate cmov here, but this branch is extremely
2381 	 * predictable (it's just a function of time and the likely is
2382 	 * very likely) and there's a data dependence, so force GCC
2383 	 * to generate a branch instead.  I don't barrier() because
2384 	 * we don't actually need a barrier, and if this function
2385 	 * ever gets inlined it will generate worse code.
2386 	 */
2387 	asm volatile ("");
2388 	return last;
2389 }
2390 
vgettsc(struct pvclock_clock * clock,u64 * tsc_timestamp,int * mode)2391 static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp,
2392 			  int *mode)
2393 {
2394 	long v;
2395 	u64 tsc_pg_val;
2396 
2397 	switch (clock->vclock_mode) {
2398 	case VDSO_CLOCKMODE_HVCLOCK:
2399 		tsc_pg_val = hv_read_tsc_page_tsc(hv_get_tsc_page(),
2400 						  tsc_timestamp);
2401 		if (tsc_pg_val != U64_MAX) {
2402 			/* TSC page valid */
2403 			*mode = VDSO_CLOCKMODE_HVCLOCK;
2404 			v = (tsc_pg_val - clock->cycle_last) &
2405 				clock->mask;
2406 		} else {
2407 			/* TSC page invalid */
2408 			*mode = VDSO_CLOCKMODE_NONE;
2409 		}
2410 		break;
2411 	case VDSO_CLOCKMODE_TSC:
2412 		*mode = VDSO_CLOCKMODE_TSC;
2413 		*tsc_timestamp = read_tsc();
2414 		v = (*tsc_timestamp - clock->cycle_last) &
2415 			clock->mask;
2416 		break;
2417 	default:
2418 		*mode = VDSO_CLOCKMODE_NONE;
2419 	}
2420 
2421 	if (*mode == VDSO_CLOCKMODE_NONE)
2422 		*tsc_timestamp = v = 0;
2423 
2424 	return v * clock->mult;
2425 }
2426 
do_monotonic_raw(s64 * t,u64 * tsc_timestamp)2427 static int do_monotonic_raw(s64 *t, u64 *tsc_timestamp)
2428 {
2429 	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2430 	unsigned long seq;
2431 	int mode;
2432 	u64 ns;
2433 
2434 	do {
2435 		seq = read_seqcount_begin(&gtod->seq);
2436 		ns = gtod->raw_clock.base_cycles;
2437 		ns += vgettsc(&gtod->raw_clock, tsc_timestamp, &mode);
2438 		ns >>= gtod->raw_clock.shift;
2439 		ns += ktime_to_ns(ktime_add(gtod->raw_clock.offset, gtod->offs_boot));
2440 	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
2441 	*t = ns;
2442 
2443 	return mode;
2444 }
2445 
do_realtime(struct timespec64 * ts,u64 * tsc_timestamp)2446 static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp)
2447 {
2448 	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2449 	unsigned long seq;
2450 	int mode;
2451 	u64 ns;
2452 
2453 	do {
2454 		seq = read_seqcount_begin(&gtod->seq);
2455 		ts->tv_sec = gtod->wall_time_sec;
2456 		ns = gtod->clock.base_cycles;
2457 		ns += vgettsc(&gtod->clock, tsc_timestamp, &mode);
2458 		ns >>= gtod->clock.shift;
2459 	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
2460 
2461 	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
2462 	ts->tv_nsec = ns;
2463 
2464 	return mode;
2465 }
2466 
2467 /* returns true if host is using TSC based clocksource */
kvm_get_time_and_clockread(s64 * kernel_ns,u64 * tsc_timestamp)2468 static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
2469 {
2470 	/* checked again under seqlock below */
2471 	if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
2472 		return false;
2473 
2474 	return gtod_is_based_on_tsc(do_monotonic_raw(kernel_ns,
2475 						      tsc_timestamp));
2476 }
2477 
2478 /* returns true if host is using TSC based clocksource */
kvm_get_walltime_and_clockread(struct timespec64 * ts,u64 * tsc_timestamp)2479 static bool kvm_get_walltime_and_clockread(struct timespec64 *ts,
2480 					   u64 *tsc_timestamp)
2481 {
2482 	/* checked again under seqlock below */
2483 	if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
2484 		return false;
2485 
2486 	return gtod_is_based_on_tsc(do_realtime(ts, tsc_timestamp));
2487 }
2488 #endif
2489 
2490 /*
2491  *
2492  * Assuming a stable TSC across physical CPUS, and a stable TSC
2493  * across virtual CPUs, the following condition is possible.
2494  * Each numbered line represents an event visible to both
2495  * CPUs at the next numbered event.
2496  *
2497  * "timespecX" represents host monotonic time. "tscX" represents
2498  * RDTSC value.
2499  *
2500  * 		VCPU0 on CPU0		|	VCPU1 on CPU1
2501  *
2502  * 1.  read timespec0,tsc0
2503  * 2.					| timespec1 = timespec0 + N
2504  * 					| tsc1 = tsc0 + M
2505  * 3. transition to guest		| transition to guest
2506  * 4. ret0 = timespec0 + (rdtsc - tsc0) |
2507  * 5.				        | ret1 = timespec1 + (rdtsc - tsc1)
2508  * 				        | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
2509  *
2510  * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
2511  *
2512  * 	- ret0 < ret1
2513  *	- timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
2514  *		...
2515  *	- 0 < N - M => M < N
2516  *
2517  * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
2518  * always the case (the difference between two distinct xtime instances
2519  * might be smaller then the difference between corresponding TSC reads,
2520  * when updating guest vcpus pvclock areas).
2521  *
2522  * To avoid that problem, do not allow visibility of distinct
2523  * system_timestamp/tsc_timestamp values simultaneously: use a master
2524  * copy of host monotonic time values. Update that master copy
2525  * in lockstep.
2526  *
2527  * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
2528  *
2529  */
2530 
pvclock_update_vm_gtod_copy(struct kvm * kvm)2531 static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
2532 {
2533 #ifdef CONFIG_X86_64
2534 	struct kvm_arch *ka = &kvm->arch;
2535 	int vclock_mode;
2536 	bool host_tsc_clocksource, vcpus_matched;
2537 
2538 	vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
2539 			atomic_read(&kvm->online_vcpus));
2540 
2541 	/*
2542 	 * If the host uses TSC clock, then passthrough TSC as stable
2543 	 * to the guest.
2544 	 */
2545 	host_tsc_clocksource = kvm_get_time_and_clockread(
2546 					&ka->master_kernel_ns,
2547 					&ka->master_cycle_now);
2548 
2549 	ka->use_master_clock = host_tsc_clocksource && vcpus_matched
2550 				&& !ka->backwards_tsc_observed
2551 				&& !ka->boot_vcpu_runs_old_kvmclock;
2552 
2553 	if (ka->use_master_clock)
2554 		atomic_set(&kvm_guest_has_master_clock, 1);
2555 
2556 	vclock_mode = pvclock_gtod_data.clock.vclock_mode;
2557 	trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode,
2558 					vcpus_matched);
2559 #endif
2560 }
2561 
kvm_make_mclock_inprogress_request(struct kvm * kvm)2562 void kvm_make_mclock_inprogress_request(struct kvm *kvm)
2563 {
2564 	kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
2565 }
2566 
kvm_gen_update_masterclock(struct kvm * kvm)2567 static void kvm_gen_update_masterclock(struct kvm *kvm)
2568 {
2569 #ifdef CONFIG_X86_64
2570 	int i;
2571 	struct kvm_vcpu *vcpu;
2572 	struct kvm_arch *ka = &kvm->arch;
2573 
2574 	spin_lock(&ka->pvclock_gtod_sync_lock);
2575 	kvm_make_mclock_inprogress_request(kvm);
2576 	/* no guest entries from this point */
2577 	pvclock_update_vm_gtod_copy(kvm);
2578 
2579 	kvm_for_each_vcpu(i, vcpu, kvm)
2580 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
2581 
2582 	/* guest entries allowed */
2583 	kvm_for_each_vcpu(i, vcpu, kvm)
2584 		kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
2585 
2586 	spin_unlock(&ka->pvclock_gtod_sync_lock);
2587 #endif
2588 }
2589 
get_kvmclock_ns(struct kvm * kvm)2590 u64 get_kvmclock_ns(struct kvm *kvm)
2591 {
2592 	struct kvm_arch *ka = &kvm->arch;
2593 	struct pvclock_vcpu_time_info hv_clock;
2594 	u64 ret;
2595 
2596 	spin_lock(&ka->pvclock_gtod_sync_lock);
2597 	if (!ka->use_master_clock) {
2598 		spin_unlock(&ka->pvclock_gtod_sync_lock);
2599 		return get_kvmclock_base_ns() + ka->kvmclock_offset;
2600 	}
2601 
2602 	hv_clock.tsc_timestamp = ka->master_cycle_now;
2603 	hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
2604 	spin_unlock(&ka->pvclock_gtod_sync_lock);
2605 
2606 	/* both __this_cpu_read() and rdtsc() should be on the same cpu */
2607 	get_cpu();
2608 
2609 	if (__this_cpu_read(cpu_tsc_khz)) {
2610 		kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
2611 				   &hv_clock.tsc_shift,
2612 				   &hv_clock.tsc_to_system_mul);
2613 		ret = __pvclock_read_cycles(&hv_clock, rdtsc());
2614 	} else
2615 		ret = get_kvmclock_base_ns() + ka->kvmclock_offset;
2616 
2617 	put_cpu();
2618 
2619 	return ret;
2620 }
2621 
kvm_setup_pvclock_page(struct kvm_vcpu * v)2622 static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
2623 {
2624 	struct kvm_vcpu_arch *vcpu = &v->arch;
2625 	struct pvclock_vcpu_time_info guest_hv_clock;
2626 
2627 	if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
2628 		&guest_hv_clock, sizeof(guest_hv_clock))))
2629 		return;
2630 
2631 	/* This VCPU is paused, but it's legal for a guest to read another
2632 	 * VCPU's kvmclock, so we really have to follow the specification where
2633 	 * it says that version is odd if data is being modified, and even after
2634 	 * it is consistent.
2635 	 *
2636 	 * Version field updates must be kept separate.  This is because
2637 	 * kvm_write_guest_cached might use a "rep movs" instruction, and
2638 	 * writes within a string instruction are weakly ordered.  So there
2639 	 * are three writes overall.
2640 	 *
2641 	 * As a small optimization, only write the version field in the first
2642 	 * and third write.  The vcpu->pv_time cache is still valid, because the
2643 	 * version field is the first in the struct.
2644 	 */
2645 	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
2646 
2647 	if (guest_hv_clock.version & 1)
2648 		++guest_hv_clock.version;  /* first time write, random junk */
2649 
2650 	vcpu->hv_clock.version = guest_hv_clock.version + 1;
2651 	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
2652 				&vcpu->hv_clock,
2653 				sizeof(vcpu->hv_clock.version));
2654 
2655 	smp_wmb();
2656 
2657 	/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
2658 	vcpu->hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
2659 
2660 	if (vcpu->pvclock_set_guest_stopped_request) {
2661 		vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
2662 		vcpu->pvclock_set_guest_stopped_request = false;
2663 	}
2664 
2665 	trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
2666 
2667 	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
2668 				&vcpu->hv_clock,
2669 				sizeof(vcpu->hv_clock));
2670 
2671 	smp_wmb();
2672 
2673 	vcpu->hv_clock.version++;
2674 	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
2675 				&vcpu->hv_clock,
2676 				sizeof(vcpu->hv_clock.version));
2677 }
2678 
kvm_guest_time_update(struct kvm_vcpu * v)2679 static int kvm_guest_time_update(struct kvm_vcpu *v)
2680 {
2681 	unsigned long flags, tgt_tsc_khz;
2682 	struct kvm_vcpu_arch *vcpu = &v->arch;
2683 	struct kvm_arch *ka = &v->kvm->arch;
2684 	s64 kernel_ns;
2685 	u64 tsc_timestamp, host_tsc;
2686 	u8 pvclock_flags;
2687 	bool use_master_clock;
2688 
2689 	kernel_ns = 0;
2690 	host_tsc = 0;
2691 
2692 	/*
2693 	 * If the host uses TSC clock, then passthrough TSC as stable
2694 	 * to the guest.
2695 	 */
2696 	spin_lock(&ka->pvclock_gtod_sync_lock);
2697 	use_master_clock = ka->use_master_clock;
2698 	if (use_master_clock) {
2699 		host_tsc = ka->master_cycle_now;
2700 		kernel_ns = ka->master_kernel_ns;
2701 	}
2702 	spin_unlock(&ka->pvclock_gtod_sync_lock);
2703 
2704 	/* Keep irq disabled to prevent changes to the clock */
2705 	local_irq_save(flags);
2706 	tgt_tsc_khz = __this_cpu_read(cpu_tsc_khz);
2707 	if (unlikely(tgt_tsc_khz == 0)) {
2708 		local_irq_restore(flags);
2709 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
2710 		return 1;
2711 	}
2712 	if (!use_master_clock) {
2713 		host_tsc = rdtsc();
2714 		kernel_ns = get_kvmclock_base_ns();
2715 	}
2716 
2717 	tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);
2718 
2719 	/*
2720 	 * We may have to catch up the TSC to match elapsed wall clock
2721 	 * time for two reasons, even if kvmclock is used.
2722 	 *   1) CPU could have been running below the maximum TSC rate
2723 	 *   2) Broken TSC compensation resets the base at each VCPU
2724 	 *      entry to avoid unknown leaps of TSC even when running
2725 	 *      again on the same CPU.  This may cause apparent elapsed
2726 	 *      time to disappear, and the guest to stand still or run
2727 	 *	very slowly.
2728 	 */
2729 	if (vcpu->tsc_catchup) {
2730 		u64 tsc = compute_guest_tsc(v, kernel_ns);
2731 		if (tsc > tsc_timestamp) {
2732 			adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
2733 			tsc_timestamp = tsc;
2734 		}
2735 	}
2736 
2737 	local_irq_restore(flags);
2738 
2739 	/* With all the info we got, fill in the values */
2740 
2741 	if (kvm_has_tsc_control)
2742 		tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz);
2743 
2744 	if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
2745 		kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
2746 				   &vcpu->hv_clock.tsc_shift,
2747 				   &vcpu->hv_clock.tsc_to_system_mul);
2748 		vcpu->hw_tsc_khz = tgt_tsc_khz;
2749 	}
2750 
2751 	vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
2752 	vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
2753 	vcpu->last_guest_tsc = tsc_timestamp;
2754 
2755 	/* If the host uses TSC clocksource, then it is stable */
2756 	pvclock_flags = 0;
2757 	if (use_master_clock)
2758 		pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
2759 
2760 	vcpu->hv_clock.flags = pvclock_flags;
2761 
2762 	if (vcpu->pv_time_enabled)
2763 		kvm_setup_pvclock_page(v);
2764 	if (v == kvm_get_vcpu(v->kvm, 0))
2765 		kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
2766 	return 0;
2767 }
2768 
2769 /*
2770  * kvmclock updates which are isolated to a given vcpu, such as
2771  * vcpu->cpu migration, should not allow system_timestamp from
2772  * the rest of the vcpus to remain static. Otherwise ntp frequency
2773  * correction applies to one vcpu's system_timestamp but not
2774  * the others.
2775  *
2776  * So in those cases, request a kvmclock update for all vcpus.
2777  * We need to rate-limit these requests though, as they can
2778  * considerably slow guests that have a large number of vcpus.
2779  * The time for a remote vcpu to update its kvmclock is bound
2780  * by the delay we use to rate-limit the updates.
2781  */
2782 
2783 #define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100)
2784 
kvmclock_update_fn(struct work_struct * work)2785 static void kvmclock_update_fn(struct work_struct *work)
2786 {
2787 	int i;
2788 	struct delayed_work *dwork = to_delayed_work(work);
2789 	struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
2790 					   kvmclock_update_work);
2791 	struct kvm *kvm = container_of(ka, struct kvm, arch);
2792 	struct kvm_vcpu *vcpu;
2793 
2794 	kvm_for_each_vcpu(i, vcpu, kvm) {
2795 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
2796 		kvm_vcpu_kick(vcpu);
2797 	}
2798 }
2799 
kvm_gen_kvmclock_update(struct kvm_vcpu * v)2800 static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
2801 {
2802 	struct kvm *kvm = v->kvm;
2803 
2804 	kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
2805 	schedule_delayed_work(&kvm->arch.kvmclock_update_work,
2806 					KVMCLOCK_UPDATE_DELAY);
2807 }
2808 
2809 #define KVMCLOCK_SYNC_PERIOD (300 * HZ)
2810 
kvmclock_sync_fn(struct work_struct * work)2811 static void kvmclock_sync_fn(struct work_struct *work)
2812 {
2813 	struct delayed_work *dwork = to_delayed_work(work);
2814 	struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
2815 					   kvmclock_sync_work);
2816 	struct kvm *kvm = container_of(ka, struct kvm, arch);
2817 
2818 	if (!kvmclock_periodic_sync)
2819 		return;
2820 
2821 	schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
2822 	schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
2823 					KVMCLOCK_SYNC_PERIOD);
2824 }
2825 
2826 /*
2827  * On AMD, HWCR[McStatusWrEn] controls whether setting MCi_STATUS results in #GP.
2828  */
can_set_mci_status(struct kvm_vcpu * vcpu)2829 static bool can_set_mci_status(struct kvm_vcpu *vcpu)
2830 {
2831 	/* McStatusWrEn enabled? */
2832 	if (guest_cpuid_is_amd_or_hygon(vcpu))
2833 		return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
2834 
2835 	return false;
2836 }
2837 
set_msr_mce(struct kvm_vcpu * vcpu,struct msr_data * msr_info)2838 static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2839 {
2840 	u64 mcg_cap = vcpu->arch.mcg_cap;
2841 	unsigned bank_num = mcg_cap & 0xff;
2842 	u32 msr = msr_info->index;
2843 	u64 data = msr_info->data;
2844 
2845 	switch (msr) {
2846 	case MSR_IA32_MCG_STATUS:
2847 		vcpu->arch.mcg_status = data;
2848 		break;
2849 	case MSR_IA32_MCG_CTL:
2850 		if (!(mcg_cap & MCG_CTL_P) &&
2851 		    (data || !msr_info->host_initiated))
2852 			return 1;
2853 		if (data != 0 && data != ~(u64)0)
2854 			return 1;
2855 		vcpu->arch.mcg_ctl = data;
2856 		break;
2857 	default:
2858 		if (msr >= MSR_IA32_MC0_CTL &&
2859 		    msr < MSR_IA32_MCx_CTL(bank_num)) {
2860 			u32 offset = array_index_nospec(
2861 				msr - MSR_IA32_MC0_CTL,
2862 				MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL);
2863 
2864 			/* only 0 or all 1s can be written to IA32_MCi_CTL
2865 			 * some Linux kernels though clear bit 10 in bank 4 to
2866 			 * workaround a BIOS/GART TBL issue on AMD K8s, ignore
2867 			 * this to avoid an uncatched #GP in the guest
2868 			 */
2869 			if ((offset & 0x3) == 0 &&
2870 			    data != 0 && (data | (1 << 10)) != ~(u64)0)
2871 				return -1;
2872 
2873 			/* MCi_STATUS */
2874 			if (!msr_info->host_initiated &&
2875 			    (offset & 0x3) == 1 && data != 0) {
2876 				if (!can_set_mci_status(vcpu))
2877 					return -1;
2878 			}
2879 
2880 			vcpu->arch.mce_banks[offset] = data;
2881 			break;
2882 		}
2883 		return 1;
2884 	}
2885 	return 0;
2886 }
2887 
xen_hvm_config(struct kvm_vcpu * vcpu,u64 data)2888 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
2889 {
2890 	struct kvm *kvm = vcpu->kvm;
2891 	int lm = is_long_mode(vcpu);
2892 	u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
2893 		: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
2894 	u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
2895 		: kvm->arch.xen_hvm_config.blob_size_32;
2896 	u32 page_num = data & ~PAGE_MASK;
2897 	u64 page_addr = data & PAGE_MASK;
2898 	u8 *page;
2899 
2900 	if (page_num >= blob_size)
2901 		return 1;
2902 
2903 	page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
2904 	if (IS_ERR(page))
2905 		return PTR_ERR(page);
2906 
2907 	if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) {
2908 		kfree(page);
2909 		return 1;
2910 	}
2911 	return 0;
2912 }
2913 
kvm_pv_async_pf_enabled(struct kvm_vcpu * vcpu)2914 static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu)
2915 {
2916 	u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
2917 
2918 	return (vcpu->arch.apf.msr_en_val & mask) == mask;
2919 }
2920 
kvm_pv_enable_async_pf(struct kvm_vcpu * vcpu,u64 data)2921 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
2922 {
2923 	gpa_t gpa = data & ~0x3f;
2924 
2925 	/* Bits 4:5 are reserved, Should be zero */
2926 	if (data & 0x30)
2927 		return 1;
2928 
2929 	if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_VMEXIT) &&
2930 	    (data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT))
2931 		return 1;
2932 
2933 	if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT) &&
2934 	    (data & KVM_ASYNC_PF_DELIVERY_AS_INT))
2935 		return 1;
2936 
2937 	if (!lapic_in_kernel(vcpu))
2938 		return data ? 1 : 0;
2939 
2940 	vcpu->arch.apf.msr_en_val = data;
2941 
2942 	if (!kvm_pv_async_pf_enabled(vcpu)) {
2943 		kvm_clear_async_pf_completion_queue(vcpu);
2944 		kvm_async_pf_hash_reset(vcpu);
2945 		return 0;
2946 	}
2947 
2948 	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
2949 					sizeof(u64)))
2950 		return 1;
2951 
2952 	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
2953 	vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
2954 
2955 	kvm_async_pf_wakeup_all(vcpu);
2956 
2957 	return 0;
2958 }
2959 
kvm_pv_enable_async_pf_int(struct kvm_vcpu * vcpu,u64 data)2960 static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data)
2961 {
2962 	/* Bits 8-63 are reserved */
2963 	if (data >> 8)
2964 		return 1;
2965 
2966 	if (!lapic_in_kernel(vcpu))
2967 		return 1;
2968 
2969 	vcpu->arch.apf.msr_int_val = data;
2970 
2971 	vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK;
2972 
2973 	return 0;
2974 }
2975 
kvmclock_reset(struct kvm_vcpu * vcpu)2976 static void kvmclock_reset(struct kvm_vcpu *vcpu)
2977 {
2978 	vcpu->arch.pv_time_enabled = false;
2979 	vcpu->arch.time = 0;
2980 }
2981 
kvm_vcpu_flush_tlb_all(struct kvm_vcpu * vcpu)2982 static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu)
2983 {
2984 	++vcpu->stat.tlb_flush;
2985 	kvm_x86_ops.tlb_flush_all(vcpu);
2986 }
2987 
kvm_vcpu_flush_tlb_guest(struct kvm_vcpu * vcpu)2988 static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
2989 {
2990 	++vcpu->stat.tlb_flush;
2991 	kvm_x86_ops.tlb_flush_guest(vcpu);
2992 }
2993 
record_steal_time(struct kvm_vcpu * vcpu)2994 static void record_steal_time(struct kvm_vcpu *vcpu)
2995 {
2996 	struct kvm_host_map map;
2997 	struct kvm_steal_time *st;
2998 
2999 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
3000 		return;
3001 
3002 	/* -EAGAIN is returned in atomic context so we can just return. */
3003 	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT,
3004 			&map, &vcpu->arch.st.cache, false))
3005 		return;
3006 
3007 	st = map.hva +
3008 		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
3009 
3010 	/*
3011 	 * Doing a TLB flush here, on the guest's behalf, can avoid
3012 	 * expensive IPIs.
3013 	 */
3014 	if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) {
3015 		trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
3016 				       st->preempted & KVM_VCPU_FLUSH_TLB);
3017 		if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
3018 			kvm_vcpu_flush_tlb_guest(vcpu);
3019 	} else {
3020 		st->preempted = 0;
3021 	}
3022 
3023 	vcpu->arch.st.preempted = 0;
3024 
3025 	if (st->version & 1)
3026 		st->version += 1;  /* first time write, random junk */
3027 
3028 	st->version += 1;
3029 
3030 	smp_wmb();
3031 
3032 	st->steal += current->sched_info.run_delay -
3033 		vcpu->arch.st.last_steal;
3034 	vcpu->arch.st.last_steal = current->sched_info.run_delay;
3035 
3036 	smp_wmb();
3037 
3038 	st->version += 1;
3039 
3040 	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
3041 }
3042 
kvm_set_msr_common(struct kvm_vcpu * vcpu,struct msr_data * msr_info)3043 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3044 {
3045 	bool pr = false;
3046 	u32 msr = msr_info->index;
3047 	u64 data = msr_info->data;
3048 
3049 	switch (msr) {
3050 	case MSR_AMD64_NB_CFG:
3051 	case MSR_IA32_UCODE_WRITE:
3052 	case MSR_VM_HSAVE_PA:
3053 	case MSR_AMD64_PATCH_LOADER:
3054 	case MSR_AMD64_BU_CFG2:
3055 	case MSR_AMD64_DC_CFG:
3056 	case MSR_F15H_EX_CFG:
3057 		break;
3058 
3059 	case MSR_IA32_UCODE_REV:
3060 		if (msr_info->host_initiated)
3061 			vcpu->arch.microcode_version = data;
3062 		break;
3063 	case MSR_IA32_ARCH_CAPABILITIES:
3064 		if (!msr_info->host_initiated)
3065 			return 1;
3066 		vcpu->arch.arch_capabilities = data;
3067 		break;
3068 	case MSR_IA32_PERF_CAPABILITIES: {
3069 		struct kvm_msr_entry msr_ent = {.index = msr, .data = 0};
3070 
3071 		if (!msr_info->host_initiated)
3072 			return 1;
3073 		if (kvm_get_msr_feature(&msr_ent))
3074 			return 1;
3075 		if (data & ~msr_ent.data)
3076 			return 1;
3077 
3078 		vcpu->arch.perf_capabilities = data;
3079 
3080 		return 0;
3081 		}
3082 	case MSR_EFER:
3083 		return set_efer(vcpu, msr_info);
3084 	case MSR_K7_HWCR:
3085 		data &= ~(u64)0x40;	/* ignore flush filter disable */
3086 		data &= ~(u64)0x100;	/* ignore ignne emulation enable */
3087 		data &= ~(u64)0x8;	/* ignore TLB cache disable */
3088 
3089 		/* Handle McStatusWrEn */
3090 		if (data == BIT_ULL(18)) {
3091 			vcpu->arch.msr_hwcr = data;
3092 		} else if (data != 0) {
3093 			vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
3094 				    data);
3095 			return 1;
3096 		}
3097 		break;
3098 	case MSR_FAM10H_MMIO_CONF_BASE:
3099 		if (data != 0) {
3100 			vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
3101 				    "0x%llx\n", data);
3102 			return 1;
3103 		}
3104 		break;
3105 	case MSR_IA32_DEBUGCTLMSR:
3106 		if (!data) {
3107 			/* We support the non-activated case already */
3108 			break;
3109 		} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
3110 			/* Values other than LBR and BTF are vendor-specific,
3111 			   thus reserved and should throw a #GP */
3112 			return 1;
3113 		} else if (report_ignored_msrs)
3114 			vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
3115 				    __func__, data);
3116 		break;
3117 	case 0x200 ... 0x2ff:
3118 		return kvm_mtrr_set_msr(vcpu, msr, data);
3119 	case MSR_IA32_APICBASE:
3120 		return kvm_set_apic_base(vcpu, msr_info);
3121 	case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
3122 		return kvm_x2apic_msr_write(vcpu, msr, data);
3123 	case MSR_IA32_TSCDEADLINE:
3124 		kvm_set_lapic_tscdeadline_msr(vcpu, data);
3125 		break;
3126 	case MSR_IA32_TSC_ADJUST:
3127 		if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) {
3128 			if (!msr_info->host_initiated) {
3129 				s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
3130 				adjust_tsc_offset_guest(vcpu, adj);
3131 				/* Before back to guest, tsc_timestamp must be adjusted
3132 				 * as well, otherwise guest's percpu pvclock time could jump.
3133 				 */
3134 				kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
3135 			}
3136 			vcpu->arch.ia32_tsc_adjust_msr = data;
3137 		}
3138 		break;
3139 	case MSR_IA32_MISC_ENABLE:
3140 		if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) &&
3141 		    ((vcpu->arch.ia32_misc_enable_msr ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) {
3142 			if (!guest_cpuid_has(vcpu, X86_FEATURE_XMM3))
3143 				return 1;
3144 			vcpu->arch.ia32_misc_enable_msr = data;
3145 			kvm_update_cpuid_runtime(vcpu);
3146 		} else {
3147 			vcpu->arch.ia32_misc_enable_msr = data;
3148 		}
3149 		break;
3150 	case MSR_IA32_SMBASE:
3151 		if (!msr_info->host_initiated)
3152 			return 1;
3153 		vcpu->arch.smbase = data;
3154 		break;
3155 	case MSR_IA32_POWER_CTL:
3156 		vcpu->arch.msr_ia32_power_ctl = data;
3157 		break;
3158 	case MSR_IA32_TSC:
3159 		if (msr_info->host_initiated) {
3160 			kvm_synchronize_tsc(vcpu, data);
3161 		} else {
3162 			u64 adj = kvm_compute_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset;
3163 			adjust_tsc_offset_guest(vcpu, adj);
3164 			vcpu->arch.ia32_tsc_adjust_msr += adj;
3165 		}
3166 		break;
3167 	case MSR_IA32_XSS:
3168 		if (!msr_info->host_initiated &&
3169 		    !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
3170 			return 1;
3171 		/*
3172 		 * KVM supports exposing PT to the guest, but does not support
3173 		 * IA32_XSS[bit 8]. Guests have to use RDMSR/WRMSR rather than
3174 		 * XSAVES/XRSTORS to save/restore PT MSRs.
3175 		 */
3176 		if (data & ~supported_xss)
3177 			return 1;
3178 		vcpu->arch.ia32_xss = data;
3179 		kvm_update_cpuid_runtime(vcpu);
3180 		break;
3181 	case MSR_SMI_COUNT:
3182 		if (!msr_info->host_initiated)
3183 			return 1;
3184 		vcpu->arch.smi_count = data;
3185 		break;
3186 	case MSR_KVM_WALL_CLOCK_NEW:
3187 		if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
3188 			return 1;
3189 
3190 		kvm_write_wall_clock(vcpu->kvm, data);
3191 		break;
3192 	case MSR_KVM_WALL_CLOCK:
3193 		if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
3194 			return 1;
3195 
3196 		kvm_write_wall_clock(vcpu->kvm, data);
3197 		break;
3198 	case MSR_KVM_SYSTEM_TIME_NEW:
3199 		if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
3200 			return 1;
3201 
3202 		kvm_write_system_time(vcpu, data, false, msr_info->host_initiated);
3203 		break;
3204 	case MSR_KVM_SYSTEM_TIME:
3205 		if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
3206 			return 1;
3207 
3208 		kvm_write_system_time(vcpu, data, true,  msr_info->host_initiated);
3209 		break;
3210 	case MSR_KVM_ASYNC_PF_EN:
3211 		if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
3212 			return 1;
3213 
3214 		if (kvm_pv_enable_async_pf(vcpu, data))
3215 			return 1;
3216 		break;
3217 	case MSR_KVM_ASYNC_PF_INT:
3218 		if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
3219 			return 1;
3220 
3221 		if (kvm_pv_enable_async_pf_int(vcpu, data))
3222 			return 1;
3223 		break;
3224 	case MSR_KVM_ASYNC_PF_ACK:
3225 		if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
3226 			return 1;
3227 		if (data & 0x1) {
3228 			vcpu->arch.apf.pageready_pending = false;
3229 			kvm_check_async_pf_completion(vcpu);
3230 		}
3231 		break;
3232 	case MSR_KVM_STEAL_TIME:
3233 		if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME))
3234 			return 1;
3235 
3236 		if (unlikely(!sched_info_on()))
3237 			return 1;
3238 
3239 		if (data & KVM_STEAL_RESERVED_MASK)
3240 			return 1;
3241 
3242 		vcpu->arch.st.msr_val = data;
3243 
3244 		if (!(data & KVM_MSR_ENABLED))
3245 			break;
3246 
3247 		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
3248 
3249 		break;
3250 	case MSR_KVM_PV_EOI_EN:
3251 		if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI))
3252 			return 1;
3253 
3254 		if (kvm_lapic_enable_pv_eoi(vcpu, data, sizeof(u8)))
3255 			return 1;
3256 		break;
3257 
3258 	case MSR_KVM_POLL_CONTROL:
3259 		if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL))
3260 			return 1;
3261 
3262 		/* only enable bit supported */
3263 		if (data & (-1ULL << 1))
3264 			return 1;
3265 
3266 		vcpu->arch.msr_kvm_poll_control = data;
3267 		break;
3268 
3269 	case MSR_IA32_MCG_CTL:
3270 	case MSR_IA32_MCG_STATUS:
3271 	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
3272 		return set_msr_mce(vcpu, msr_info);
3273 
3274 	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
3275 	case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
3276 		pr = true;
3277 		fallthrough;
3278 	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
3279 	case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
3280 		if (kvm_pmu_is_valid_msr(vcpu, msr))
3281 			return kvm_pmu_set_msr(vcpu, msr_info);
3282 
3283 		if (pr || data != 0)
3284 			vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
3285 				    "0x%x data 0x%llx\n", msr, data);
3286 		break;
3287 	case MSR_K7_CLK_CTL:
3288 		/*
3289 		 * Ignore all writes to this no longer documented MSR.
3290 		 * Writes are only relevant for old K7 processors,
3291 		 * all pre-dating SVM, but a recommended workaround from
3292 		 * AMD for these chips. It is possible to specify the
3293 		 * affected processor models on the command line, hence
3294 		 * the need to ignore the workaround.
3295 		 */
3296 		break;
3297 	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
3298 	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
3299 	case HV_X64_MSR_SYNDBG_OPTIONS:
3300 	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
3301 	case HV_X64_MSR_CRASH_CTL:
3302 	case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
3303 	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
3304 	case HV_X64_MSR_TSC_EMULATION_CONTROL:
3305 	case HV_X64_MSR_TSC_EMULATION_STATUS:
3306 		return kvm_hv_set_msr_common(vcpu, msr, data,
3307 					     msr_info->host_initiated);
3308 	case MSR_IA32_BBL_CR_CTL3:
3309 		/* Drop writes to this legacy MSR -- see rdmsr
3310 		 * counterpart for further detail.
3311 		 */
3312 		if (report_ignored_msrs)
3313 			vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
3314 				msr, data);
3315 		break;
3316 	case MSR_AMD64_OSVW_ID_LENGTH:
3317 		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
3318 			return 1;
3319 		vcpu->arch.osvw.length = data;
3320 		break;
3321 	case MSR_AMD64_OSVW_STATUS:
3322 		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
3323 			return 1;
3324 		vcpu->arch.osvw.status = data;
3325 		break;
3326 	case MSR_PLATFORM_INFO:
3327 		if (!msr_info->host_initiated ||
3328 		    (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) &&
3329 		     cpuid_fault_enabled(vcpu)))
3330 			return 1;
3331 		vcpu->arch.msr_platform_info = data;
3332 		break;
3333 	case MSR_MISC_FEATURES_ENABLES:
3334 		if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT ||
3335 		    (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3336 		     !supports_cpuid_fault(vcpu)))
3337 			return 1;
3338 		vcpu->arch.msr_misc_features_enables = data;
3339 		break;
3340 	default:
3341 		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
3342 			return xen_hvm_config(vcpu, data);
3343 		if (kvm_pmu_is_valid_msr(vcpu, msr))
3344 			return kvm_pmu_set_msr(vcpu, msr_info);
3345 		return KVM_MSR_RET_INVALID;
3346 	}
3347 	return 0;
3348 }
3349 EXPORT_SYMBOL_GPL(kvm_set_msr_common);
3350 
get_msr_mce(struct kvm_vcpu * vcpu,u32 msr,u64 * pdata,bool host)3351 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
3352 {
3353 	u64 data;
3354 	u64 mcg_cap = vcpu->arch.mcg_cap;
3355 	unsigned bank_num = mcg_cap & 0xff;
3356 
3357 	switch (msr) {
3358 	case MSR_IA32_P5_MC_ADDR:
3359 	case MSR_IA32_P5_MC_TYPE:
3360 		data = 0;
3361 		break;
3362 	case MSR_IA32_MCG_CAP:
3363 		data = vcpu->arch.mcg_cap;
3364 		break;
3365 	case MSR_IA32_MCG_CTL:
3366 		if (!(mcg_cap & MCG_CTL_P) && !host)
3367 			return 1;
3368 		data = vcpu->arch.mcg_ctl;
3369 		break;
3370 	case MSR_IA32_MCG_STATUS:
3371 		data = vcpu->arch.mcg_status;
3372 		break;
3373 	default:
3374 		if (msr >= MSR_IA32_MC0_CTL &&
3375 		    msr < MSR_IA32_MCx_CTL(bank_num)) {
3376 			u32 offset = array_index_nospec(
3377 				msr - MSR_IA32_MC0_CTL,
3378 				MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL);
3379 
3380 			data = vcpu->arch.mce_banks[offset];
3381 			break;
3382 		}
3383 		return 1;
3384 	}
3385 	*pdata = data;
3386 	return 0;
3387 }
3388 
kvm_get_msr_common(struct kvm_vcpu * vcpu,struct msr_data * msr_info)3389 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3390 {
3391 	switch (msr_info->index) {
3392 	case MSR_IA32_PLATFORM_ID:
3393 	case MSR_IA32_EBL_CR_POWERON:
3394 	case MSR_IA32_DEBUGCTLMSR:
3395 	case MSR_IA32_LASTBRANCHFROMIP:
3396 	case MSR_IA32_LASTBRANCHTOIP:
3397 	case MSR_IA32_LASTINTFROMIP:
3398 	case MSR_IA32_LASTINTTOIP:
3399 	case MSR_K8_SYSCFG:
3400 	case MSR_K8_TSEG_ADDR:
3401 	case MSR_K8_TSEG_MASK:
3402 	case MSR_VM_HSAVE_PA:
3403 	case MSR_K8_INT_PENDING_MSG:
3404 	case MSR_AMD64_NB_CFG:
3405 	case MSR_FAM10H_MMIO_CONF_BASE:
3406 	case MSR_AMD64_BU_CFG2:
3407 	case MSR_IA32_PERF_CTL:
3408 	case MSR_AMD64_DC_CFG:
3409 	case MSR_F15H_EX_CFG:
3410 	/*
3411 	 * Intel Sandy Bridge CPUs must support the RAPL (running average power
3412 	 * limit) MSRs. Just return 0, as we do not want to expose the host
3413 	 * data here. Do not conditionalize this on CPUID, as KVM does not do
3414 	 * so for existing CPU-specific MSRs.
3415 	 */
3416 	case MSR_RAPL_POWER_UNIT:
3417 	case MSR_PP0_ENERGY_STATUS:	/* Power plane 0 (core) */
3418 	case MSR_PP1_ENERGY_STATUS:	/* Power plane 1 (graphics uncore) */
3419 	case MSR_PKG_ENERGY_STATUS:	/* Total package */
3420 	case MSR_DRAM_ENERGY_STATUS:	/* DRAM controller */
3421 		msr_info->data = 0;
3422 		break;
3423 	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
3424 	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
3425 	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
3426 	case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
3427 	case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
3428 		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
3429 			return kvm_pmu_get_msr(vcpu, msr_info);
3430 		msr_info->data = 0;
3431 		break;
3432 	case MSR_IA32_UCODE_REV:
3433 		msr_info->data = vcpu->arch.microcode_version;
3434 		break;
3435 	case MSR_IA32_ARCH_CAPABILITIES:
3436 		if (!msr_info->host_initiated &&
3437 		    !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
3438 			return 1;
3439 		msr_info->data = vcpu->arch.arch_capabilities;
3440 		break;
3441 	case MSR_IA32_PERF_CAPABILITIES:
3442 		if (!msr_info->host_initiated &&
3443 		    !guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
3444 			return 1;
3445 		msr_info->data = vcpu->arch.perf_capabilities;
3446 		break;
3447 	case MSR_IA32_POWER_CTL:
3448 		msr_info->data = vcpu->arch.msr_ia32_power_ctl;
3449 		break;
3450 	case MSR_IA32_TSC: {
3451 		/*
3452 		 * Intel SDM states that MSR_IA32_TSC read adds the TSC offset
3453 		 * even when not intercepted. AMD manual doesn't explicitly
3454 		 * state this but appears to behave the same.
3455 		 *
3456 		 * On userspace reads and writes, however, we unconditionally
3457 		 * return L1's TSC value to ensure backwards-compatible
3458 		 * behavior for migration.
3459 		 */
3460 		u64 tsc_offset = msr_info->host_initiated ? vcpu->arch.l1_tsc_offset :
3461 							    vcpu->arch.tsc_offset;
3462 
3463 		msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + tsc_offset;
3464 		break;
3465 	}
3466 	case MSR_MTRRcap:
3467 	case 0x200 ... 0x2ff:
3468 		return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
3469 	case 0xcd: /* fsb frequency */
3470 		msr_info->data = 3;
3471 		break;
3472 		/*
3473 		 * MSR_EBC_FREQUENCY_ID
3474 		 * Conservative value valid for even the basic CPU models.
3475 		 * Models 0,1: 000 in bits 23:21 indicating a bus speed of
3476 		 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz,
3477 		 * and 266MHz for model 3, or 4. Set Core Clock
3478 		 * Frequency to System Bus Frequency Ratio to 1 (bits
3479 		 * 31:24) even though these are only valid for CPU
3480 		 * models > 2, however guests may end up dividing or
3481 		 * multiplying by zero otherwise.
3482 		 */
3483 	case MSR_EBC_FREQUENCY_ID:
3484 		msr_info->data = 1 << 24;
3485 		break;
3486 	case MSR_IA32_APICBASE:
3487 		msr_info->data = kvm_get_apic_base(vcpu);
3488 		break;
3489 	case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
3490 		return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
3491 	case MSR_IA32_TSCDEADLINE:
3492 		msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
3493 		break;
3494 	case MSR_IA32_TSC_ADJUST:
3495 		msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
3496 		break;
3497 	case MSR_IA32_MISC_ENABLE:
3498 		msr_info->data = vcpu->arch.ia32_misc_enable_msr;
3499 		break;
3500 	case MSR_IA32_SMBASE:
3501 		if (!msr_info->host_initiated)
3502 			return 1;
3503 		msr_info->data = vcpu->arch.smbase;
3504 		break;
3505 	case MSR_SMI_COUNT:
3506 		msr_info->data = vcpu->arch.smi_count;
3507 		break;
3508 	case MSR_IA32_PERF_STATUS:
3509 		/* TSC increment by tick */
3510 		msr_info->data = 1000ULL;
3511 		/* CPU multiplier */
3512 		msr_info->data |= (((uint64_t)4ULL) << 40);
3513 		break;
3514 	case MSR_EFER:
3515 		msr_info->data = vcpu->arch.efer;
3516 		break;
3517 	case MSR_KVM_WALL_CLOCK:
3518 		if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
3519 			return 1;
3520 
3521 		msr_info->data = vcpu->kvm->arch.wall_clock;
3522 		break;
3523 	case MSR_KVM_WALL_CLOCK_NEW:
3524 		if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
3525 			return 1;
3526 
3527 		msr_info->data = vcpu->kvm->arch.wall_clock;
3528 		break;
3529 	case MSR_KVM_SYSTEM_TIME:
3530 		if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
3531 			return 1;
3532 
3533 		msr_info->data = vcpu->arch.time;
3534 		break;
3535 	case MSR_KVM_SYSTEM_TIME_NEW:
3536 		if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
3537 			return 1;
3538 
3539 		msr_info->data = vcpu->arch.time;
3540 		break;
3541 	case MSR_KVM_ASYNC_PF_EN:
3542 		if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
3543 			return 1;
3544 
3545 		msr_info->data = vcpu->arch.apf.msr_en_val;
3546 		break;
3547 	case MSR_KVM_ASYNC_PF_INT:
3548 		if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
3549 			return 1;
3550 
3551 		msr_info->data = vcpu->arch.apf.msr_int_val;
3552 		break;
3553 	case MSR_KVM_ASYNC_PF_ACK:
3554 		if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
3555 			return 1;
3556 
3557 		msr_info->data = 0;
3558 		break;
3559 	case MSR_KVM_STEAL_TIME:
3560 		if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME))
3561 			return 1;
3562 
3563 		msr_info->data = vcpu->arch.st.msr_val;
3564 		break;
3565 	case MSR_KVM_PV_EOI_EN:
3566 		if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI))
3567 			return 1;
3568 
3569 		msr_info->data = vcpu->arch.pv_eoi.msr_val;
3570 		break;
3571 	case MSR_KVM_POLL_CONTROL:
3572 		if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL))
3573 			return 1;
3574 
3575 		msr_info->data = vcpu->arch.msr_kvm_poll_control;
3576 		break;
3577 	case MSR_IA32_P5_MC_ADDR:
3578 	case MSR_IA32_P5_MC_TYPE:
3579 	case MSR_IA32_MCG_CAP:
3580 	case MSR_IA32_MCG_CTL:
3581 	case MSR_IA32_MCG_STATUS:
3582 	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
3583 		return get_msr_mce(vcpu, msr_info->index, &msr_info->data,
3584 				   msr_info->host_initiated);
3585 	case MSR_IA32_XSS:
3586 		if (!msr_info->host_initiated &&
3587 		    !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
3588 			return 1;
3589 		msr_info->data = vcpu->arch.ia32_xss;
3590 		break;
3591 	case MSR_K7_CLK_CTL:
3592 		/*
3593 		 * Provide expected ramp-up count for K7. All other
3594 		 * are set to zero, indicating minimum divisors for
3595 		 * every field.
3596 		 *
3597 		 * This prevents guest kernels on AMD host with CPU
3598 		 * type 6, model 8 and higher from exploding due to
3599 		 * the rdmsr failing.
3600 		 */
3601 		msr_info->data = 0x20000000;
3602 		break;
3603 	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
3604 	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
3605 	case HV_X64_MSR_SYNDBG_OPTIONS:
3606 	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
3607 	case HV_X64_MSR_CRASH_CTL:
3608 	case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
3609 	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
3610 	case HV_X64_MSR_TSC_EMULATION_CONTROL:
3611 	case HV_X64_MSR_TSC_EMULATION_STATUS:
3612 		return kvm_hv_get_msr_common(vcpu,
3613 					     msr_info->index, &msr_info->data,
3614 					     msr_info->host_initiated);
3615 	case MSR_IA32_BBL_CR_CTL3:
3616 		/* This legacy MSR exists but isn't fully documented in current
3617 		 * silicon.  It is however accessed by winxp in very narrow
3618 		 * scenarios where it sets bit #19, itself documented as
3619 		 * a "reserved" bit.  Best effort attempt to source coherent
3620 		 * read data here should the balance of the register be
3621 		 * interpreted by the guest:
3622 		 *
3623 		 * L2 cache control register 3: 64GB range, 256KB size,
3624 		 * enabled, latency 0x1, configured
3625 		 */
3626 		msr_info->data = 0xbe702111;
3627 		break;
3628 	case MSR_AMD64_OSVW_ID_LENGTH:
3629 		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
3630 			return 1;
3631 		msr_info->data = vcpu->arch.osvw.length;
3632 		break;
3633 	case MSR_AMD64_OSVW_STATUS:
3634 		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
3635 			return 1;
3636 		msr_info->data = vcpu->arch.osvw.status;
3637 		break;
3638 	case MSR_PLATFORM_INFO:
3639 		if (!msr_info->host_initiated &&
3640 		    !vcpu->kvm->arch.guest_can_read_msr_platform_info)
3641 			return 1;
3642 		msr_info->data = vcpu->arch.msr_platform_info;
3643 		break;
3644 	case MSR_MISC_FEATURES_ENABLES:
3645 		msr_info->data = vcpu->arch.msr_misc_features_enables;
3646 		break;
3647 	case MSR_K7_HWCR:
3648 		msr_info->data = vcpu->arch.msr_hwcr;
3649 		break;
3650 	default:
3651 		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
3652 			return kvm_pmu_get_msr(vcpu, msr_info);
3653 		return KVM_MSR_RET_INVALID;
3654 	}
3655 	return 0;
3656 }
3657 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
3658 
3659 /*
3660  * Read or write a bunch of msrs. All parameters are kernel addresses.
3661  *
3662  * @return number of msrs set successfully.
3663  */
__msr_io(struct kvm_vcpu * vcpu,struct kvm_msrs * msrs,struct kvm_msr_entry * entries,int (* do_msr)(struct kvm_vcpu * vcpu,unsigned index,u64 * data))3664 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
3665 		    struct kvm_msr_entry *entries,
3666 		    int (*do_msr)(struct kvm_vcpu *vcpu,
3667 				  unsigned index, u64 *data))
3668 {
3669 	int i;
3670 
3671 	for (i = 0; i < msrs->nmsrs; ++i)
3672 		if (do_msr(vcpu, entries[i].index, &entries[i].data))
3673 			break;
3674 
3675 	return i;
3676 }
3677 
3678 /*
3679  * Read or write a bunch of msrs. Parameters are user addresses.
3680  *
3681  * @return number of msrs set successfully.
3682  */
msr_io(struct kvm_vcpu * vcpu,struct kvm_msrs __user * user_msrs,int (* do_msr)(struct kvm_vcpu * vcpu,unsigned index,u64 * data),int writeback)3683 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
3684 		  int (*do_msr)(struct kvm_vcpu *vcpu,
3685 				unsigned index, u64 *data),
3686 		  int writeback)
3687 {
3688 	struct kvm_msrs msrs;
3689 	struct kvm_msr_entry *entries;
3690 	int r, n;
3691 	unsigned size;
3692 
3693 	r = -EFAULT;
3694 	if (copy_from_user(&msrs, user_msrs, sizeof(msrs)))
3695 		goto out;
3696 
3697 	r = -E2BIG;
3698 	if (msrs.nmsrs >= MAX_IO_MSRS)
3699 		goto out;
3700 
3701 	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
3702 	entries = memdup_user(user_msrs->entries, size);
3703 	if (IS_ERR(entries)) {
3704 		r = PTR_ERR(entries);
3705 		goto out;
3706 	}
3707 
3708 	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
3709 	if (r < 0)
3710 		goto out_free;
3711 
3712 	r = -EFAULT;
3713 	if (writeback && copy_to_user(user_msrs->entries, entries, size))
3714 		goto out_free;
3715 
3716 	r = n;
3717 
3718 out_free:
3719 	kfree(entries);
3720 out:
3721 	return r;
3722 }
3723 
kvm_can_mwait_in_guest(void)3724 static inline bool kvm_can_mwait_in_guest(void)
3725 {
3726 	return boot_cpu_has(X86_FEATURE_MWAIT) &&
3727 		!boot_cpu_has_bug(X86_BUG_MONITOR) &&
3728 		boot_cpu_has(X86_FEATURE_ARAT);
3729 }
3730 
kvm_vm_ioctl_check_extension(struct kvm * kvm,long ext)3731 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
3732 {
3733 	int r = 0;
3734 
3735 	switch (ext) {
3736 	case KVM_CAP_IRQCHIP:
3737 	case KVM_CAP_HLT:
3738 	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
3739 	case KVM_CAP_SET_TSS_ADDR:
3740 	case KVM_CAP_EXT_CPUID:
3741 	case KVM_CAP_EXT_EMUL_CPUID:
3742 	case KVM_CAP_CLOCKSOURCE:
3743 	case KVM_CAP_PIT:
3744 	case KVM_CAP_NOP_IO_DELAY:
3745 	case KVM_CAP_MP_STATE:
3746 	case KVM_CAP_SYNC_MMU:
3747 	case KVM_CAP_USER_NMI:
3748 	case KVM_CAP_REINJECT_CONTROL:
3749 	case KVM_CAP_IRQ_INJECT_STATUS:
3750 	case KVM_CAP_IOEVENTFD:
3751 	case KVM_CAP_IOEVENTFD_NO_LENGTH:
3752 	case KVM_CAP_PIT2:
3753 	case KVM_CAP_PIT_STATE2:
3754 	case KVM_CAP_SET_IDENTITY_MAP_ADDR:
3755 	case KVM_CAP_XEN_HVM:
3756 	case KVM_CAP_VCPU_EVENTS:
3757 	case KVM_CAP_HYPERV:
3758 	case KVM_CAP_HYPERV_VAPIC:
3759 	case KVM_CAP_HYPERV_SPIN:
3760 	case KVM_CAP_HYPERV_SYNIC:
3761 	case KVM_CAP_HYPERV_SYNIC2:
3762 	case KVM_CAP_HYPERV_VP_INDEX:
3763 	case KVM_CAP_HYPERV_EVENTFD:
3764 	case KVM_CAP_HYPERV_TLBFLUSH:
3765 	case KVM_CAP_HYPERV_SEND_IPI:
3766 	case KVM_CAP_HYPERV_CPUID:
3767 	case KVM_CAP_PCI_SEGMENT:
3768 	case KVM_CAP_DEBUGREGS:
3769 	case KVM_CAP_X86_ROBUST_SINGLESTEP:
3770 	case KVM_CAP_XSAVE:
3771 	case KVM_CAP_ASYNC_PF:
3772 	case KVM_CAP_ASYNC_PF_INT:
3773 	case KVM_CAP_GET_TSC_KHZ:
3774 	case KVM_CAP_KVMCLOCK_CTRL:
3775 	case KVM_CAP_READONLY_MEM:
3776 	case KVM_CAP_HYPERV_TIME:
3777 	case KVM_CAP_IOAPIC_POLARITY_IGNORED:
3778 	case KVM_CAP_TSC_DEADLINE_TIMER:
3779 	case KVM_CAP_DISABLE_QUIRKS:
3780 	case KVM_CAP_SET_BOOT_CPU_ID:
3781  	case KVM_CAP_SPLIT_IRQCHIP:
3782 	case KVM_CAP_IMMEDIATE_EXIT:
3783 	case KVM_CAP_PMU_EVENT_FILTER:
3784 	case KVM_CAP_GET_MSR_FEATURES:
3785 	case KVM_CAP_MSR_PLATFORM_INFO:
3786 	case KVM_CAP_EXCEPTION_PAYLOAD:
3787 	case KVM_CAP_SET_GUEST_DEBUG:
3788 	case KVM_CAP_LAST_CPU:
3789 	case KVM_CAP_X86_USER_SPACE_MSR:
3790 	case KVM_CAP_X86_MSR_FILTER:
3791 	case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
3792 		r = 1;
3793 		break;
3794 	case KVM_CAP_SYNC_REGS:
3795 		r = KVM_SYNC_X86_VALID_FIELDS;
3796 		break;
3797 	case KVM_CAP_ADJUST_CLOCK:
3798 		r = KVM_CLOCK_TSC_STABLE;
3799 		break;
3800 	case KVM_CAP_X86_DISABLE_EXITS:
3801 		r |=  KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE |
3802 		      KVM_X86_DISABLE_EXITS_CSTATE;
3803 		if(kvm_can_mwait_in_guest())
3804 			r |= KVM_X86_DISABLE_EXITS_MWAIT;
3805 		break;
3806 	case KVM_CAP_X86_SMM:
3807 		/* SMBASE is usually relocated above 1M on modern chipsets,
3808 		 * and SMM handlers might indeed rely on 4G segment limits,
3809 		 * so do not report SMM to be available if real mode is
3810 		 * emulated via vm86 mode.  Still, do not go to great lengths
3811 		 * to avoid userspace's usage of the feature, because it is a
3812 		 * fringe case that is not enabled except via specific settings
3813 		 * of the module parameters.
3814 		 */
3815 		r = kvm_x86_ops.has_emulated_msr(MSR_IA32_SMBASE);
3816 		break;
3817 	case KVM_CAP_VAPIC:
3818 		r = !kvm_x86_ops.cpu_has_accelerated_tpr();
3819 		break;
3820 	case KVM_CAP_NR_VCPUS:
3821 		r = KVM_SOFT_MAX_VCPUS;
3822 		break;
3823 	case KVM_CAP_MAX_VCPUS:
3824 		r = KVM_MAX_VCPUS;
3825 		break;
3826 	case KVM_CAP_MAX_VCPU_ID:
3827 		r = KVM_MAX_VCPU_ID;
3828 		break;
3829 	case KVM_CAP_PV_MMU:	/* obsolete */
3830 		r = 0;
3831 		break;
3832 	case KVM_CAP_MCE:
3833 		r = KVM_MAX_MCE_BANKS;
3834 		break;
3835 	case KVM_CAP_XCRS:
3836 		r = boot_cpu_has(X86_FEATURE_XSAVE);
3837 		break;
3838 	case KVM_CAP_TSC_CONTROL:
3839 		r = kvm_has_tsc_control;
3840 		break;
3841 	case KVM_CAP_X2APIC_API:
3842 		r = KVM_X2APIC_API_VALID_FLAGS;
3843 		break;
3844 	case KVM_CAP_NESTED_STATE:
3845 		r = kvm_x86_ops.nested_ops->get_state ?
3846 			kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0;
3847 		break;
3848 	case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
3849 		r = kvm_x86_ops.enable_direct_tlbflush != NULL;
3850 		break;
3851 	case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
3852 		r = kvm_x86_ops.nested_ops->enable_evmcs != NULL;
3853 		break;
3854 	case KVM_CAP_SMALLER_MAXPHYADDR:
3855 		r = (int) allow_smaller_maxphyaddr;
3856 		break;
3857 	case KVM_CAP_STEAL_TIME:
3858 		r = sched_info_on();
3859 		break;
3860 	default:
3861 		break;
3862 	}
3863 	return r;
3864 
3865 }
3866 
kvm_arch_dev_ioctl(struct file * filp,unsigned int ioctl,unsigned long arg)3867 long kvm_arch_dev_ioctl(struct file *filp,
3868 			unsigned int ioctl, unsigned long arg)
3869 {
3870 	void __user *argp = (void __user *)arg;
3871 	long r;
3872 
3873 	switch (ioctl) {
3874 	case KVM_GET_MSR_INDEX_LIST: {
3875 		struct kvm_msr_list __user *user_msr_list = argp;
3876 		struct kvm_msr_list msr_list;
3877 		unsigned n;
3878 
3879 		r = -EFAULT;
3880 		if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
3881 			goto out;
3882 		n = msr_list.nmsrs;
3883 		msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs;
3884 		if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
3885 			goto out;
3886 		r = -E2BIG;
3887 		if (n < msr_list.nmsrs)
3888 			goto out;
3889 		r = -EFAULT;
3890 		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
3891 				 num_msrs_to_save * sizeof(u32)))
3892 			goto out;
3893 		if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
3894 				 &emulated_msrs,
3895 				 num_emulated_msrs * sizeof(u32)))
3896 			goto out;
3897 		r = 0;
3898 		break;
3899 	}
3900 	case KVM_GET_SUPPORTED_CPUID:
3901 	case KVM_GET_EMULATED_CPUID: {
3902 		struct kvm_cpuid2 __user *cpuid_arg = argp;
3903 		struct kvm_cpuid2 cpuid;
3904 
3905 		r = -EFAULT;
3906 		if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
3907 			goto out;
3908 
3909 		r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,
3910 					    ioctl);
3911 		if (r)
3912 			goto out;
3913 
3914 		r = -EFAULT;
3915 		if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
3916 			goto out;
3917 		r = 0;
3918 		break;
3919 	}
3920 	case KVM_X86_GET_MCE_CAP_SUPPORTED:
3921 		r = -EFAULT;
3922 		if (copy_to_user(argp, &kvm_mce_cap_supported,
3923 				 sizeof(kvm_mce_cap_supported)))
3924 			goto out;
3925 		r = 0;
3926 		break;
3927 	case KVM_GET_MSR_FEATURE_INDEX_LIST: {
3928 		struct kvm_msr_list __user *user_msr_list = argp;
3929 		struct kvm_msr_list msr_list;
3930 		unsigned int n;
3931 
3932 		r = -EFAULT;
3933 		if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
3934 			goto out;
3935 		n = msr_list.nmsrs;
3936 		msr_list.nmsrs = num_msr_based_features;
3937 		if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
3938 			goto out;
3939 		r = -E2BIG;
3940 		if (n < msr_list.nmsrs)
3941 			goto out;
3942 		r = -EFAULT;
3943 		if (copy_to_user(user_msr_list->indices, &msr_based_features,
3944 				 num_msr_based_features * sizeof(u32)))
3945 			goto out;
3946 		r = 0;
3947 		break;
3948 	}
3949 	case KVM_GET_MSRS:
3950 		r = msr_io(NULL, argp, do_get_msr_feature, 1);
3951 		break;
3952 	default:
3953 		r = -EINVAL;
3954 		break;
3955 	}
3956 out:
3957 	return r;
3958 }
3959 
wbinvd_ipi(void * garbage)3960 static void wbinvd_ipi(void *garbage)
3961 {
3962 	wbinvd();
3963 }
3964 
need_emulate_wbinvd(struct kvm_vcpu * vcpu)3965 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
3966 {
3967 	return kvm_arch_has_noncoherent_dma(vcpu->kvm);
3968 }
3969 
kvm_arch_vcpu_load(struct kvm_vcpu * vcpu,int cpu)3970 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3971 {
3972 	/* Address WBINVD may be executed by guest */
3973 	if (need_emulate_wbinvd(vcpu)) {
3974 		if (kvm_x86_ops.has_wbinvd_exit())
3975 			cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
3976 		else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
3977 			smp_call_function_single(vcpu->cpu,
3978 					wbinvd_ipi, NULL, 1);
3979 	}
3980 
3981 	kvm_x86_ops.vcpu_load(vcpu, cpu);
3982 
3983 	/* Save host pkru register if supported */
3984 	vcpu->arch.host_pkru = read_pkru();
3985 
3986 	/* Apply any externally detected TSC adjustments (due to suspend) */
3987 	if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
3988 		adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
3989 		vcpu->arch.tsc_offset_adjustment = 0;
3990 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
3991 	}
3992 
3993 	if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) {
3994 		s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
3995 				rdtsc() - vcpu->arch.last_host_tsc;
3996 		if (tsc_delta < 0)
3997 			mark_tsc_unstable("KVM discovered backwards TSC");
3998 
3999 		if (kvm_check_tsc_unstable()) {
4000 			u64 offset = kvm_compute_tsc_offset(vcpu,
4001 						vcpu->arch.last_guest_tsc);
4002 			kvm_vcpu_write_tsc_offset(vcpu, offset);
4003 			vcpu->arch.tsc_catchup = 1;
4004 		}
4005 
4006 		if (kvm_lapic_hv_timer_in_use(vcpu))
4007 			kvm_lapic_restart_hv_timer(vcpu);
4008 
4009 		/*
4010 		 * On a host with synchronized TSC, there is no need to update
4011 		 * kvmclock on vcpu->cpu migration
4012 		 */
4013 		if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
4014 			kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
4015 		if (vcpu->cpu != cpu)
4016 			kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu);
4017 		vcpu->cpu = cpu;
4018 	}
4019 
4020 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
4021 }
4022 
kvm_steal_time_set_preempted(struct kvm_vcpu * vcpu)4023 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
4024 {
4025 	struct kvm_host_map map;
4026 	struct kvm_steal_time *st;
4027 
4028 	/*
4029 	 * The vCPU can be marked preempted if and only if the VM-Exit was on
4030 	 * an instruction boundary and will not trigger guest emulation of any
4031 	 * kind (see vcpu_run).  Vendor specific code controls (conservatively)
4032 	 * when this is true, for example allowing the vCPU to be marked
4033 	 * preempted if and only if the VM-Exit was due to a host interrupt.
4034 	 */
4035 	if (!vcpu->arch.at_instruction_boundary) {
4036 		vcpu->stat.preemption_other++;
4037 		return;
4038 	}
4039 
4040 	vcpu->stat.preemption_reported++;
4041 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
4042 		return;
4043 
4044 	if (vcpu->arch.st.preempted)
4045 		return;
4046 
4047 	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
4048 			&vcpu->arch.st.cache, true))
4049 		return;
4050 
4051 	st = map.hva +
4052 		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
4053 
4054 	st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
4055 
4056 	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
4057 }
4058 
kvm_arch_vcpu_put(struct kvm_vcpu * vcpu)4059 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
4060 {
4061 	int idx;
4062 
4063 	if (vcpu->preempted)
4064 		vcpu->arch.preempted_in_kernel = !kvm_x86_ops.get_cpl(vcpu);
4065 
4066 	/*
4067 	 * Disable page faults because we're in atomic context here.
4068 	 * kvm_write_guest_offset_cached() would call might_fault()
4069 	 * that relies on pagefault_disable() to tell if there's a
4070 	 * bug. NOTE: the write to guest memory may not go through if
4071 	 * during postcopy live migration or if there's heavy guest
4072 	 * paging.
4073 	 */
4074 	pagefault_disable();
4075 	/*
4076 	 * kvm_memslots() will be called by
4077 	 * kvm_write_guest_offset_cached() so take the srcu lock.
4078 	 */
4079 	idx = srcu_read_lock(&vcpu->kvm->srcu);
4080 	kvm_steal_time_set_preempted(vcpu);
4081 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
4082 	pagefault_enable();
4083 	kvm_x86_ops.vcpu_put(vcpu);
4084 	vcpu->arch.last_host_tsc = rdtsc();
4085 	/*
4086 	 * If userspace has set any breakpoints or watchpoints, dr6 is restored
4087 	 * on every vmexit, but if not, we might have a stale dr6 from the
4088 	 * guest. do_debug expects dr6 to be cleared after it runs, do the same.
4089 	 */
4090 	set_debugreg(0, 6);
4091 }
4092 
kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu * vcpu,struct kvm_lapic_state * s)4093 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
4094 				    struct kvm_lapic_state *s)
4095 {
4096 	if (vcpu->arch.apicv_active)
4097 		kvm_x86_ops.sync_pir_to_irr(vcpu);
4098 
4099 	return kvm_apic_get_state(vcpu, s);
4100 }
4101 
kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu * vcpu,struct kvm_lapic_state * s)4102 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
4103 				    struct kvm_lapic_state *s)
4104 {
4105 	int r;
4106 
4107 	r = kvm_apic_set_state(vcpu, s);
4108 	if (r)
4109 		return r;
4110 	update_cr8_intercept(vcpu);
4111 
4112 	return 0;
4113 }
4114 
kvm_cpu_accept_dm_intr(struct kvm_vcpu * vcpu)4115 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
4116 {
4117 	/*
4118 	 * We can accept userspace's request for interrupt injection
4119 	 * as long as we have a place to store the interrupt number.
4120 	 * The actual injection will happen when the CPU is able to
4121 	 * deliver the interrupt.
4122 	 */
4123 	if (kvm_cpu_has_extint(vcpu))
4124 		return false;
4125 
4126 	/* Acknowledging ExtINT does not happen if LINT0 is masked.  */
4127 	return (!lapic_in_kernel(vcpu) ||
4128 		kvm_apic_accept_pic_intr(vcpu));
4129 }
4130 
kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu * vcpu)4131 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
4132 {
4133 	/*
4134 	 * Do not cause an interrupt window exit if an exception
4135 	 * is pending or an event needs reinjection; userspace
4136 	 * might want to inject the interrupt manually using KVM_SET_REGS
4137 	 * or KVM_SET_SREGS.  For that to work, we must be at an
4138 	 * instruction boundary and with no events half-injected.
4139 	 */
4140 	return (kvm_arch_interrupt_allowed(vcpu) &&
4141 		kvm_cpu_accept_dm_intr(vcpu) &&
4142 		!kvm_event_needs_reinjection(vcpu) &&
4143 		!vcpu->arch.exception.pending);
4144 }
4145 
kvm_vcpu_ioctl_interrupt(struct kvm_vcpu * vcpu,struct kvm_interrupt * irq)4146 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
4147 				    struct kvm_interrupt *irq)
4148 {
4149 	if (irq->irq >= KVM_NR_INTERRUPTS)
4150 		return -EINVAL;
4151 
4152 	if (!irqchip_in_kernel(vcpu->kvm)) {
4153 		kvm_queue_interrupt(vcpu, irq->irq, false);
4154 		kvm_make_request(KVM_REQ_EVENT, vcpu);
4155 		return 0;
4156 	}
4157 
4158 	/*
4159 	 * With in-kernel LAPIC, we only use this to inject EXTINT, so
4160 	 * fail for in-kernel 8259.
4161 	 */
4162 	if (pic_in_kernel(vcpu->kvm))
4163 		return -ENXIO;
4164 
4165 	if (vcpu->arch.pending_external_vector != -1)
4166 		return -EEXIST;
4167 
4168 	vcpu->arch.pending_external_vector = irq->irq;
4169 	kvm_make_request(KVM_REQ_EVENT, vcpu);
4170 	return 0;
4171 }
4172 
kvm_vcpu_ioctl_nmi(struct kvm_vcpu * vcpu)4173 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
4174 {
4175 	kvm_inject_nmi(vcpu);
4176 
4177 	return 0;
4178 }
4179 
kvm_vcpu_ioctl_smi(struct kvm_vcpu * vcpu)4180 static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
4181 {
4182 	kvm_make_request(KVM_REQ_SMI, vcpu);
4183 
4184 	return 0;
4185 }
4186 
vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu * vcpu,struct kvm_tpr_access_ctl * tac)4187 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
4188 					   struct kvm_tpr_access_ctl *tac)
4189 {
4190 	if (tac->flags)
4191 		return -EINVAL;
4192 	vcpu->arch.tpr_access_reporting = !!tac->enabled;
4193 	return 0;
4194 }
4195 
kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu * vcpu,u64 mcg_cap)4196 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
4197 					u64 mcg_cap)
4198 {
4199 	int r;
4200 	unsigned bank_num = mcg_cap & 0xff, bank;
4201 
4202 	r = -EINVAL;
4203 	if (!bank_num || bank_num > KVM_MAX_MCE_BANKS)
4204 		goto out;
4205 	if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000))
4206 		goto out;
4207 	r = 0;
4208 	vcpu->arch.mcg_cap = mcg_cap;
4209 	/* Init IA32_MCG_CTL to all 1s */
4210 	if (mcg_cap & MCG_CTL_P)
4211 		vcpu->arch.mcg_ctl = ~(u64)0;
4212 	/* Init IA32_MCi_CTL to all 1s */
4213 	for (bank = 0; bank < bank_num; bank++)
4214 		vcpu->arch.mce_banks[bank*4] = ~(u64)0;
4215 
4216 	kvm_x86_ops.setup_mce(vcpu);
4217 out:
4218 	return r;
4219 }
4220 
kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu * vcpu,struct kvm_x86_mce * mce)4221 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
4222 				      struct kvm_x86_mce *mce)
4223 {
4224 	u64 mcg_cap = vcpu->arch.mcg_cap;
4225 	unsigned bank_num = mcg_cap & 0xff;
4226 	u64 *banks = vcpu->arch.mce_banks;
4227 
4228 	if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
4229 		return -EINVAL;
4230 	/*
4231 	 * if IA32_MCG_CTL is not all 1s, the uncorrected error
4232 	 * reporting is disabled
4233 	 */
4234 	if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
4235 	    vcpu->arch.mcg_ctl != ~(u64)0)
4236 		return 0;
4237 	banks += 4 * mce->bank;
4238 	/*
4239 	 * if IA32_MCi_CTL is not all 1s, the uncorrected error
4240 	 * reporting is disabled for the bank
4241 	 */
4242 	if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
4243 		return 0;
4244 	if (mce->status & MCI_STATUS_UC) {
4245 		if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
4246 		    !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
4247 			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
4248 			return 0;
4249 		}
4250 		if (banks[1] & MCI_STATUS_VAL)
4251 			mce->status |= MCI_STATUS_OVER;
4252 		banks[2] = mce->addr;
4253 		banks[3] = mce->misc;
4254 		vcpu->arch.mcg_status = mce->mcg_status;
4255 		banks[1] = mce->status;
4256 		kvm_queue_exception(vcpu, MC_VECTOR);
4257 	} else if (!(banks[1] & MCI_STATUS_VAL)
4258 		   || !(banks[1] & MCI_STATUS_UC)) {
4259 		if (banks[1] & MCI_STATUS_VAL)
4260 			mce->status |= MCI_STATUS_OVER;
4261 		banks[2] = mce->addr;
4262 		banks[3] = mce->misc;
4263 		banks[1] = mce->status;
4264 	} else
4265 		banks[1] |= MCI_STATUS_OVER;
4266 	return 0;
4267 }
4268 
kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu * vcpu,struct kvm_vcpu_events * events)4269 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
4270 					       struct kvm_vcpu_events *events)
4271 {
4272 	process_nmi(vcpu);
4273 
4274 	if (kvm_check_request(KVM_REQ_SMI, vcpu))
4275 		process_smi(vcpu);
4276 
4277 	/*
4278 	 * In guest mode, payload delivery should be deferred,
4279 	 * so that the L1 hypervisor can intercept #PF before
4280 	 * CR2 is modified (or intercept #DB before DR6 is
4281 	 * modified under nVMX). Unless the per-VM capability,
4282 	 * KVM_CAP_EXCEPTION_PAYLOAD, is set, we may not defer the delivery of
4283 	 * an exception payload and handle after a KVM_GET_VCPU_EVENTS. Since we
4284 	 * opportunistically defer the exception payload, deliver it if the
4285 	 * capability hasn't been requested before processing a
4286 	 * KVM_GET_VCPU_EVENTS.
4287 	 */
4288 	if (!vcpu->kvm->arch.exception_payload_enabled &&
4289 	    vcpu->arch.exception.pending && vcpu->arch.exception.has_payload)
4290 		kvm_deliver_exception_payload(vcpu);
4291 
4292 	/*
4293 	 * The API doesn't provide the instruction length for software
4294 	 * exceptions, so don't report them. As long as the guest RIP
4295 	 * isn't advanced, we should expect to encounter the exception
4296 	 * again.
4297 	 */
4298 	if (kvm_exception_is_soft(vcpu->arch.exception.nr)) {
4299 		events->exception.injected = 0;
4300 		events->exception.pending = 0;
4301 	} else {
4302 		events->exception.injected = vcpu->arch.exception.injected;
4303 		events->exception.pending = vcpu->arch.exception.pending;
4304 		/*
4305 		 * For ABI compatibility, deliberately conflate
4306 		 * pending and injected exceptions when
4307 		 * KVM_CAP_EXCEPTION_PAYLOAD isn't enabled.
4308 		 */
4309 		if (!vcpu->kvm->arch.exception_payload_enabled)
4310 			events->exception.injected |=
4311 				vcpu->arch.exception.pending;
4312 	}
4313 	events->exception.nr = vcpu->arch.exception.nr;
4314 	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
4315 	events->exception.error_code = vcpu->arch.exception.error_code;
4316 	events->exception_has_payload = vcpu->arch.exception.has_payload;
4317 	events->exception_payload = vcpu->arch.exception.payload;
4318 
4319 	events->interrupt.injected =
4320 		vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft;
4321 	events->interrupt.nr = vcpu->arch.interrupt.nr;
4322 	events->interrupt.soft = 0;
4323 	events->interrupt.shadow = kvm_x86_ops.get_interrupt_shadow(vcpu);
4324 
4325 	events->nmi.injected = vcpu->arch.nmi_injected;
4326 	events->nmi.pending = vcpu->arch.nmi_pending != 0;
4327 	events->nmi.masked = kvm_x86_ops.get_nmi_mask(vcpu);
4328 	events->nmi.pad = 0;
4329 
4330 	events->sipi_vector = 0; /* never valid when reporting to user space */
4331 
4332 	events->smi.smm = is_smm(vcpu);
4333 	events->smi.pending = vcpu->arch.smi_pending;
4334 	events->smi.smm_inside_nmi =
4335 		!!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
4336 	events->smi.latched_init = kvm_lapic_latched_init(vcpu);
4337 
4338 	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
4339 			 | KVM_VCPUEVENT_VALID_SHADOW
4340 			 | KVM_VCPUEVENT_VALID_SMM);
4341 	if (vcpu->kvm->arch.exception_payload_enabled)
4342 		events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
4343 
4344 	memset(&events->reserved, 0, sizeof(events->reserved));
4345 }
4346 
4347 static void kvm_smm_changed(struct kvm_vcpu *vcpu);
4348 
kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu * vcpu,struct kvm_vcpu_events * events)4349 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
4350 					      struct kvm_vcpu_events *events)
4351 {
4352 	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
4353 			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR
4354 			      | KVM_VCPUEVENT_VALID_SHADOW
4355 			      | KVM_VCPUEVENT_VALID_SMM
4356 			      | KVM_VCPUEVENT_VALID_PAYLOAD))
4357 		return -EINVAL;
4358 
4359 	if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
4360 		if (!vcpu->kvm->arch.exception_payload_enabled)
4361 			return -EINVAL;
4362 		if (events->exception.pending)
4363 			events->exception.injected = 0;
4364 		else
4365 			events->exception_has_payload = 0;
4366 	} else {
4367 		events->exception.pending = 0;
4368 		events->exception_has_payload = 0;
4369 	}
4370 
4371 	if ((events->exception.injected || events->exception.pending) &&
4372 	    (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
4373 		return -EINVAL;
4374 
4375 	/* INITs are latched while in SMM */
4376 	if (events->flags & KVM_VCPUEVENT_VALID_SMM &&
4377 	    (events->smi.smm || events->smi.pending) &&
4378 	    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
4379 		return -EINVAL;
4380 
4381 	process_nmi(vcpu);
4382 	vcpu->arch.exception.injected = events->exception.injected;
4383 	vcpu->arch.exception.pending = events->exception.pending;
4384 	vcpu->arch.exception.nr = events->exception.nr;
4385 	vcpu->arch.exception.has_error_code = events->exception.has_error_code;
4386 	vcpu->arch.exception.error_code = events->exception.error_code;
4387 	vcpu->arch.exception.has_payload = events->exception_has_payload;
4388 	vcpu->arch.exception.payload = events->exception_payload;
4389 
4390 	vcpu->arch.interrupt.injected = events->interrupt.injected;
4391 	vcpu->arch.interrupt.nr = events->interrupt.nr;
4392 	vcpu->arch.interrupt.soft = events->interrupt.soft;
4393 	if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
4394 		kvm_x86_ops.set_interrupt_shadow(vcpu,
4395 						  events->interrupt.shadow);
4396 
4397 	vcpu->arch.nmi_injected = events->nmi.injected;
4398 	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
4399 		vcpu->arch.nmi_pending = events->nmi.pending;
4400 	kvm_x86_ops.set_nmi_mask(vcpu, events->nmi.masked);
4401 
4402 	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
4403 	    lapic_in_kernel(vcpu))
4404 		vcpu->arch.apic->sipi_vector = events->sipi_vector;
4405 
4406 	if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
4407 		if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
4408 			if (events->smi.smm)
4409 				vcpu->arch.hflags |= HF_SMM_MASK;
4410 			else
4411 				vcpu->arch.hflags &= ~HF_SMM_MASK;
4412 
4413 			kvm_x86_ops.nested_ops->leave_nested(vcpu);
4414 			kvm_smm_changed(vcpu);
4415 		}
4416 
4417 		vcpu->arch.smi_pending = events->smi.pending;
4418 
4419 		if (events->smi.smm) {
4420 			if (events->smi.smm_inside_nmi)
4421 				vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
4422 			else
4423 				vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
4424 		}
4425 
4426 		if (lapic_in_kernel(vcpu)) {
4427 			if (events->smi.latched_init)
4428 				set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
4429 			else
4430 				clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
4431 		}
4432 	}
4433 
4434 	kvm_make_request(KVM_REQ_EVENT, vcpu);
4435 
4436 	return 0;
4437 }
4438 
kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu * vcpu,struct kvm_debugregs * dbgregs)4439 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
4440 					     struct kvm_debugregs *dbgregs)
4441 {
4442 	unsigned long val;
4443 
4444 	memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
4445 	kvm_get_dr(vcpu, 6, &val);
4446 	dbgregs->dr6 = val;
4447 	dbgregs->dr7 = vcpu->arch.dr7;
4448 	dbgregs->flags = 0;
4449 	memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
4450 }
4451 
kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu * vcpu,struct kvm_debugregs * dbgregs)4452 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
4453 					    struct kvm_debugregs *dbgregs)
4454 {
4455 	if (dbgregs->flags)
4456 		return -EINVAL;
4457 
4458 	if (dbgregs->dr6 & ~0xffffffffull)
4459 		return -EINVAL;
4460 	if (dbgregs->dr7 & ~0xffffffffull)
4461 		return -EINVAL;
4462 
4463 	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
4464 	kvm_update_dr0123(vcpu);
4465 	vcpu->arch.dr6 = dbgregs->dr6;
4466 	vcpu->arch.dr7 = dbgregs->dr7;
4467 	kvm_update_dr7(vcpu);
4468 
4469 	return 0;
4470 }
4471 
4472 #define XSTATE_COMPACTION_ENABLED (1ULL << 63)
4473 
fill_xsave(u8 * dest,struct kvm_vcpu * vcpu)4474 static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
4475 {
4476 	struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave;
4477 	u64 xstate_bv = xsave->header.xfeatures;
4478 	u64 valid;
4479 
4480 	/*
4481 	 * Copy legacy XSAVE area, to avoid complications with CPUID
4482 	 * leaves 0 and 1 in the loop below.
4483 	 */
4484 	memcpy(dest, xsave, XSAVE_HDR_OFFSET);
4485 
4486 	/* Set XSTATE_BV */
4487 	xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
4488 	*(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
4489 
4490 	/*
4491 	 * Copy each region from the possibly compacted offset to the
4492 	 * non-compacted offset.
4493 	 */
4494 	valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
4495 	while (valid) {
4496 		u64 xfeature_mask = valid & -valid;
4497 		int xfeature_nr = fls64(xfeature_mask) - 1;
4498 		void *src = get_xsave_addr(xsave, xfeature_nr);
4499 
4500 		if (src) {
4501 			u32 size, offset, ecx, edx;
4502 			cpuid_count(XSTATE_CPUID, xfeature_nr,
4503 				    &size, &offset, &ecx, &edx);
4504 			if (xfeature_nr == XFEATURE_PKRU)
4505 				memcpy(dest + offset, &vcpu->arch.pkru,
4506 				       sizeof(vcpu->arch.pkru));
4507 			else
4508 				memcpy(dest + offset, src, size);
4509 
4510 		}
4511 
4512 		valid -= xfeature_mask;
4513 	}
4514 }
4515 
load_xsave(struct kvm_vcpu * vcpu,u8 * src)4516 static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
4517 {
4518 	struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave;
4519 	u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
4520 	u64 valid;
4521 
4522 	/*
4523 	 * Copy legacy XSAVE area, to avoid complications with CPUID
4524 	 * leaves 0 and 1 in the loop below.
4525 	 */
4526 	memcpy(xsave, src, XSAVE_HDR_OFFSET);
4527 
4528 	/* Set XSTATE_BV and possibly XCOMP_BV.  */
4529 	xsave->header.xfeatures = xstate_bv;
4530 	if (boot_cpu_has(X86_FEATURE_XSAVES))
4531 		xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;
4532 
4533 	/*
4534 	 * Copy each region from the non-compacted offset to the
4535 	 * possibly compacted offset.
4536 	 */
4537 	valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
4538 	while (valid) {
4539 		u64 xfeature_mask = valid & -valid;
4540 		int xfeature_nr = fls64(xfeature_mask) - 1;
4541 		void *dest = get_xsave_addr(xsave, xfeature_nr);
4542 
4543 		if (dest) {
4544 			u32 size, offset, ecx, edx;
4545 			cpuid_count(XSTATE_CPUID, xfeature_nr,
4546 				    &size, &offset, &ecx, &edx);
4547 			if (xfeature_nr == XFEATURE_PKRU)
4548 				memcpy(&vcpu->arch.pkru, src + offset,
4549 				       sizeof(vcpu->arch.pkru));
4550 			else
4551 				memcpy(dest, src + offset, size);
4552 		}
4553 
4554 		valid -= xfeature_mask;
4555 	}
4556 }
4557 
kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu * vcpu,struct kvm_xsave * guest_xsave)4558 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
4559 					 struct kvm_xsave *guest_xsave)
4560 {
4561 	if (boot_cpu_has(X86_FEATURE_XSAVE)) {
4562 		memset(guest_xsave, 0, sizeof(struct kvm_xsave));
4563 		fill_xsave((u8 *) guest_xsave->region, vcpu);
4564 	} else {
4565 		memcpy(guest_xsave->region,
4566 			&vcpu->arch.guest_fpu->state.fxsave,
4567 			sizeof(struct fxregs_state));
4568 		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
4569 			XFEATURE_MASK_FPSSE;
4570 	}
4571 }
4572 
4573 #define XSAVE_MXCSR_OFFSET 24
4574 
kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu * vcpu,struct kvm_xsave * guest_xsave)4575 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
4576 					struct kvm_xsave *guest_xsave)
4577 {
4578 	u64 xstate_bv =
4579 		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
4580 	u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)];
4581 
4582 	if (boot_cpu_has(X86_FEATURE_XSAVE)) {
4583 		/*
4584 		 * Here we allow setting states that are not present in
4585 		 * CPUID leaf 0xD, index 0, EDX:EAX.  This is for compatibility
4586 		 * with old userspace.
4587 		 */
4588 		if (xstate_bv & ~supported_xcr0 || mxcsr & ~mxcsr_feature_mask)
4589 			return -EINVAL;
4590 		load_xsave(vcpu, (u8 *)guest_xsave->region);
4591 	} else {
4592 		if (xstate_bv & ~XFEATURE_MASK_FPSSE ||
4593 			mxcsr & ~mxcsr_feature_mask)
4594 			return -EINVAL;
4595 		memcpy(&vcpu->arch.guest_fpu->state.fxsave,
4596 			guest_xsave->region, sizeof(struct fxregs_state));
4597 	}
4598 	return 0;
4599 }
4600 
kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu * vcpu,struct kvm_xcrs * guest_xcrs)4601 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
4602 					struct kvm_xcrs *guest_xcrs)
4603 {
4604 	if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
4605 		guest_xcrs->nr_xcrs = 0;
4606 		return;
4607 	}
4608 
4609 	guest_xcrs->nr_xcrs = 1;
4610 	guest_xcrs->flags = 0;
4611 	guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
4612 	guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
4613 }
4614 
kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu * vcpu,struct kvm_xcrs * guest_xcrs)4615 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
4616 				       struct kvm_xcrs *guest_xcrs)
4617 {
4618 	int i, r = 0;
4619 
4620 	if (!boot_cpu_has(X86_FEATURE_XSAVE))
4621 		return -EINVAL;
4622 
4623 	if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
4624 		return -EINVAL;
4625 
4626 	for (i = 0; i < guest_xcrs->nr_xcrs; i++)
4627 		/* Only support XCR0 currently */
4628 		if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
4629 			r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
4630 				guest_xcrs->xcrs[i].value);
4631 			break;
4632 		}
4633 	if (r)
4634 		r = -EINVAL;
4635 	return r;
4636 }
4637 
4638 /*
4639  * kvm_set_guest_paused() indicates to the guest kernel that it has been
4640  * stopped by the hypervisor.  This function will be called from the host only.
4641  * EINVAL is returned when the host attempts to set the flag for a guest that
4642  * does not support pv clocks.
4643  */
kvm_set_guest_paused(struct kvm_vcpu * vcpu)4644 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
4645 {
4646 	if (!vcpu->arch.pv_time_enabled)
4647 		return -EINVAL;
4648 	vcpu->arch.pvclock_set_guest_stopped_request = true;
4649 	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
4650 	return 0;
4651 }
4652 
kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu * vcpu,struct kvm_enable_cap * cap)4653 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
4654 				     struct kvm_enable_cap *cap)
4655 {
4656 	int r;
4657 	uint16_t vmcs_version;
4658 	void __user *user_ptr;
4659 
4660 	if (cap->flags)
4661 		return -EINVAL;
4662 
4663 	switch (cap->cap) {
4664 	case KVM_CAP_HYPERV_SYNIC2:
4665 		if (cap->args[0])
4666 			return -EINVAL;
4667 		fallthrough;
4668 
4669 	case KVM_CAP_HYPERV_SYNIC:
4670 		if (!irqchip_in_kernel(vcpu->kvm))
4671 			return -EINVAL;
4672 		return kvm_hv_activate_synic(vcpu, cap->cap ==
4673 					     KVM_CAP_HYPERV_SYNIC2);
4674 	case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
4675 		if (!kvm_x86_ops.nested_ops->enable_evmcs)
4676 			return -ENOTTY;
4677 		r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version);
4678 		if (!r) {
4679 			user_ptr = (void __user *)(uintptr_t)cap->args[0];
4680 			if (copy_to_user(user_ptr, &vmcs_version,
4681 					 sizeof(vmcs_version)))
4682 				r = -EFAULT;
4683 		}
4684 		return r;
4685 	case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
4686 		if (!kvm_x86_ops.enable_direct_tlbflush)
4687 			return -ENOTTY;
4688 
4689 		return kvm_x86_ops.enable_direct_tlbflush(vcpu);
4690 
4691 	case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
4692 		vcpu->arch.pv_cpuid.enforce = cap->args[0];
4693 		if (vcpu->arch.pv_cpuid.enforce)
4694 			kvm_update_pv_runtime(vcpu);
4695 
4696 		return 0;
4697 
4698 	default:
4699 		return -EINVAL;
4700 	}
4701 }
4702 
kvm_arch_vcpu_ioctl(struct file * filp,unsigned int ioctl,unsigned long arg)4703 long kvm_arch_vcpu_ioctl(struct file *filp,
4704 			 unsigned int ioctl, unsigned long arg)
4705 {
4706 	struct kvm_vcpu *vcpu = filp->private_data;
4707 	void __user *argp = (void __user *)arg;
4708 	int r;
4709 	union {
4710 		struct kvm_lapic_state *lapic;
4711 		struct kvm_xsave *xsave;
4712 		struct kvm_xcrs *xcrs;
4713 		void *buffer;
4714 	} u;
4715 
4716 	vcpu_load(vcpu);
4717 
4718 	u.buffer = NULL;
4719 	switch (ioctl) {
4720 	case KVM_GET_LAPIC: {
4721 		r = -EINVAL;
4722 		if (!lapic_in_kernel(vcpu))
4723 			goto out;
4724 		u.lapic = kzalloc(sizeof(struct kvm_lapic_state),
4725 				GFP_KERNEL_ACCOUNT);
4726 
4727 		r = -ENOMEM;
4728 		if (!u.lapic)
4729 			goto out;
4730 		r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
4731 		if (r)
4732 			goto out;
4733 		r = -EFAULT;
4734 		if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
4735 			goto out;
4736 		r = 0;
4737 		break;
4738 	}
4739 	case KVM_SET_LAPIC: {
4740 		r = -EINVAL;
4741 		if (!lapic_in_kernel(vcpu))
4742 			goto out;
4743 		u.lapic = memdup_user(argp, sizeof(*u.lapic));
4744 		if (IS_ERR(u.lapic)) {
4745 			r = PTR_ERR(u.lapic);
4746 			goto out_nofree;
4747 		}
4748 
4749 		r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
4750 		break;
4751 	}
4752 	case KVM_INTERRUPT: {
4753 		struct kvm_interrupt irq;
4754 
4755 		r = -EFAULT;
4756 		if (copy_from_user(&irq, argp, sizeof(irq)))
4757 			goto out;
4758 		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
4759 		break;
4760 	}
4761 	case KVM_NMI: {
4762 		r = kvm_vcpu_ioctl_nmi(vcpu);
4763 		break;
4764 	}
4765 	case KVM_SMI: {
4766 		r = kvm_vcpu_ioctl_smi(vcpu);
4767 		break;
4768 	}
4769 	case KVM_SET_CPUID: {
4770 		struct kvm_cpuid __user *cpuid_arg = argp;
4771 		struct kvm_cpuid cpuid;
4772 
4773 		r = -EFAULT;
4774 		if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
4775 			goto out;
4776 		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
4777 		break;
4778 	}
4779 	case KVM_SET_CPUID2: {
4780 		struct kvm_cpuid2 __user *cpuid_arg = argp;
4781 		struct kvm_cpuid2 cpuid;
4782 
4783 		r = -EFAULT;
4784 		if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
4785 			goto out;
4786 		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
4787 					      cpuid_arg->entries);
4788 		break;
4789 	}
4790 	case KVM_GET_CPUID2: {
4791 		struct kvm_cpuid2 __user *cpuid_arg = argp;
4792 		struct kvm_cpuid2 cpuid;
4793 
4794 		r = -EFAULT;
4795 		if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
4796 			goto out;
4797 		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
4798 					      cpuid_arg->entries);
4799 		if (r)
4800 			goto out;
4801 		r = -EFAULT;
4802 		if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
4803 			goto out;
4804 		r = 0;
4805 		break;
4806 	}
4807 	case KVM_GET_MSRS: {
4808 		int idx = srcu_read_lock(&vcpu->kvm->srcu);
4809 		r = msr_io(vcpu, argp, do_get_msr, 1);
4810 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
4811 		break;
4812 	}
4813 	case KVM_SET_MSRS: {
4814 		int idx = srcu_read_lock(&vcpu->kvm->srcu);
4815 		r = msr_io(vcpu, argp, do_set_msr, 0);
4816 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
4817 		break;
4818 	}
4819 	case KVM_TPR_ACCESS_REPORTING: {
4820 		struct kvm_tpr_access_ctl tac;
4821 
4822 		r = -EFAULT;
4823 		if (copy_from_user(&tac, argp, sizeof(tac)))
4824 			goto out;
4825 		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
4826 		if (r)
4827 			goto out;
4828 		r = -EFAULT;
4829 		if (copy_to_user(argp, &tac, sizeof(tac)))
4830 			goto out;
4831 		r = 0;
4832 		break;
4833 	};
4834 	case KVM_SET_VAPIC_ADDR: {
4835 		struct kvm_vapic_addr va;
4836 		int idx;
4837 
4838 		r = -EINVAL;
4839 		if (!lapic_in_kernel(vcpu))
4840 			goto out;
4841 		r = -EFAULT;
4842 		if (copy_from_user(&va, argp, sizeof(va)))
4843 			goto out;
4844 		idx = srcu_read_lock(&vcpu->kvm->srcu);
4845 		r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
4846 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
4847 		break;
4848 	}
4849 	case KVM_X86_SETUP_MCE: {
4850 		u64 mcg_cap;
4851 
4852 		r = -EFAULT;
4853 		if (copy_from_user(&mcg_cap, argp, sizeof(mcg_cap)))
4854 			goto out;
4855 		r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
4856 		break;
4857 	}
4858 	case KVM_X86_SET_MCE: {
4859 		struct kvm_x86_mce mce;
4860 
4861 		r = -EFAULT;
4862 		if (copy_from_user(&mce, argp, sizeof(mce)))
4863 			goto out;
4864 		r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
4865 		break;
4866 	}
4867 	case KVM_GET_VCPU_EVENTS: {
4868 		struct kvm_vcpu_events events;
4869 
4870 		kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
4871 
4872 		r = -EFAULT;
4873 		if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
4874 			break;
4875 		r = 0;
4876 		break;
4877 	}
4878 	case KVM_SET_VCPU_EVENTS: {
4879 		struct kvm_vcpu_events events;
4880 
4881 		r = -EFAULT;
4882 		if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
4883 			break;
4884 
4885 		r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
4886 		break;
4887 	}
4888 	case KVM_GET_DEBUGREGS: {
4889 		struct kvm_debugregs dbgregs;
4890 
4891 		kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
4892 
4893 		r = -EFAULT;
4894 		if (copy_to_user(argp, &dbgregs,
4895 				 sizeof(struct kvm_debugregs)))
4896 			break;
4897 		r = 0;
4898 		break;
4899 	}
4900 	case KVM_SET_DEBUGREGS: {
4901 		struct kvm_debugregs dbgregs;
4902 
4903 		r = -EFAULT;
4904 		if (copy_from_user(&dbgregs, argp,
4905 				   sizeof(struct kvm_debugregs)))
4906 			break;
4907 
4908 		r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
4909 		break;
4910 	}
4911 	case KVM_GET_XSAVE: {
4912 		u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL_ACCOUNT);
4913 		r = -ENOMEM;
4914 		if (!u.xsave)
4915 			break;
4916 
4917 		kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
4918 
4919 		r = -EFAULT;
4920 		if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
4921 			break;
4922 		r = 0;
4923 		break;
4924 	}
4925 	case KVM_SET_XSAVE: {
4926 		u.xsave = memdup_user(argp, sizeof(*u.xsave));
4927 		if (IS_ERR(u.xsave)) {
4928 			r = PTR_ERR(u.xsave);
4929 			goto out_nofree;
4930 		}
4931 
4932 		r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
4933 		break;
4934 	}
4935 	case KVM_GET_XCRS: {
4936 		u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL_ACCOUNT);
4937 		r = -ENOMEM;
4938 		if (!u.xcrs)
4939 			break;
4940 
4941 		kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
4942 
4943 		r = -EFAULT;
4944 		if (copy_to_user(argp, u.xcrs,
4945 				 sizeof(struct kvm_xcrs)))
4946 			break;
4947 		r = 0;
4948 		break;
4949 	}
4950 	case KVM_SET_XCRS: {
4951 		u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
4952 		if (IS_ERR(u.xcrs)) {
4953 			r = PTR_ERR(u.xcrs);
4954 			goto out_nofree;
4955 		}
4956 
4957 		r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
4958 		break;
4959 	}
4960 	case KVM_SET_TSC_KHZ: {
4961 		u32 user_tsc_khz;
4962 
4963 		r = -EINVAL;
4964 		user_tsc_khz = (u32)arg;
4965 
4966 		if (kvm_has_tsc_control &&
4967 		    user_tsc_khz >= kvm_max_guest_tsc_khz)
4968 			goto out;
4969 
4970 		if (user_tsc_khz == 0)
4971 			user_tsc_khz = tsc_khz;
4972 
4973 		if (!kvm_set_tsc_khz(vcpu, user_tsc_khz))
4974 			r = 0;
4975 
4976 		goto out;
4977 	}
4978 	case KVM_GET_TSC_KHZ: {
4979 		r = vcpu->arch.virtual_tsc_khz;
4980 		goto out;
4981 	}
4982 	case KVM_KVMCLOCK_CTRL: {
4983 		r = kvm_set_guest_paused(vcpu);
4984 		goto out;
4985 	}
4986 	case KVM_ENABLE_CAP: {
4987 		struct kvm_enable_cap cap;
4988 
4989 		r = -EFAULT;
4990 		if (copy_from_user(&cap, argp, sizeof(cap)))
4991 			goto out;
4992 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
4993 		break;
4994 	}
4995 	case KVM_GET_NESTED_STATE: {
4996 		struct kvm_nested_state __user *user_kvm_nested_state = argp;
4997 		u32 user_data_size;
4998 
4999 		r = -EINVAL;
5000 		if (!kvm_x86_ops.nested_ops->get_state)
5001 			break;
5002 
5003 		BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size));
5004 		r = -EFAULT;
5005 		if (get_user(user_data_size, &user_kvm_nested_state->size))
5006 			break;
5007 
5008 		r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state,
5009 						     user_data_size);
5010 		if (r < 0)
5011 			break;
5012 
5013 		if (r > user_data_size) {
5014 			if (put_user(r, &user_kvm_nested_state->size))
5015 				r = -EFAULT;
5016 			else
5017 				r = -E2BIG;
5018 			break;
5019 		}
5020 
5021 		r = 0;
5022 		break;
5023 	}
5024 	case KVM_SET_NESTED_STATE: {
5025 		struct kvm_nested_state __user *user_kvm_nested_state = argp;
5026 		struct kvm_nested_state kvm_state;
5027 		int idx;
5028 
5029 		r = -EINVAL;
5030 		if (!kvm_x86_ops.nested_ops->set_state)
5031 			break;
5032 
5033 		r = -EFAULT;
5034 		if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state)))
5035 			break;
5036 
5037 		r = -EINVAL;
5038 		if (kvm_state.size < sizeof(kvm_state))
5039 			break;
5040 
5041 		if (kvm_state.flags &
5042 		    ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE
5043 		      | KVM_STATE_NESTED_EVMCS | KVM_STATE_NESTED_MTF_PENDING
5044 		      | KVM_STATE_NESTED_GIF_SET))
5045 			break;
5046 
5047 		/* nested_run_pending implies guest_mode.  */
5048 		if ((kvm_state.flags & KVM_STATE_NESTED_RUN_PENDING)
5049 		    && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE))
5050 			break;
5051 
5052 		idx = srcu_read_lock(&vcpu->kvm->srcu);
5053 		r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state);
5054 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
5055 		break;
5056 	}
5057 	case KVM_GET_SUPPORTED_HV_CPUID: {
5058 		struct kvm_cpuid2 __user *cpuid_arg = argp;
5059 		struct kvm_cpuid2 cpuid;
5060 
5061 		r = -EFAULT;
5062 		if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
5063 			goto out;
5064 
5065 		r = kvm_vcpu_ioctl_get_hv_cpuid(vcpu, &cpuid,
5066 						cpuid_arg->entries);
5067 		if (r)
5068 			goto out;
5069 
5070 		r = -EFAULT;
5071 		if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
5072 			goto out;
5073 		r = 0;
5074 		break;
5075 	}
5076 	default:
5077 		r = -EINVAL;
5078 	}
5079 out:
5080 	kfree(u.buffer);
5081 out_nofree:
5082 	vcpu_put(vcpu);
5083 	return r;
5084 }
5085 
kvm_arch_vcpu_fault(struct kvm_vcpu * vcpu,struct vm_fault * vmf)5086 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
5087 {
5088 	return VM_FAULT_SIGBUS;
5089 }
5090 
kvm_vm_ioctl_set_tss_addr(struct kvm * kvm,unsigned long addr)5091 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
5092 {
5093 	int ret;
5094 
5095 	if (addr > (unsigned int)(-3 * PAGE_SIZE))
5096 		return -EINVAL;
5097 	ret = kvm_x86_ops.set_tss_addr(kvm, addr);
5098 	return ret;
5099 }
5100 
kvm_vm_ioctl_set_identity_map_addr(struct kvm * kvm,u64 ident_addr)5101 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
5102 					      u64 ident_addr)
5103 {
5104 	return kvm_x86_ops.set_identity_map_addr(kvm, ident_addr);
5105 }
5106 
kvm_vm_ioctl_set_nr_mmu_pages(struct kvm * kvm,unsigned long kvm_nr_mmu_pages)5107 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
5108 					 unsigned long kvm_nr_mmu_pages)
5109 {
5110 	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
5111 		return -EINVAL;
5112 
5113 	mutex_lock(&kvm->slots_lock);
5114 
5115 	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
5116 	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
5117 
5118 	mutex_unlock(&kvm->slots_lock);
5119 	return 0;
5120 }
5121 
kvm_vm_ioctl_get_nr_mmu_pages(struct kvm * kvm)5122 static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
5123 {
5124 	return kvm->arch.n_max_mmu_pages;
5125 }
5126 
kvm_vm_ioctl_get_irqchip(struct kvm * kvm,struct kvm_irqchip * chip)5127 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
5128 {
5129 	struct kvm_pic *pic = kvm->arch.vpic;
5130 	int r;
5131 
5132 	r = 0;
5133 	switch (chip->chip_id) {
5134 	case KVM_IRQCHIP_PIC_MASTER:
5135 		memcpy(&chip->chip.pic, &pic->pics[0],
5136 			sizeof(struct kvm_pic_state));
5137 		break;
5138 	case KVM_IRQCHIP_PIC_SLAVE:
5139 		memcpy(&chip->chip.pic, &pic->pics[1],
5140 			sizeof(struct kvm_pic_state));
5141 		break;
5142 	case KVM_IRQCHIP_IOAPIC:
5143 		kvm_get_ioapic(kvm, &chip->chip.ioapic);
5144 		break;
5145 	default:
5146 		r = -EINVAL;
5147 		break;
5148 	}
5149 	return r;
5150 }
5151 
kvm_vm_ioctl_set_irqchip(struct kvm * kvm,struct kvm_irqchip * chip)5152 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
5153 {
5154 	struct kvm_pic *pic = kvm->arch.vpic;
5155 	int r;
5156 
5157 	r = 0;
5158 	switch (chip->chip_id) {
5159 	case KVM_IRQCHIP_PIC_MASTER:
5160 		spin_lock(&pic->lock);
5161 		memcpy(&pic->pics[0], &chip->chip.pic,
5162 			sizeof(struct kvm_pic_state));
5163 		spin_unlock(&pic->lock);
5164 		break;
5165 	case KVM_IRQCHIP_PIC_SLAVE:
5166 		spin_lock(&pic->lock);
5167 		memcpy(&pic->pics[1], &chip->chip.pic,
5168 			sizeof(struct kvm_pic_state));
5169 		spin_unlock(&pic->lock);
5170 		break;
5171 	case KVM_IRQCHIP_IOAPIC:
5172 		kvm_set_ioapic(kvm, &chip->chip.ioapic);
5173 		break;
5174 	default:
5175 		r = -EINVAL;
5176 		break;
5177 	}
5178 	kvm_pic_update_irq(pic);
5179 	return r;
5180 }
5181 
kvm_vm_ioctl_get_pit(struct kvm * kvm,struct kvm_pit_state * ps)5182 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
5183 {
5184 	struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state;
5185 
5186 	BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels));
5187 
5188 	mutex_lock(&kps->lock);
5189 	memcpy(ps, &kps->channels, sizeof(*ps));
5190 	mutex_unlock(&kps->lock);
5191 	return 0;
5192 }
5193 
kvm_vm_ioctl_set_pit(struct kvm * kvm,struct kvm_pit_state * ps)5194 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
5195 {
5196 	int i;
5197 	struct kvm_pit *pit = kvm->arch.vpit;
5198 
5199 	mutex_lock(&pit->pit_state.lock);
5200 	memcpy(&pit->pit_state.channels, ps, sizeof(*ps));
5201 	for (i = 0; i < 3; i++)
5202 		kvm_pit_load_count(pit, i, ps->channels[i].count, 0);
5203 	mutex_unlock(&pit->pit_state.lock);
5204 	return 0;
5205 }
5206 
kvm_vm_ioctl_get_pit2(struct kvm * kvm,struct kvm_pit_state2 * ps)5207 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
5208 {
5209 	mutex_lock(&kvm->arch.vpit->pit_state.lock);
5210 	memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
5211 		sizeof(ps->channels));
5212 	ps->flags = kvm->arch.vpit->pit_state.flags;
5213 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
5214 	memset(&ps->reserved, 0, sizeof(ps->reserved));
5215 	return 0;
5216 }
5217 
kvm_vm_ioctl_set_pit2(struct kvm * kvm,struct kvm_pit_state2 * ps)5218 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
5219 {
5220 	int start = 0;
5221 	int i;
5222 	u32 prev_legacy, cur_legacy;
5223 	struct kvm_pit *pit = kvm->arch.vpit;
5224 
5225 	mutex_lock(&pit->pit_state.lock);
5226 	prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
5227 	cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
5228 	if (!prev_legacy && cur_legacy)
5229 		start = 1;
5230 	memcpy(&pit->pit_state.channels, &ps->channels,
5231 	       sizeof(pit->pit_state.channels));
5232 	pit->pit_state.flags = ps->flags;
5233 	for (i = 0; i < 3; i++)
5234 		kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count,
5235 				   start && i == 0);
5236 	mutex_unlock(&pit->pit_state.lock);
5237 	return 0;
5238 }
5239 
kvm_vm_ioctl_reinject(struct kvm * kvm,struct kvm_reinject_control * control)5240 static int kvm_vm_ioctl_reinject(struct kvm *kvm,
5241 				 struct kvm_reinject_control *control)
5242 {
5243 	struct kvm_pit *pit = kvm->arch.vpit;
5244 
5245 	/* pit->pit_state.lock was overloaded to prevent userspace from getting
5246 	 * an inconsistent state after running multiple KVM_REINJECT_CONTROL
5247 	 * ioctls in parallel.  Use a separate lock if that ioctl isn't rare.
5248 	 */
5249 	mutex_lock(&pit->pit_state.lock);
5250 	kvm_pit_set_reinject(pit, control->pit_reinject);
5251 	mutex_unlock(&pit->pit_state.lock);
5252 
5253 	return 0;
5254 }
5255 
kvm_arch_sync_dirty_log(struct kvm * kvm,struct kvm_memory_slot * memslot)5256 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
5257 {
5258 	/*
5259 	 * Flush potentially hardware-cached dirty pages to dirty_bitmap.
5260 	 */
5261 	if (kvm_x86_ops.flush_log_dirty)
5262 		kvm_x86_ops.flush_log_dirty(kvm);
5263 }
5264 
kvm_vm_ioctl_irq_line(struct kvm * kvm,struct kvm_irq_level * irq_event,bool line_status)5265 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
5266 			bool line_status)
5267 {
5268 	if (!irqchip_in_kernel(kvm))
5269 		return -ENXIO;
5270 
5271 	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
5272 					irq_event->irq, irq_event->level,
5273 					line_status);
5274 	return 0;
5275 }
5276 
kvm_vm_ioctl_enable_cap(struct kvm * kvm,struct kvm_enable_cap * cap)5277 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
5278 			    struct kvm_enable_cap *cap)
5279 {
5280 	int r;
5281 
5282 	if (cap->flags)
5283 		return -EINVAL;
5284 
5285 	switch (cap->cap) {
5286 	case KVM_CAP_DISABLE_QUIRKS:
5287 		kvm->arch.disabled_quirks = cap->args[0];
5288 		r = 0;
5289 		break;
5290 	case KVM_CAP_SPLIT_IRQCHIP: {
5291 		mutex_lock(&kvm->lock);
5292 		r = -EINVAL;
5293 		if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS)
5294 			goto split_irqchip_unlock;
5295 		r = -EEXIST;
5296 		if (irqchip_in_kernel(kvm))
5297 			goto split_irqchip_unlock;
5298 		if (kvm->created_vcpus)
5299 			goto split_irqchip_unlock;
5300 		r = kvm_setup_empty_irq_routing(kvm);
5301 		if (r)
5302 			goto split_irqchip_unlock;
5303 		/* Pairs with irqchip_in_kernel. */
5304 		smp_wmb();
5305 		kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
5306 		kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
5307 		r = 0;
5308 split_irqchip_unlock:
5309 		mutex_unlock(&kvm->lock);
5310 		break;
5311 	}
5312 	case KVM_CAP_X2APIC_API:
5313 		r = -EINVAL;
5314 		if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS)
5315 			break;
5316 
5317 		if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS)
5318 			kvm->arch.x2apic_format = true;
5319 		if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
5320 			kvm->arch.x2apic_broadcast_quirk_disabled = true;
5321 
5322 		r = 0;
5323 		break;
5324 	case KVM_CAP_X86_DISABLE_EXITS:
5325 		r = -EINVAL;
5326 		if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS)
5327 			break;
5328 
5329 		if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) &&
5330 			kvm_can_mwait_in_guest())
5331 			kvm->arch.mwait_in_guest = true;
5332 		if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
5333 			kvm->arch.hlt_in_guest = true;
5334 		if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
5335 			kvm->arch.pause_in_guest = true;
5336 		if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE)
5337 			kvm->arch.cstate_in_guest = true;
5338 		r = 0;
5339 		break;
5340 	case KVM_CAP_MSR_PLATFORM_INFO:
5341 		kvm->arch.guest_can_read_msr_platform_info = cap->args[0];
5342 		r = 0;
5343 		break;
5344 	case KVM_CAP_EXCEPTION_PAYLOAD:
5345 		kvm->arch.exception_payload_enabled = cap->args[0];
5346 		r = 0;
5347 		break;
5348 	case KVM_CAP_X86_USER_SPACE_MSR:
5349 		kvm->arch.user_space_msr_mask = cap->args[0];
5350 		r = 0;
5351 		break;
5352 	default:
5353 		r = -EINVAL;
5354 		break;
5355 	}
5356 	return r;
5357 }
5358 
kvm_alloc_msr_filter(bool default_allow)5359 static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow)
5360 {
5361 	struct kvm_x86_msr_filter *msr_filter;
5362 
5363 	msr_filter = kzalloc(sizeof(*msr_filter), GFP_KERNEL_ACCOUNT);
5364 	if (!msr_filter)
5365 		return NULL;
5366 
5367 	msr_filter->default_allow = default_allow;
5368 	return msr_filter;
5369 }
5370 
kvm_free_msr_filter(struct kvm_x86_msr_filter * msr_filter)5371 static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter)
5372 {
5373 	u32 i;
5374 
5375 	if (!msr_filter)
5376 		return;
5377 
5378 	for (i = 0; i < msr_filter->count; i++)
5379 		kfree(msr_filter->ranges[i].bitmap);
5380 
5381 	kfree(msr_filter);
5382 }
5383 
kvm_add_msr_filter(struct kvm_x86_msr_filter * msr_filter,struct kvm_msr_filter_range * user_range)5384 static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
5385 			      struct kvm_msr_filter_range *user_range)
5386 {
5387 	struct msr_bitmap_range range;
5388 	unsigned long *bitmap = NULL;
5389 	size_t bitmap_size;
5390 	int r;
5391 
5392 	if (!user_range->nmsrs)
5393 		return 0;
5394 
5395 	bitmap_size = BITS_TO_LONGS(user_range->nmsrs) * sizeof(long);
5396 	if (!bitmap_size || bitmap_size > KVM_MSR_FILTER_MAX_BITMAP_SIZE)
5397 		return -EINVAL;
5398 
5399 	bitmap = memdup_user((__user u8*)user_range->bitmap, bitmap_size);
5400 	if (IS_ERR(bitmap))
5401 		return PTR_ERR(bitmap);
5402 
5403 	range = (struct msr_bitmap_range) {
5404 		.flags = user_range->flags,
5405 		.base = user_range->base,
5406 		.nmsrs = user_range->nmsrs,
5407 		.bitmap = bitmap,
5408 	};
5409 
5410 	if (range.flags & ~(KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE)) {
5411 		r = -EINVAL;
5412 		goto err;
5413 	}
5414 
5415 	if (!range.flags) {
5416 		r = -EINVAL;
5417 		goto err;
5418 	}
5419 
5420 	/* Everything ok, add this range identifier. */
5421 	msr_filter->ranges[msr_filter->count] = range;
5422 	msr_filter->count++;
5423 
5424 	return 0;
5425 err:
5426 	kfree(bitmap);
5427 	return r;
5428 }
5429 
kvm_vm_ioctl_set_msr_filter(struct kvm * kvm,void __user * argp)5430 static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
5431 {
5432 	struct kvm_msr_filter __user *user_msr_filter = argp;
5433 	struct kvm_x86_msr_filter *new_filter, *old_filter;
5434 	struct kvm_msr_filter filter;
5435 	bool default_allow;
5436 	bool empty = true;
5437 	int r = 0;
5438 	u32 i;
5439 
5440 	if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
5441 		return -EFAULT;
5442 
5443 	for (i = 0; i < ARRAY_SIZE(filter.ranges); i++)
5444 		empty &= !filter.ranges[i].nmsrs;
5445 
5446 	default_allow = !(filter.flags & KVM_MSR_FILTER_DEFAULT_DENY);
5447 	if (empty && !default_allow)
5448 		return -EINVAL;
5449 
5450 	new_filter = kvm_alloc_msr_filter(default_allow);
5451 	if (!new_filter)
5452 		return -ENOMEM;
5453 
5454 	for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
5455 		r = kvm_add_msr_filter(new_filter, &filter.ranges[i]);
5456 		if (r) {
5457 			kvm_free_msr_filter(new_filter);
5458 			return r;
5459 		}
5460 	}
5461 
5462 	mutex_lock(&kvm->lock);
5463 
5464 	/* The per-VM filter is protected by kvm->lock... */
5465 	old_filter = srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1);
5466 
5467 	rcu_assign_pointer(kvm->arch.msr_filter, new_filter);
5468 	synchronize_srcu(&kvm->srcu);
5469 
5470 	kvm_free_msr_filter(old_filter);
5471 
5472 	kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED);
5473 	mutex_unlock(&kvm->lock);
5474 
5475 	return 0;
5476 }
5477 
kvm_arch_vm_ioctl(struct file * filp,unsigned int ioctl,unsigned long arg)5478 long kvm_arch_vm_ioctl(struct file *filp,
5479 		       unsigned int ioctl, unsigned long arg)
5480 {
5481 	struct kvm *kvm = filp->private_data;
5482 	void __user *argp = (void __user *)arg;
5483 	int r = -ENOTTY;
5484 	/*
5485 	 * This union makes it completely explicit to gcc-3.x
5486 	 * that these two variables' stack usage should be
5487 	 * combined, not added together.
5488 	 */
5489 	union {
5490 		struct kvm_pit_state ps;
5491 		struct kvm_pit_state2 ps2;
5492 		struct kvm_pit_config pit_config;
5493 	} u;
5494 
5495 	switch (ioctl) {
5496 	case KVM_SET_TSS_ADDR:
5497 		r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
5498 		break;
5499 	case KVM_SET_IDENTITY_MAP_ADDR: {
5500 		u64 ident_addr;
5501 
5502 		mutex_lock(&kvm->lock);
5503 		r = -EINVAL;
5504 		if (kvm->created_vcpus)
5505 			goto set_identity_unlock;
5506 		r = -EFAULT;
5507 		if (copy_from_user(&ident_addr, argp, sizeof(ident_addr)))
5508 			goto set_identity_unlock;
5509 		r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
5510 set_identity_unlock:
5511 		mutex_unlock(&kvm->lock);
5512 		break;
5513 	}
5514 	case KVM_SET_NR_MMU_PAGES:
5515 		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
5516 		break;
5517 	case KVM_GET_NR_MMU_PAGES:
5518 		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
5519 		break;
5520 	case KVM_CREATE_IRQCHIP: {
5521 		mutex_lock(&kvm->lock);
5522 
5523 		r = -EEXIST;
5524 		if (irqchip_in_kernel(kvm))
5525 			goto create_irqchip_unlock;
5526 
5527 		r = -EINVAL;
5528 		if (kvm->created_vcpus)
5529 			goto create_irqchip_unlock;
5530 
5531 		r = kvm_pic_init(kvm);
5532 		if (r)
5533 			goto create_irqchip_unlock;
5534 
5535 		r = kvm_ioapic_init(kvm);
5536 		if (r) {
5537 			kvm_pic_destroy(kvm);
5538 			goto create_irqchip_unlock;
5539 		}
5540 
5541 		r = kvm_setup_default_irq_routing(kvm);
5542 		if (r) {
5543 			kvm_ioapic_destroy(kvm);
5544 			kvm_pic_destroy(kvm);
5545 			goto create_irqchip_unlock;
5546 		}
5547 		/* Write kvm->irq_routing before enabling irqchip_in_kernel. */
5548 		smp_wmb();
5549 		kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
5550 	create_irqchip_unlock:
5551 		mutex_unlock(&kvm->lock);
5552 		break;
5553 	}
5554 	case KVM_CREATE_PIT:
5555 		u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
5556 		goto create_pit;
5557 	case KVM_CREATE_PIT2:
5558 		r = -EFAULT;
5559 		if (copy_from_user(&u.pit_config, argp,
5560 				   sizeof(struct kvm_pit_config)))
5561 			goto out;
5562 	create_pit:
5563 		mutex_lock(&kvm->lock);
5564 		r = -EEXIST;
5565 		if (kvm->arch.vpit)
5566 			goto create_pit_unlock;
5567 		r = -ENOMEM;
5568 		kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
5569 		if (kvm->arch.vpit)
5570 			r = 0;
5571 	create_pit_unlock:
5572 		mutex_unlock(&kvm->lock);
5573 		break;
5574 	case KVM_GET_IRQCHIP: {
5575 		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
5576 		struct kvm_irqchip *chip;
5577 
5578 		chip = memdup_user(argp, sizeof(*chip));
5579 		if (IS_ERR(chip)) {
5580 			r = PTR_ERR(chip);
5581 			goto out;
5582 		}
5583 
5584 		r = -ENXIO;
5585 		if (!irqchip_kernel(kvm))
5586 			goto get_irqchip_out;
5587 		r = kvm_vm_ioctl_get_irqchip(kvm, chip);
5588 		if (r)
5589 			goto get_irqchip_out;
5590 		r = -EFAULT;
5591 		if (copy_to_user(argp, chip, sizeof(*chip)))
5592 			goto get_irqchip_out;
5593 		r = 0;
5594 	get_irqchip_out:
5595 		kfree(chip);
5596 		break;
5597 	}
5598 	case KVM_SET_IRQCHIP: {
5599 		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
5600 		struct kvm_irqchip *chip;
5601 
5602 		chip = memdup_user(argp, sizeof(*chip));
5603 		if (IS_ERR(chip)) {
5604 			r = PTR_ERR(chip);
5605 			goto out;
5606 		}
5607 
5608 		r = -ENXIO;
5609 		if (!irqchip_kernel(kvm))
5610 			goto set_irqchip_out;
5611 		r = kvm_vm_ioctl_set_irqchip(kvm, chip);
5612 	set_irqchip_out:
5613 		kfree(chip);
5614 		break;
5615 	}
5616 	case KVM_GET_PIT: {
5617 		r = -EFAULT;
5618 		if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
5619 			goto out;
5620 		r = -ENXIO;
5621 		if (!kvm->arch.vpit)
5622 			goto out;
5623 		r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
5624 		if (r)
5625 			goto out;
5626 		r = -EFAULT;
5627 		if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
5628 			goto out;
5629 		r = 0;
5630 		break;
5631 	}
5632 	case KVM_SET_PIT: {
5633 		r = -EFAULT;
5634 		if (copy_from_user(&u.ps, argp, sizeof(u.ps)))
5635 			goto out;
5636 		mutex_lock(&kvm->lock);
5637 		r = -ENXIO;
5638 		if (!kvm->arch.vpit)
5639 			goto set_pit_out;
5640 		r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
5641 set_pit_out:
5642 		mutex_unlock(&kvm->lock);
5643 		break;
5644 	}
5645 	case KVM_GET_PIT2: {
5646 		r = -ENXIO;
5647 		if (!kvm->arch.vpit)
5648 			goto out;
5649 		r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
5650 		if (r)
5651 			goto out;
5652 		r = -EFAULT;
5653 		if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
5654 			goto out;
5655 		r = 0;
5656 		break;
5657 	}
5658 	case KVM_SET_PIT2: {
5659 		r = -EFAULT;
5660 		if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
5661 			goto out;
5662 		mutex_lock(&kvm->lock);
5663 		r = -ENXIO;
5664 		if (!kvm->arch.vpit)
5665 			goto set_pit2_out;
5666 		r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
5667 set_pit2_out:
5668 		mutex_unlock(&kvm->lock);
5669 		break;
5670 	}
5671 	case KVM_REINJECT_CONTROL: {
5672 		struct kvm_reinject_control control;
5673 		r =  -EFAULT;
5674 		if (copy_from_user(&control, argp, sizeof(control)))
5675 			goto out;
5676 		r = -ENXIO;
5677 		if (!kvm->arch.vpit)
5678 			goto out;
5679 		r = kvm_vm_ioctl_reinject(kvm, &control);
5680 		break;
5681 	}
5682 	case KVM_SET_BOOT_CPU_ID:
5683 		r = 0;
5684 		mutex_lock(&kvm->lock);
5685 		if (kvm->created_vcpus)
5686 			r = -EBUSY;
5687 		else
5688 			kvm->arch.bsp_vcpu_id = arg;
5689 		mutex_unlock(&kvm->lock);
5690 		break;
5691 	case KVM_XEN_HVM_CONFIG: {
5692 		struct kvm_xen_hvm_config xhc;
5693 		r = -EFAULT;
5694 		if (copy_from_user(&xhc, argp, sizeof(xhc)))
5695 			goto out;
5696 		r = -EINVAL;
5697 		if (xhc.flags)
5698 			goto out;
5699 		memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc));
5700 		r = 0;
5701 		break;
5702 	}
5703 	case KVM_SET_CLOCK: {
5704 		struct kvm_clock_data user_ns;
5705 		u64 now_ns;
5706 
5707 		r = -EFAULT;
5708 		if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
5709 			goto out;
5710 
5711 		r = -EINVAL;
5712 		if (user_ns.flags)
5713 			goto out;
5714 
5715 		r = 0;
5716 		/*
5717 		 * TODO: userspace has to take care of races with VCPU_RUN, so
5718 		 * kvm_gen_update_masterclock() can be cut down to locked
5719 		 * pvclock_update_vm_gtod_copy().
5720 		 */
5721 		kvm_gen_update_masterclock(kvm);
5722 		now_ns = get_kvmclock_ns(kvm);
5723 		kvm->arch.kvmclock_offset += user_ns.clock - now_ns;
5724 		kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
5725 		break;
5726 	}
5727 	case KVM_GET_CLOCK: {
5728 		struct kvm_clock_data user_ns;
5729 		u64 now_ns;
5730 
5731 		now_ns = get_kvmclock_ns(kvm);
5732 		user_ns.clock = now_ns;
5733 		user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0;
5734 		memset(&user_ns.pad, 0, sizeof(user_ns.pad));
5735 
5736 		r = -EFAULT;
5737 		if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
5738 			goto out;
5739 		r = 0;
5740 		break;
5741 	}
5742 	case KVM_MEMORY_ENCRYPT_OP: {
5743 		r = -ENOTTY;
5744 		if (kvm_x86_ops.mem_enc_op)
5745 			r = kvm_x86_ops.mem_enc_op(kvm, argp);
5746 		break;
5747 	}
5748 	case KVM_MEMORY_ENCRYPT_REG_REGION: {
5749 		struct kvm_enc_region region;
5750 
5751 		r = -EFAULT;
5752 		if (copy_from_user(&region, argp, sizeof(region)))
5753 			goto out;
5754 
5755 		r = -ENOTTY;
5756 		if (kvm_x86_ops.mem_enc_reg_region)
5757 			r = kvm_x86_ops.mem_enc_reg_region(kvm, &region);
5758 		break;
5759 	}
5760 	case KVM_MEMORY_ENCRYPT_UNREG_REGION: {
5761 		struct kvm_enc_region region;
5762 
5763 		r = -EFAULT;
5764 		if (copy_from_user(&region, argp, sizeof(region)))
5765 			goto out;
5766 
5767 		r = -ENOTTY;
5768 		if (kvm_x86_ops.mem_enc_unreg_region)
5769 			r = kvm_x86_ops.mem_enc_unreg_region(kvm, &region);
5770 		break;
5771 	}
5772 	case KVM_HYPERV_EVENTFD: {
5773 		struct kvm_hyperv_eventfd hvevfd;
5774 
5775 		r = -EFAULT;
5776 		if (copy_from_user(&hvevfd, argp, sizeof(hvevfd)))
5777 			goto out;
5778 		r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd);
5779 		break;
5780 	}
5781 	case KVM_SET_PMU_EVENT_FILTER:
5782 		r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp);
5783 		break;
5784 	case KVM_X86_SET_MSR_FILTER:
5785 		r = kvm_vm_ioctl_set_msr_filter(kvm, argp);
5786 		break;
5787 	default:
5788 		r = -ENOTTY;
5789 	}
5790 out:
5791 	return r;
5792 }
5793 
kvm_init_msr_list(void)5794 static void kvm_init_msr_list(void)
5795 {
5796 	struct x86_pmu_capability x86_pmu;
5797 	u32 dummy[2];
5798 	unsigned i;
5799 
5800 	BUILD_BUG_ON_MSG(INTEL_PMC_MAX_FIXED != 4,
5801 			 "Please update the fixed PMCs in msrs_to_saved_all[]");
5802 
5803 	perf_get_x86_pmu_capability(&x86_pmu);
5804 
5805 	num_msrs_to_save = 0;
5806 	num_emulated_msrs = 0;
5807 	num_msr_based_features = 0;
5808 
5809 	for (i = 0; i < ARRAY_SIZE(msrs_to_save_all); i++) {
5810 		if (rdmsr_safe(msrs_to_save_all[i], &dummy[0], &dummy[1]) < 0)
5811 			continue;
5812 
5813 		/*
5814 		 * Even MSRs that are valid in the host may not be exposed
5815 		 * to the guests in some cases.
5816 		 */
5817 		switch (msrs_to_save_all[i]) {
5818 		case MSR_IA32_BNDCFGS:
5819 			if (!kvm_mpx_supported())
5820 				continue;
5821 			break;
5822 		case MSR_TSC_AUX:
5823 			if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
5824 				continue;
5825 			break;
5826 		case MSR_IA32_UMWAIT_CONTROL:
5827 			if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG))
5828 				continue;
5829 			break;
5830 		case MSR_IA32_RTIT_CTL:
5831 		case MSR_IA32_RTIT_STATUS:
5832 			if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT))
5833 				continue;
5834 			break;
5835 		case MSR_IA32_RTIT_CR3_MATCH:
5836 			if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
5837 			    !intel_pt_validate_hw_cap(PT_CAP_cr3_filtering))
5838 				continue;
5839 			break;
5840 		case MSR_IA32_RTIT_OUTPUT_BASE:
5841 		case MSR_IA32_RTIT_OUTPUT_MASK:
5842 			if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
5843 				(!intel_pt_validate_hw_cap(PT_CAP_topa_output) &&
5844 				 !intel_pt_validate_hw_cap(PT_CAP_single_range_output)))
5845 				continue;
5846 			break;
5847 		case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
5848 			if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
5849 				msrs_to_save_all[i] - MSR_IA32_RTIT_ADDR0_A >=
5850 				intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2)
5851 				continue;
5852 			break;
5853 		case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR0 + 17:
5854 			if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
5855 			    min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
5856 				continue;
5857 			break;
5858 		case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL0 + 17:
5859 			if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
5860 			    min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
5861 				continue;
5862 			break;
5863 		default:
5864 			break;
5865 		}
5866 
5867 		msrs_to_save[num_msrs_to_save++] = msrs_to_save_all[i];
5868 	}
5869 
5870 	for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) {
5871 		if (!kvm_x86_ops.has_emulated_msr(emulated_msrs_all[i]))
5872 			continue;
5873 
5874 		emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i];
5875 	}
5876 
5877 	for (i = 0; i < ARRAY_SIZE(msr_based_features_all); i++) {
5878 		struct kvm_msr_entry msr;
5879 
5880 		msr.index = msr_based_features_all[i];
5881 		if (kvm_get_msr_feature(&msr))
5882 			continue;
5883 
5884 		msr_based_features[num_msr_based_features++] = msr_based_features_all[i];
5885 	}
5886 }
5887 
vcpu_mmio_write(struct kvm_vcpu * vcpu,gpa_t addr,int len,const void * v)5888 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
5889 			   const void *v)
5890 {
5891 	int handled = 0;
5892 	int n;
5893 
5894 	do {
5895 		n = min(len, 8);
5896 		if (!(lapic_in_kernel(vcpu) &&
5897 		      !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
5898 		    && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
5899 			break;
5900 		handled += n;
5901 		addr += n;
5902 		len -= n;
5903 		v += n;
5904 	} while (len);
5905 
5906 	return handled;
5907 }
5908 
vcpu_mmio_read(struct kvm_vcpu * vcpu,gpa_t addr,int len,void * v)5909 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
5910 {
5911 	int handled = 0;
5912 	int n;
5913 
5914 	do {
5915 		n = min(len, 8);
5916 		if (!(lapic_in_kernel(vcpu) &&
5917 		      !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
5918 					 addr, n, v))
5919 		    && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
5920 			break;
5921 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v);
5922 		handled += n;
5923 		addr += n;
5924 		len -= n;
5925 		v += n;
5926 	} while (len);
5927 
5928 	return handled;
5929 }
5930 
kvm_set_segment(struct kvm_vcpu * vcpu,struct kvm_segment * var,int seg)5931 static void kvm_set_segment(struct kvm_vcpu *vcpu,
5932 			struct kvm_segment *var, int seg)
5933 {
5934 	kvm_x86_ops.set_segment(vcpu, var, seg);
5935 }
5936 
kvm_get_segment(struct kvm_vcpu * vcpu,struct kvm_segment * var,int seg)5937 void kvm_get_segment(struct kvm_vcpu *vcpu,
5938 		     struct kvm_segment *var, int seg)
5939 {
5940 	kvm_x86_ops.get_segment(vcpu, var, seg);
5941 }
5942 
translate_nested_gpa(struct kvm_vcpu * vcpu,gpa_t gpa,u32 access,struct x86_exception * exception)5943 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
5944 			   struct x86_exception *exception)
5945 {
5946 	gpa_t t_gpa;
5947 
5948 	BUG_ON(!mmu_is_nested(vcpu));
5949 
5950 	/* NPT walks are always user-walks */
5951 	access |= PFERR_USER_MASK;
5952 	t_gpa  = vcpu->arch.mmu->gva_to_gpa(vcpu, gpa, access, exception);
5953 
5954 	return t_gpa;
5955 }
5956 
kvm_mmu_gva_to_gpa_read(struct kvm_vcpu * vcpu,gva_t gva,struct x86_exception * exception)5957 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
5958 			      struct x86_exception *exception)
5959 {
5960 	u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
5961 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
5962 }
5963 
kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu * vcpu,gva_t gva,struct x86_exception * exception)5964  gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
5965 				struct x86_exception *exception)
5966 {
5967 	u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
5968 	access |= PFERR_FETCH_MASK;
5969 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
5970 }
5971 
kvm_mmu_gva_to_gpa_write(struct kvm_vcpu * vcpu,gva_t gva,struct x86_exception * exception)5972 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
5973 			       struct x86_exception *exception)
5974 {
5975 	u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
5976 	access |= PFERR_WRITE_MASK;
5977 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
5978 }
5979 
5980 /* uses this to access any guest's mapped memory without checking CPL */
kvm_mmu_gva_to_gpa_system(struct kvm_vcpu * vcpu,gva_t gva,struct x86_exception * exception)5981 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
5982 				struct x86_exception *exception)
5983 {
5984 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
5985 }
5986 
kvm_read_guest_virt_helper(gva_t addr,void * val,unsigned int bytes,struct kvm_vcpu * vcpu,u32 access,struct x86_exception * exception)5987 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
5988 				      struct kvm_vcpu *vcpu, u32 access,
5989 				      struct x86_exception *exception)
5990 {
5991 	void *data = val;
5992 	int r = X86EMUL_CONTINUE;
5993 
5994 	while (bytes) {
5995 		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
5996 							    exception);
5997 		unsigned offset = addr & (PAGE_SIZE-1);
5998 		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
5999 		int ret;
6000 
6001 		if (gpa == UNMAPPED_GVA)
6002 			return X86EMUL_PROPAGATE_FAULT;
6003 		ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,
6004 					       offset, toread);
6005 		if (ret < 0) {
6006 			r = X86EMUL_IO_NEEDED;
6007 			goto out;
6008 		}
6009 
6010 		bytes -= toread;
6011 		data += toread;
6012 		addr += toread;
6013 	}
6014 out:
6015 	return r;
6016 }
6017 
6018 /* used for instruction fetching */
kvm_fetch_guest_virt(struct x86_emulate_ctxt * ctxt,gva_t addr,void * val,unsigned int bytes,struct x86_exception * exception)6019 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
6020 				gva_t addr, void *val, unsigned int bytes,
6021 				struct x86_exception *exception)
6022 {
6023 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
6024 	u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
6025 	unsigned offset;
6026 	int ret;
6027 
6028 	/* Inline kvm_read_guest_virt_helper for speed.  */
6029 	gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK,
6030 						    exception);
6031 	if (unlikely(gpa == UNMAPPED_GVA))
6032 		return X86EMUL_PROPAGATE_FAULT;
6033 
6034 	offset = addr & (PAGE_SIZE-1);
6035 	if (WARN_ON(offset + bytes > PAGE_SIZE))
6036 		bytes = (unsigned)PAGE_SIZE - offset;
6037 	ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val,
6038 				       offset, bytes);
6039 	if (unlikely(ret < 0))
6040 		return X86EMUL_IO_NEEDED;
6041 
6042 	return X86EMUL_CONTINUE;
6043 }
6044 
kvm_read_guest_virt(struct kvm_vcpu * vcpu,gva_t addr,void * val,unsigned int bytes,struct x86_exception * exception)6045 int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
6046 			       gva_t addr, void *val, unsigned int bytes,
6047 			       struct x86_exception *exception)
6048 {
6049 	u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
6050 
6051 	/*
6052 	 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
6053 	 * is returned, but our callers are not ready for that and they blindly
6054 	 * call kvm_inject_page_fault.  Ensure that they at least do not leak
6055 	 * uninitialized kernel stack memory into cr2 and error code.
6056 	 */
6057 	memset(exception, 0, sizeof(*exception));
6058 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
6059 					  exception);
6060 }
6061 EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
6062 
emulator_read_std(struct x86_emulate_ctxt * ctxt,gva_t addr,void * val,unsigned int bytes,struct x86_exception * exception,bool system)6063 static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
6064 			     gva_t addr, void *val, unsigned int bytes,
6065 			     struct x86_exception *exception, bool system)
6066 {
6067 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
6068 	u32 access = 0;
6069 
6070 	if (!system && kvm_x86_ops.get_cpl(vcpu) == 3)
6071 		access |= PFERR_USER_MASK;
6072 
6073 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
6074 }
6075 
kvm_read_guest_phys_system(struct x86_emulate_ctxt * ctxt,unsigned long addr,void * val,unsigned int bytes)6076 static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
6077 		unsigned long addr, void *val, unsigned int bytes)
6078 {
6079 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
6080 	int r = kvm_vcpu_read_guest(vcpu, addr, val, bytes);
6081 
6082 	return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
6083 }
6084 
kvm_write_guest_virt_helper(gva_t addr,void * val,unsigned int bytes,struct kvm_vcpu * vcpu,u32 access,struct x86_exception * exception)6085 static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
6086 				      struct kvm_vcpu *vcpu, u32 access,
6087 				      struct x86_exception *exception)
6088 {
6089 	void *data = val;
6090 	int r = X86EMUL_CONTINUE;
6091 
6092 	while (bytes) {
6093 		gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
6094 							     access,
6095 							     exception);
6096 		unsigned offset = addr & (PAGE_SIZE-1);
6097 		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
6098 		int ret;
6099 
6100 		if (gpa == UNMAPPED_GVA)
6101 			return X86EMUL_PROPAGATE_FAULT;
6102 		ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite);
6103 		if (ret < 0) {
6104 			r = X86EMUL_IO_NEEDED;
6105 			goto out;
6106 		}
6107 
6108 		bytes -= towrite;
6109 		data += towrite;
6110 		addr += towrite;
6111 	}
6112 out:
6113 	return r;
6114 }
6115 
emulator_write_std(struct x86_emulate_ctxt * ctxt,gva_t addr,void * val,unsigned int bytes,struct x86_exception * exception,bool system)6116 static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
6117 			      unsigned int bytes, struct x86_exception *exception,
6118 			      bool system)
6119 {
6120 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
6121 	u32 access = PFERR_WRITE_MASK;
6122 
6123 	if (!system && kvm_x86_ops.get_cpl(vcpu) == 3)
6124 		access |= PFERR_USER_MASK;
6125 
6126 	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
6127 					   access, exception);
6128 }
6129 
kvm_write_guest_virt_system(struct kvm_vcpu * vcpu,gva_t addr,void * val,unsigned int bytes,struct x86_exception * exception)6130 int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
6131 				unsigned int bytes, struct x86_exception *exception)
6132 {
6133 	/* kvm_write_guest_virt_system can pull in tons of pages. */
6134 	vcpu->arch.l1tf_flush_l1d = true;
6135 
6136 	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
6137 					   PFERR_WRITE_MASK, exception);
6138 }
6139 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
6140 
handle_ud(struct kvm_vcpu * vcpu)6141 int handle_ud(struct kvm_vcpu *vcpu)
6142 {
6143 	static const char kvm_emulate_prefix[] = { __KVM_EMULATE_PREFIX };
6144 	int emul_type = EMULTYPE_TRAP_UD;
6145 	char sig[5]; /* ud2; .ascii "kvm" */
6146 	struct x86_exception e;
6147 
6148 	if (unlikely(!kvm_x86_ops.can_emulate_instruction(vcpu, NULL, 0)))
6149 		return 1;
6150 
6151 	if (force_emulation_prefix &&
6152 	    kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu),
6153 				sig, sizeof(sig), &e) == 0 &&
6154 	    memcmp(sig, kvm_emulate_prefix, sizeof(sig)) == 0) {
6155 		kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig));
6156 		emul_type = EMULTYPE_TRAP_UD_FORCED;
6157 	}
6158 
6159 	return kvm_emulate_instruction(vcpu, emul_type);
6160 }
6161 EXPORT_SYMBOL_GPL(handle_ud);
6162 
vcpu_is_mmio_gpa(struct kvm_vcpu * vcpu,unsigned long gva,gpa_t gpa,bool write)6163 static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
6164 			    gpa_t gpa, bool write)
6165 {
6166 	/* For APIC access vmexit */
6167 	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
6168 		return 1;
6169 
6170 	if (vcpu_match_mmio_gpa(vcpu, gpa)) {
6171 		trace_vcpu_match_mmio(gva, gpa, write, true);
6172 		return 1;
6173 	}
6174 
6175 	return 0;
6176 }
6177 
vcpu_mmio_gva_to_gpa(struct kvm_vcpu * vcpu,unsigned long gva,gpa_t * gpa,struct x86_exception * exception,bool write)6178 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
6179 				gpa_t *gpa, struct x86_exception *exception,
6180 				bool write)
6181 {
6182 	u32 access = ((kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
6183 		| (write ? PFERR_WRITE_MASK : 0);
6184 
6185 	/*
6186 	 * currently PKRU is only applied to ept enabled guest so
6187 	 * there is no pkey in EPT page table for L1 guest or EPT
6188 	 * shadow page table for L2 guest.
6189 	 */
6190 	if (vcpu_match_mmio_gva(vcpu, gva)
6191 	    && !permission_fault(vcpu, vcpu->arch.walk_mmu,
6192 				 vcpu->arch.mmio_access, 0, access)) {
6193 		*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
6194 					(gva & (PAGE_SIZE - 1));
6195 		trace_vcpu_match_mmio(gva, *gpa, write, false);
6196 		return 1;
6197 	}
6198 
6199 	*gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
6200 
6201 	if (*gpa == UNMAPPED_GVA)
6202 		return -1;
6203 
6204 	return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write);
6205 }
6206 
emulator_write_phys(struct kvm_vcpu * vcpu,gpa_t gpa,const void * val,int bytes)6207 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
6208 			const void *val, int bytes)
6209 {
6210 	int ret;
6211 
6212 	ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
6213 	if (ret < 0)
6214 		return 0;
6215 	kvm_page_track_write(vcpu, gpa, val, bytes);
6216 	return 1;
6217 }
6218 
6219 struct read_write_emulator_ops {
6220 	int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
6221 				  int bytes);
6222 	int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
6223 				  void *val, int bytes);
6224 	int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
6225 			       int bytes, void *val);
6226 	int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
6227 				    void *val, int bytes);
6228 	bool write;
6229 };
6230 
read_prepare(struct kvm_vcpu * vcpu,void * val,int bytes)6231 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
6232 {
6233 	if (vcpu->mmio_read_completed) {
6234 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
6235 			       vcpu->mmio_fragments[0].gpa, val);
6236 		vcpu->mmio_read_completed = 0;
6237 		return 1;
6238 	}
6239 
6240 	return 0;
6241 }
6242 
read_emulate(struct kvm_vcpu * vcpu,gpa_t gpa,void * val,int bytes)6243 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
6244 			void *val, int bytes)
6245 {
6246 	return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes);
6247 }
6248 
write_emulate(struct kvm_vcpu * vcpu,gpa_t gpa,void * val,int bytes)6249 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
6250 			 void *val, int bytes)
6251 {
6252 	return emulator_write_phys(vcpu, gpa, val, bytes);
6253 }
6254 
write_mmio(struct kvm_vcpu * vcpu,gpa_t gpa,int bytes,void * val)6255 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
6256 {
6257 	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val);
6258 	return vcpu_mmio_write(vcpu, gpa, bytes, val);
6259 }
6260 
read_exit_mmio(struct kvm_vcpu * vcpu,gpa_t gpa,void * val,int bytes)6261 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
6262 			  void *val, int bytes)
6263 {
6264 	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL);
6265 	return X86EMUL_IO_NEEDED;
6266 }
6267 
write_exit_mmio(struct kvm_vcpu * vcpu,gpa_t gpa,void * val,int bytes)6268 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
6269 			   void *val, int bytes)
6270 {
6271 	struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
6272 
6273 	memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
6274 	return X86EMUL_CONTINUE;
6275 }
6276 
6277 static const struct read_write_emulator_ops read_emultor = {
6278 	.read_write_prepare = read_prepare,
6279 	.read_write_emulate = read_emulate,
6280 	.read_write_mmio = vcpu_mmio_read,
6281 	.read_write_exit_mmio = read_exit_mmio,
6282 };
6283 
6284 static const struct read_write_emulator_ops write_emultor = {
6285 	.read_write_emulate = write_emulate,
6286 	.read_write_mmio = write_mmio,
6287 	.read_write_exit_mmio = write_exit_mmio,
6288 	.write = true,
6289 };
6290 
emulator_read_write_onepage(unsigned long addr,void * val,unsigned int bytes,struct x86_exception * exception,struct kvm_vcpu * vcpu,const struct read_write_emulator_ops * ops)6291 static int emulator_read_write_onepage(unsigned long addr, void *val,
6292 				       unsigned int bytes,
6293 				       struct x86_exception *exception,
6294 				       struct kvm_vcpu *vcpu,
6295 				       const struct read_write_emulator_ops *ops)
6296 {
6297 	gpa_t gpa;
6298 	int handled, ret;
6299 	bool write = ops->write;
6300 	struct kvm_mmio_fragment *frag;
6301 	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
6302 
6303 	/*
6304 	 * If the exit was due to a NPF we may already have a GPA.
6305 	 * If the GPA is present, use it to avoid the GVA to GPA table walk.
6306 	 * Note, this cannot be used on string operations since string
6307 	 * operation using rep will only have the initial GPA from the NPF
6308 	 * occurred.
6309 	 */
6310 	if (ctxt->gpa_available && emulator_can_use_gpa(ctxt) &&
6311 	    (addr & ~PAGE_MASK) == (ctxt->gpa_val & ~PAGE_MASK)) {
6312 		gpa = ctxt->gpa_val;
6313 		ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write);
6314 	} else {
6315 		ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
6316 		if (ret < 0)
6317 			return X86EMUL_PROPAGATE_FAULT;
6318 	}
6319 
6320 	if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes))
6321 		return X86EMUL_CONTINUE;
6322 
6323 	/*
6324 	 * Is this MMIO handled locally?
6325 	 */
6326 	handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
6327 	if (handled == bytes)
6328 		return X86EMUL_CONTINUE;
6329 
6330 	gpa += handled;
6331 	bytes -= handled;
6332 	val += handled;
6333 
6334 	WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
6335 	frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
6336 	frag->gpa = gpa;
6337 	frag->data = val;
6338 	frag->len = bytes;
6339 	return X86EMUL_CONTINUE;
6340 }
6341 
emulator_read_write(struct x86_emulate_ctxt * ctxt,unsigned long addr,void * val,unsigned int bytes,struct x86_exception * exception,const struct read_write_emulator_ops * ops)6342 static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
6343 			unsigned long addr,
6344 			void *val, unsigned int bytes,
6345 			struct x86_exception *exception,
6346 			const struct read_write_emulator_ops *ops)
6347 {
6348 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
6349 	gpa_t gpa;
6350 	int rc;
6351 
6352 	if (ops->read_write_prepare &&
6353 		  ops->read_write_prepare(vcpu, val, bytes))
6354 		return X86EMUL_CONTINUE;
6355 
6356 	vcpu->mmio_nr_fragments = 0;
6357 
6358 	/* Crossing a page boundary? */
6359 	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
6360 		int now;
6361 
6362 		now = -addr & ~PAGE_MASK;
6363 		rc = emulator_read_write_onepage(addr, val, now, exception,
6364 						 vcpu, ops);
6365 
6366 		if (rc != X86EMUL_CONTINUE)
6367 			return rc;
6368 		addr += now;
6369 		if (ctxt->mode != X86EMUL_MODE_PROT64)
6370 			addr = (u32)addr;
6371 		val += now;
6372 		bytes -= now;
6373 	}
6374 
6375 	rc = emulator_read_write_onepage(addr, val, bytes, exception,
6376 					 vcpu, ops);
6377 	if (rc != X86EMUL_CONTINUE)
6378 		return rc;
6379 
6380 	if (!vcpu->mmio_nr_fragments)
6381 		return rc;
6382 
6383 	gpa = vcpu->mmio_fragments[0].gpa;
6384 
6385 	vcpu->mmio_needed = 1;
6386 	vcpu->mmio_cur_fragment = 0;
6387 
6388 	vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
6389 	vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
6390 	vcpu->run->exit_reason = KVM_EXIT_MMIO;
6391 	vcpu->run->mmio.phys_addr = gpa;
6392 
6393 	return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
6394 }
6395 
emulator_read_emulated(struct x86_emulate_ctxt * ctxt,unsigned long addr,void * val,unsigned int bytes,struct x86_exception * exception)6396 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
6397 				  unsigned long addr,
6398 				  void *val,
6399 				  unsigned int bytes,
6400 				  struct x86_exception *exception)
6401 {
6402 	return emulator_read_write(ctxt, addr, val, bytes,
6403 				   exception, &read_emultor);
6404 }
6405 
emulator_write_emulated(struct x86_emulate_ctxt * ctxt,unsigned long addr,const void * val,unsigned int bytes,struct x86_exception * exception)6406 static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
6407 			    unsigned long addr,
6408 			    const void *val,
6409 			    unsigned int bytes,
6410 			    struct x86_exception *exception)
6411 {
6412 	return emulator_read_write(ctxt, addr, (void *)val, bytes,
6413 				   exception, &write_emultor);
6414 }
6415 
6416 #define CMPXCHG_TYPE(t, ptr, old, new) \
6417 	(cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
6418 
6419 #ifdef CONFIG_X86_64
6420 #  define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
6421 #else
6422 #  define CMPXCHG64(ptr, old, new) \
6423 	(cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
6424 #endif
6425 
emulator_cmpxchg_emulated(struct x86_emulate_ctxt * ctxt,unsigned long addr,const void * old,const void * new,unsigned int bytes,struct x86_exception * exception)6426 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
6427 				     unsigned long addr,
6428 				     const void *old,
6429 				     const void *new,
6430 				     unsigned int bytes,
6431 				     struct x86_exception *exception)
6432 {
6433 	struct kvm_host_map map;
6434 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
6435 	u64 page_line_mask;
6436 	gpa_t gpa;
6437 	char *kaddr;
6438 	bool exchanged;
6439 
6440 	/* guests cmpxchg8b have to be emulated atomically */
6441 	if (bytes > 8 || (bytes & (bytes - 1)))
6442 		goto emul_write;
6443 
6444 	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
6445 
6446 	if (gpa == UNMAPPED_GVA ||
6447 	    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
6448 		goto emul_write;
6449 
6450 	/*
6451 	 * Emulate the atomic as a straight write to avoid #AC if SLD is
6452 	 * enabled in the host and the access splits a cache line.
6453 	 */
6454 	if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
6455 		page_line_mask = ~(cache_line_size() - 1);
6456 	else
6457 		page_line_mask = PAGE_MASK;
6458 
6459 	if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask))
6460 		goto emul_write;
6461 
6462 	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
6463 		goto emul_write;
6464 
6465 	kaddr = map.hva + offset_in_page(gpa);
6466 
6467 	switch (bytes) {
6468 	case 1:
6469 		exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
6470 		break;
6471 	case 2:
6472 		exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
6473 		break;
6474 	case 4:
6475 		exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
6476 		break;
6477 	case 8:
6478 		exchanged = CMPXCHG64(kaddr, old, new);
6479 		break;
6480 	default:
6481 		BUG();
6482 	}
6483 
6484 	kvm_vcpu_unmap(vcpu, &map, true);
6485 
6486 	if (!exchanged)
6487 		return X86EMUL_CMPXCHG_FAILED;
6488 
6489 	kvm_page_track_write(vcpu, gpa, new, bytes);
6490 
6491 	return X86EMUL_CONTINUE;
6492 
6493 emul_write:
6494 	printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
6495 
6496 	return emulator_write_emulated(ctxt, addr, new, bytes, exception);
6497 }
6498 
kernel_pio(struct kvm_vcpu * vcpu,void * pd)6499 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
6500 {
6501 	int r = 0, i;
6502 
6503 	for (i = 0; i < vcpu->arch.pio.count; i++) {
6504 		if (vcpu->arch.pio.in)
6505 			r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
6506 					    vcpu->arch.pio.size, pd);
6507 		else
6508 			r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
6509 					     vcpu->arch.pio.port, vcpu->arch.pio.size,
6510 					     pd);
6511 		if (r)
6512 			break;
6513 		pd += vcpu->arch.pio.size;
6514 	}
6515 	return r;
6516 }
6517 
emulator_pio_in_out(struct kvm_vcpu * vcpu,int size,unsigned short port,void * val,unsigned int count,bool in)6518 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
6519 			       unsigned short port, void *val,
6520 			       unsigned int count, bool in)
6521 {
6522 	vcpu->arch.pio.port = port;
6523 	vcpu->arch.pio.in = in;
6524 	vcpu->arch.pio.count  = count;
6525 	vcpu->arch.pio.size = size;
6526 
6527 	if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
6528 		vcpu->arch.pio.count = 0;
6529 		return 1;
6530 	}
6531 
6532 	vcpu->run->exit_reason = KVM_EXIT_IO;
6533 	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
6534 	vcpu->run->io.size = size;
6535 	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
6536 	vcpu->run->io.count = count;
6537 	vcpu->run->io.port = port;
6538 
6539 	return 0;
6540 }
6541 
emulator_pio_in(struct kvm_vcpu * vcpu,int size,unsigned short port,void * val,unsigned int count)6542 static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
6543 			   unsigned short port, void *val, unsigned int count)
6544 {
6545 	int ret;
6546 
6547 	if (vcpu->arch.pio.count)
6548 		goto data_avail;
6549 
6550 	memset(vcpu->arch.pio_data, 0, size * count);
6551 
6552 	ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
6553 	if (ret) {
6554 data_avail:
6555 		memcpy(val, vcpu->arch.pio_data, size * count);
6556 		trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data);
6557 		vcpu->arch.pio.count = 0;
6558 		return 1;
6559 	}
6560 
6561 	return 0;
6562 }
6563 
emulator_pio_in_emulated(struct x86_emulate_ctxt * ctxt,int size,unsigned short port,void * val,unsigned int count)6564 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
6565 				    int size, unsigned short port, void *val,
6566 				    unsigned int count)
6567 {
6568 	return emulator_pio_in(emul_to_vcpu(ctxt), size, port, val, count);
6569 
6570 }
6571 
emulator_pio_out(struct kvm_vcpu * vcpu,int size,unsigned short port,const void * val,unsigned int count)6572 static int emulator_pio_out(struct kvm_vcpu *vcpu, int size,
6573 			    unsigned short port, const void *val,
6574 			    unsigned int count)
6575 {
6576 	memcpy(vcpu->arch.pio_data, val, size * count);
6577 	trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
6578 	return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
6579 }
6580 
emulator_pio_out_emulated(struct x86_emulate_ctxt * ctxt,int size,unsigned short port,const void * val,unsigned int count)6581 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
6582 				     int size, unsigned short port,
6583 				     const void *val, unsigned int count)
6584 {
6585 	return emulator_pio_out(emul_to_vcpu(ctxt), size, port, val, count);
6586 }
6587 
get_segment_base(struct kvm_vcpu * vcpu,int seg)6588 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
6589 {
6590 	return kvm_x86_ops.get_segment_base(vcpu, seg);
6591 }
6592 
emulator_invlpg(struct x86_emulate_ctxt * ctxt,ulong address)6593 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
6594 {
6595 	kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
6596 }
6597 
kvm_emulate_wbinvd_noskip(struct kvm_vcpu * vcpu)6598 static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
6599 {
6600 	if (!need_emulate_wbinvd(vcpu))
6601 		return X86EMUL_CONTINUE;
6602 
6603 	if (kvm_x86_ops.has_wbinvd_exit()) {
6604 		int cpu = get_cpu();
6605 
6606 		cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
6607 		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
6608 				wbinvd_ipi, NULL, 1);
6609 		put_cpu();
6610 		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
6611 	} else
6612 		wbinvd();
6613 	return X86EMUL_CONTINUE;
6614 }
6615 
kvm_emulate_wbinvd(struct kvm_vcpu * vcpu)6616 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
6617 {
6618 	kvm_emulate_wbinvd_noskip(vcpu);
6619 	return kvm_skip_emulated_instruction(vcpu);
6620 }
6621 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
6622 
6623 
6624 
emulator_wbinvd(struct x86_emulate_ctxt * ctxt)6625 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
6626 {
6627 	kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
6628 }
6629 
emulator_get_dr(struct x86_emulate_ctxt * ctxt,int dr,unsigned long * dest)6630 static int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
6631 			   unsigned long *dest)
6632 {
6633 	return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
6634 }
6635 
emulator_set_dr(struct x86_emulate_ctxt * ctxt,int dr,unsigned long value)6636 static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
6637 			   unsigned long value)
6638 {
6639 
6640 	return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
6641 }
6642 
mk_cr_64(u64 curr_cr,u32 new_val)6643 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
6644 {
6645 	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
6646 }
6647 
emulator_get_cr(struct x86_emulate_ctxt * ctxt,int cr)6648 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
6649 {
6650 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
6651 	unsigned long value;
6652 
6653 	switch (cr) {
6654 	case 0:
6655 		value = kvm_read_cr0(vcpu);
6656 		break;
6657 	case 2:
6658 		value = vcpu->arch.cr2;
6659 		break;
6660 	case 3:
6661 		value = kvm_read_cr3(vcpu);
6662 		break;
6663 	case 4:
6664 		value = kvm_read_cr4(vcpu);
6665 		break;
6666 	case 8:
6667 		value = kvm_get_cr8(vcpu);
6668 		break;
6669 	default:
6670 		kvm_err("%s: unexpected cr %u\n", __func__, cr);
6671 		return 0;
6672 	}
6673 
6674 	return value;
6675 }
6676 
emulator_set_cr(struct x86_emulate_ctxt * ctxt,int cr,ulong val)6677 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
6678 {
6679 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
6680 	int res = 0;
6681 
6682 	switch (cr) {
6683 	case 0:
6684 		res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
6685 		break;
6686 	case 2:
6687 		vcpu->arch.cr2 = val;
6688 		break;
6689 	case 3:
6690 		res = kvm_set_cr3(vcpu, val);
6691 		break;
6692 	case 4:
6693 		res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
6694 		break;
6695 	case 8:
6696 		res = kvm_set_cr8(vcpu, val);
6697 		break;
6698 	default:
6699 		kvm_err("%s: unexpected cr %u\n", __func__, cr);
6700 		res = -1;
6701 	}
6702 
6703 	return res;
6704 }
6705 
emulator_get_cpl(struct x86_emulate_ctxt * ctxt)6706 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
6707 {
6708 	return kvm_x86_ops.get_cpl(emul_to_vcpu(ctxt));
6709 }
6710 
emulator_get_gdt(struct x86_emulate_ctxt * ctxt,struct desc_ptr * dt)6711 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
6712 {
6713 	kvm_x86_ops.get_gdt(emul_to_vcpu(ctxt), dt);
6714 }
6715 
emulator_get_idt(struct x86_emulate_ctxt * ctxt,struct desc_ptr * dt)6716 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
6717 {
6718 	kvm_x86_ops.get_idt(emul_to_vcpu(ctxt), dt);
6719 }
6720 
emulator_set_gdt(struct x86_emulate_ctxt * ctxt,struct desc_ptr * dt)6721 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
6722 {
6723 	kvm_x86_ops.set_gdt(emul_to_vcpu(ctxt), dt);
6724 }
6725 
emulator_set_idt(struct x86_emulate_ctxt * ctxt,struct desc_ptr * dt)6726 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
6727 {
6728 	kvm_x86_ops.set_idt(emul_to_vcpu(ctxt), dt);
6729 }
6730 
emulator_get_cached_segment_base(struct x86_emulate_ctxt * ctxt,int seg)6731 static unsigned long emulator_get_cached_segment_base(
6732 	struct x86_emulate_ctxt *ctxt, int seg)
6733 {
6734 	return get_segment_base(emul_to_vcpu(ctxt), seg);
6735 }
6736 
emulator_get_segment(struct x86_emulate_ctxt * ctxt,u16 * selector,struct desc_struct * desc,u32 * base3,int seg)6737 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
6738 				 struct desc_struct *desc, u32 *base3,
6739 				 int seg)
6740 {
6741 	struct kvm_segment var;
6742 
6743 	kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
6744 	*selector = var.selector;
6745 
6746 	if (var.unusable) {
6747 		memset(desc, 0, sizeof(*desc));
6748 		if (base3)
6749 			*base3 = 0;
6750 		return false;
6751 	}
6752 
6753 	if (var.g)
6754 		var.limit >>= 12;
6755 	set_desc_limit(desc, var.limit);
6756 	set_desc_base(desc, (unsigned long)var.base);
6757 #ifdef CONFIG_X86_64
6758 	if (base3)
6759 		*base3 = var.base >> 32;
6760 #endif
6761 	desc->type = var.type;
6762 	desc->s = var.s;
6763 	desc->dpl = var.dpl;
6764 	desc->p = var.present;
6765 	desc->avl = var.avl;
6766 	desc->l = var.l;
6767 	desc->d = var.db;
6768 	desc->g = var.g;
6769 
6770 	return true;
6771 }
6772 
emulator_set_segment(struct x86_emulate_ctxt * ctxt,u16 selector,struct desc_struct * desc,u32 base3,int seg)6773 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
6774 				 struct desc_struct *desc, u32 base3,
6775 				 int seg)
6776 {
6777 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
6778 	struct kvm_segment var;
6779 
6780 	var.selector = selector;
6781 	var.base = get_desc_base(desc);
6782 #ifdef CONFIG_X86_64
6783 	var.base |= ((u64)base3) << 32;
6784 #endif
6785 	var.limit = get_desc_limit(desc);
6786 	if (desc->g)
6787 		var.limit = (var.limit << 12) | 0xfff;
6788 	var.type = desc->type;
6789 	var.dpl = desc->dpl;
6790 	var.db = desc->d;
6791 	var.s = desc->s;
6792 	var.l = desc->l;
6793 	var.g = desc->g;
6794 	var.avl = desc->avl;
6795 	var.present = desc->p;
6796 	var.unusable = !var.present;
6797 	var.padding = 0;
6798 
6799 	kvm_set_segment(vcpu, &var, seg);
6800 	return;
6801 }
6802 
emulator_get_msr(struct x86_emulate_ctxt * ctxt,u32 msr_index,u64 * pdata)6803 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
6804 			    u32 msr_index, u64 *pdata)
6805 {
6806 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
6807 	int r;
6808 
6809 	r = kvm_get_msr(vcpu, msr_index, pdata);
6810 
6811 	if (r && kvm_get_msr_user_space(vcpu, msr_index, r)) {
6812 		/* Bounce to user space */
6813 		return X86EMUL_IO_NEEDED;
6814 	}
6815 
6816 	return r;
6817 }
6818 
emulator_set_msr(struct x86_emulate_ctxt * ctxt,u32 msr_index,u64 data)6819 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
6820 			    u32 msr_index, u64 data)
6821 {
6822 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
6823 	int r;
6824 
6825 	r = kvm_set_msr(vcpu, msr_index, data);
6826 
6827 	if (r && kvm_set_msr_user_space(vcpu, msr_index, data, r)) {
6828 		/* Bounce to user space */
6829 		return X86EMUL_IO_NEEDED;
6830 	}
6831 
6832 	return r;
6833 }
6834 
emulator_get_smbase(struct x86_emulate_ctxt * ctxt)6835 static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt)
6836 {
6837 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
6838 
6839 	return vcpu->arch.smbase;
6840 }
6841 
emulator_set_smbase(struct x86_emulate_ctxt * ctxt,u64 smbase)6842 static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase)
6843 {
6844 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
6845 
6846 	vcpu->arch.smbase = smbase;
6847 }
6848 
emulator_check_pmc(struct x86_emulate_ctxt * ctxt,u32 pmc)6849 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
6850 			      u32 pmc)
6851 {
6852 	return kvm_pmu_is_valid_rdpmc_ecx(emul_to_vcpu(ctxt), pmc);
6853 }
6854 
emulator_read_pmc(struct x86_emulate_ctxt * ctxt,u32 pmc,u64 * pdata)6855 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
6856 			     u32 pmc, u64 *pdata)
6857 {
6858 	return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata);
6859 }
6860 
emulator_halt(struct x86_emulate_ctxt * ctxt)6861 static void emulator_halt(struct x86_emulate_ctxt *ctxt)
6862 {
6863 	emul_to_vcpu(ctxt)->arch.halt_request = 1;
6864 }
6865 
emulator_intercept(struct x86_emulate_ctxt * ctxt,struct x86_instruction_info * info,enum x86_intercept_stage stage)6866 static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
6867 			      struct x86_instruction_info *info,
6868 			      enum x86_intercept_stage stage)
6869 {
6870 	return kvm_x86_ops.check_intercept(emul_to_vcpu(ctxt), info, stage,
6871 					    &ctxt->exception);
6872 }
6873 
emulator_get_cpuid(struct x86_emulate_ctxt * ctxt,u32 * eax,u32 * ebx,u32 * ecx,u32 * edx,bool exact_only)6874 static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
6875 			      u32 *eax, u32 *ebx, u32 *ecx, u32 *edx,
6876 			      bool exact_only)
6877 {
6878 	return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, exact_only);
6879 }
6880 
emulator_guest_has_long_mode(struct x86_emulate_ctxt * ctxt)6881 static bool emulator_guest_has_long_mode(struct x86_emulate_ctxt *ctxt)
6882 {
6883 	return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_LM);
6884 }
6885 
emulator_guest_has_movbe(struct x86_emulate_ctxt * ctxt)6886 static bool emulator_guest_has_movbe(struct x86_emulate_ctxt *ctxt)
6887 {
6888 	return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_MOVBE);
6889 }
6890 
emulator_guest_has_fxsr(struct x86_emulate_ctxt * ctxt)6891 static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt)
6892 {
6893 	return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR);
6894 }
6895 
emulator_read_gpr(struct x86_emulate_ctxt * ctxt,unsigned reg)6896 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
6897 {
6898 	return kvm_register_read(emul_to_vcpu(ctxt), reg);
6899 }
6900 
emulator_write_gpr(struct x86_emulate_ctxt * ctxt,unsigned reg,ulong val)6901 static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
6902 {
6903 	kvm_register_write(emul_to_vcpu(ctxt), reg, val);
6904 }
6905 
emulator_set_nmi_mask(struct x86_emulate_ctxt * ctxt,bool masked)6906 static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
6907 {
6908 	kvm_x86_ops.set_nmi_mask(emul_to_vcpu(ctxt), masked);
6909 }
6910 
emulator_get_hflags(struct x86_emulate_ctxt * ctxt)6911 static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
6912 {
6913 	return emul_to_vcpu(ctxt)->arch.hflags;
6914 }
6915 
emulator_set_hflags(struct x86_emulate_ctxt * ctxt,unsigned emul_flags)6916 static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
6917 {
6918 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
6919 
6920 	vcpu->arch.hflags = emul_flags;
6921 	kvm_mmu_reset_context(vcpu);
6922 }
6923 
emulator_pre_leave_smm(struct x86_emulate_ctxt * ctxt,const char * smstate)6924 static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
6925 				  const char *smstate)
6926 {
6927 	return kvm_x86_ops.pre_leave_smm(emul_to_vcpu(ctxt), smstate);
6928 }
6929 
emulator_post_leave_smm(struct x86_emulate_ctxt * ctxt)6930 static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt)
6931 {
6932 	kvm_smm_changed(emul_to_vcpu(ctxt));
6933 }
6934 
emulator_set_xcr(struct x86_emulate_ctxt * ctxt,u32 index,u64 xcr)6935 static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr)
6936 {
6937 	return __kvm_set_xcr(emul_to_vcpu(ctxt), index, xcr);
6938 }
6939 
6940 static const struct x86_emulate_ops emulate_ops = {
6941 	.read_gpr            = emulator_read_gpr,
6942 	.write_gpr           = emulator_write_gpr,
6943 	.read_std            = emulator_read_std,
6944 	.write_std           = emulator_write_std,
6945 	.read_phys           = kvm_read_guest_phys_system,
6946 	.fetch               = kvm_fetch_guest_virt,
6947 	.read_emulated       = emulator_read_emulated,
6948 	.write_emulated      = emulator_write_emulated,
6949 	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
6950 	.invlpg              = emulator_invlpg,
6951 	.pio_in_emulated     = emulator_pio_in_emulated,
6952 	.pio_out_emulated    = emulator_pio_out_emulated,
6953 	.get_segment         = emulator_get_segment,
6954 	.set_segment         = emulator_set_segment,
6955 	.get_cached_segment_base = emulator_get_cached_segment_base,
6956 	.get_gdt             = emulator_get_gdt,
6957 	.get_idt	     = emulator_get_idt,
6958 	.set_gdt             = emulator_set_gdt,
6959 	.set_idt	     = emulator_set_idt,
6960 	.get_cr              = emulator_get_cr,
6961 	.set_cr              = emulator_set_cr,
6962 	.cpl                 = emulator_get_cpl,
6963 	.get_dr              = emulator_get_dr,
6964 	.set_dr              = emulator_set_dr,
6965 	.get_smbase          = emulator_get_smbase,
6966 	.set_smbase          = emulator_set_smbase,
6967 	.set_msr             = emulator_set_msr,
6968 	.get_msr             = emulator_get_msr,
6969 	.check_pmc	     = emulator_check_pmc,
6970 	.read_pmc            = emulator_read_pmc,
6971 	.halt                = emulator_halt,
6972 	.wbinvd              = emulator_wbinvd,
6973 	.fix_hypercall       = emulator_fix_hypercall,
6974 	.intercept           = emulator_intercept,
6975 	.get_cpuid           = emulator_get_cpuid,
6976 	.guest_has_long_mode = emulator_guest_has_long_mode,
6977 	.guest_has_movbe     = emulator_guest_has_movbe,
6978 	.guest_has_fxsr      = emulator_guest_has_fxsr,
6979 	.set_nmi_mask        = emulator_set_nmi_mask,
6980 	.get_hflags          = emulator_get_hflags,
6981 	.set_hflags          = emulator_set_hflags,
6982 	.pre_leave_smm       = emulator_pre_leave_smm,
6983 	.post_leave_smm      = emulator_post_leave_smm,
6984 	.set_xcr             = emulator_set_xcr,
6985 };
6986 
toggle_interruptibility(struct kvm_vcpu * vcpu,u32 mask)6987 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
6988 {
6989 	u32 int_shadow = kvm_x86_ops.get_interrupt_shadow(vcpu);
6990 	/*
6991 	 * an sti; sti; sequence only disable interrupts for the first
6992 	 * instruction. So, if the last instruction, be it emulated or
6993 	 * not, left the system with the INT_STI flag enabled, it
6994 	 * means that the last instruction is an sti. We should not
6995 	 * leave the flag on in this case. The same goes for mov ss
6996 	 */
6997 	if (int_shadow & mask)
6998 		mask = 0;
6999 	if (unlikely(int_shadow || mask)) {
7000 		kvm_x86_ops.set_interrupt_shadow(vcpu, mask);
7001 		if (!mask)
7002 			kvm_make_request(KVM_REQ_EVENT, vcpu);
7003 	}
7004 }
7005 
inject_emulated_exception(struct kvm_vcpu * vcpu)7006 static bool inject_emulated_exception(struct kvm_vcpu *vcpu)
7007 {
7008 	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
7009 	if (ctxt->exception.vector == PF_VECTOR)
7010 		return kvm_inject_emulated_page_fault(vcpu, &ctxt->exception);
7011 
7012 	if (ctxt->exception.error_code_valid)
7013 		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
7014 				      ctxt->exception.error_code);
7015 	else
7016 		kvm_queue_exception(vcpu, ctxt->exception.vector);
7017 	return false;
7018 }
7019 
alloc_emulate_ctxt(struct kvm_vcpu * vcpu)7020 static struct x86_emulate_ctxt *alloc_emulate_ctxt(struct kvm_vcpu *vcpu)
7021 {
7022 	struct x86_emulate_ctxt *ctxt;
7023 
7024 	ctxt = kmem_cache_zalloc(x86_emulator_cache, GFP_KERNEL_ACCOUNT);
7025 	if (!ctxt) {
7026 		pr_err("kvm: failed to allocate vcpu's emulator\n");
7027 		return NULL;
7028 	}
7029 
7030 	ctxt->vcpu = vcpu;
7031 	ctxt->ops = &emulate_ops;
7032 	vcpu->arch.emulate_ctxt = ctxt;
7033 
7034 	return ctxt;
7035 }
7036 
init_emulate_ctxt(struct kvm_vcpu * vcpu)7037 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
7038 {
7039 	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
7040 	int cs_db, cs_l;
7041 
7042 	kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
7043 
7044 	ctxt->gpa_available = false;
7045 	ctxt->eflags = kvm_get_rflags(vcpu);
7046 	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
7047 
7048 	ctxt->eip = kvm_rip_read(vcpu);
7049 	ctxt->mode = (!is_protmode(vcpu))		? X86EMUL_MODE_REAL :
7050 		     (ctxt->eflags & X86_EFLAGS_VM)	? X86EMUL_MODE_VM86 :
7051 		     (cs_l && is_long_mode(vcpu))	? X86EMUL_MODE_PROT64 :
7052 		     cs_db				? X86EMUL_MODE_PROT32 :
7053 							  X86EMUL_MODE_PROT16;
7054 	BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
7055 	BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
7056 	BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
7057 
7058 	ctxt->interruptibility = 0;
7059 	ctxt->have_exception = false;
7060 	ctxt->exception.vector = -1;
7061 	ctxt->perm_ok = false;
7062 
7063 	init_decode_cache(ctxt);
7064 	vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
7065 }
7066 
kvm_inject_realmode_interrupt(struct kvm_vcpu * vcpu,int irq,int inc_eip)7067 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
7068 {
7069 	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
7070 	int ret;
7071 
7072 	init_emulate_ctxt(vcpu);
7073 
7074 	ctxt->op_bytes = 2;
7075 	ctxt->ad_bytes = 2;
7076 	ctxt->_eip = ctxt->eip + inc_eip;
7077 	ret = emulate_int_real(ctxt, irq);
7078 
7079 	if (ret != X86EMUL_CONTINUE) {
7080 		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
7081 	} else {
7082 		ctxt->eip = ctxt->_eip;
7083 		kvm_rip_write(vcpu, ctxt->eip);
7084 		kvm_set_rflags(vcpu, ctxt->eflags);
7085 	}
7086 }
7087 EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
7088 
handle_emulation_failure(struct kvm_vcpu * vcpu,int emulation_type)7089 static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
7090 {
7091 	++vcpu->stat.insn_emulation_fail;
7092 	trace_kvm_emulate_insn_failed(vcpu);
7093 
7094 	if (emulation_type & EMULTYPE_VMWARE_GP) {
7095 		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
7096 		return 1;
7097 	}
7098 
7099 	if (emulation_type & EMULTYPE_SKIP) {
7100 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
7101 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
7102 		vcpu->run->internal.ndata = 0;
7103 		return 0;
7104 	}
7105 
7106 	kvm_queue_exception(vcpu, UD_VECTOR);
7107 
7108 	if (!is_guest_mode(vcpu) && kvm_x86_ops.get_cpl(vcpu) == 0) {
7109 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
7110 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
7111 		vcpu->run->internal.ndata = 0;
7112 		return 0;
7113 	}
7114 
7115 	return 1;
7116 }
7117 
reexecute_instruction(struct kvm_vcpu * vcpu,gpa_t cr2_or_gpa,bool write_fault_to_shadow_pgtable,int emulation_type)7118 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
7119 				  bool write_fault_to_shadow_pgtable,
7120 				  int emulation_type)
7121 {
7122 	gpa_t gpa = cr2_or_gpa;
7123 	kvm_pfn_t pfn;
7124 
7125 	if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
7126 		return false;
7127 
7128 	if (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
7129 	    WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
7130 		return false;
7131 
7132 	if (!vcpu->arch.mmu->direct_map) {
7133 		/*
7134 		 * Write permission should be allowed since only
7135 		 * write access need to be emulated.
7136 		 */
7137 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
7138 
7139 		/*
7140 		 * If the mapping is invalid in guest, let cpu retry
7141 		 * it to generate fault.
7142 		 */
7143 		if (gpa == UNMAPPED_GVA)
7144 			return true;
7145 	}
7146 
7147 	/*
7148 	 * Do not retry the unhandleable instruction if it faults on the
7149 	 * readonly host memory, otherwise it will goto a infinite loop:
7150 	 * retry instruction -> write #PF -> emulation fail -> retry
7151 	 * instruction -> ...
7152 	 */
7153 	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
7154 
7155 	/*
7156 	 * If the instruction failed on the error pfn, it can not be fixed,
7157 	 * report the error to userspace.
7158 	 */
7159 	if (is_error_noslot_pfn(pfn))
7160 		return false;
7161 
7162 	kvm_release_pfn_clean(pfn);
7163 
7164 	/* The instructions are well-emulated on direct mmu. */
7165 	if (vcpu->arch.mmu->direct_map) {
7166 		unsigned int indirect_shadow_pages;
7167 
7168 		spin_lock(&vcpu->kvm->mmu_lock);
7169 		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
7170 		spin_unlock(&vcpu->kvm->mmu_lock);
7171 
7172 		if (indirect_shadow_pages)
7173 			kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
7174 
7175 		return true;
7176 	}
7177 
7178 	/*
7179 	 * if emulation was due to access to shadowed page table
7180 	 * and it failed try to unshadow page and re-enter the
7181 	 * guest to let CPU execute the instruction.
7182 	 */
7183 	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
7184 
7185 	/*
7186 	 * If the access faults on its page table, it can not
7187 	 * be fixed by unprotecting shadow page and it should
7188 	 * be reported to userspace.
7189 	 */
7190 	return !write_fault_to_shadow_pgtable;
7191 }
7192 
retry_instruction(struct x86_emulate_ctxt * ctxt,gpa_t cr2_or_gpa,int emulation_type)7193 static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
7194 			      gpa_t cr2_or_gpa,  int emulation_type)
7195 {
7196 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7197 	unsigned long last_retry_eip, last_retry_addr, gpa = cr2_or_gpa;
7198 
7199 	last_retry_eip = vcpu->arch.last_retry_eip;
7200 	last_retry_addr = vcpu->arch.last_retry_addr;
7201 
7202 	/*
7203 	 * If the emulation is caused by #PF and it is non-page_table
7204 	 * writing instruction, it means the VM-EXIT is caused by shadow
7205 	 * page protected, we can zap the shadow page and retry this
7206 	 * instruction directly.
7207 	 *
7208 	 * Note: if the guest uses a non-page-table modifying instruction
7209 	 * on the PDE that points to the instruction, then we will unmap
7210 	 * the instruction and go to an infinite loop. So, we cache the
7211 	 * last retried eip and the last fault address, if we meet the eip
7212 	 * and the address again, we can break out of the potential infinite
7213 	 * loop.
7214 	 */
7215 	vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
7216 
7217 	if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
7218 		return false;
7219 
7220 	if (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
7221 	    WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
7222 		return false;
7223 
7224 	if (x86_page_table_writing_insn(ctxt))
7225 		return false;
7226 
7227 	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa)
7228 		return false;
7229 
7230 	vcpu->arch.last_retry_eip = ctxt->eip;
7231 	vcpu->arch.last_retry_addr = cr2_or_gpa;
7232 
7233 	if (!vcpu->arch.mmu->direct_map)
7234 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
7235 
7236 	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
7237 
7238 	return true;
7239 }
7240 
7241 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
7242 static int complete_emulated_pio(struct kvm_vcpu *vcpu);
7243 
kvm_smm_changed(struct kvm_vcpu * vcpu)7244 static void kvm_smm_changed(struct kvm_vcpu *vcpu)
7245 {
7246 	if (!(vcpu->arch.hflags & HF_SMM_MASK)) {
7247 		/* This is a good place to trace that we are exiting SMM.  */
7248 		trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);
7249 
7250 		/* Process a latched INIT or SMI, if any.  */
7251 		kvm_make_request(KVM_REQ_EVENT, vcpu);
7252 	}
7253 
7254 	kvm_mmu_reset_context(vcpu);
7255 }
7256 
kvm_vcpu_check_hw_bp(unsigned long addr,u32 type,u32 dr7,unsigned long * db)7257 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
7258 				unsigned long *db)
7259 {
7260 	u32 dr6 = 0;
7261 	int i;
7262 	u32 enable, rwlen;
7263 
7264 	enable = dr7;
7265 	rwlen = dr7 >> 16;
7266 	for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4)
7267 		if ((enable & 3) && (rwlen & 15) == type && db[i] == addr)
7268 			dr6 |= (1 << i);
7269 	return dr6;
7270 }
7271 
kvm_vcpu_do_singlestep(struct kvm_vcpu * vcpu)7272 static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu)
7273 {
7274 	struct kvm_run *kvm_run = vcpu->run;
7275 
7276 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
7277 		kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
7278 		kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
7279 		kvm_run->debug.arch.exception = DB_VECTOR;
7280 		kvm_run->exit_reason = KVM_EXIT_DEBUG;
7281 		return 0;
7282 	}
7283 	kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS);
7284 	return 1;
7285 }
7286 
kvm_skip_emulated_instruction(struct kvm_vcpu * vcpu)7287 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
7288 {
7289 	unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);
7290 	int r;
7291 
7292 	r = kvm_x86_ops.skip_emulated_instruction(vcpu);
7293 	if (unlikely(!r))
7294 		return 0;
7295 
7296 	/*
7297 	 * rflags is the old, "raw" value of the flags.  The new value has
7298 	 * not been saved yet.
7299 	 *
7300 	 * This is correct even for TF set by the guest, because "the
7301 	 * processor will not generate this exception after the instruction
7302 	 * that sets the TF flag".
7303 	 */
7304 	if (unlikely(rflags & X86_EFLAGS_TF))
7305 		r = kvm_vcpu_do_singlestep(vcpu);
7306 	return r;
7307 }
7308 EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
7309 
kvm_vcpu_check_breakpoint(struct kvm_vcpu * vcpu,int * r)7310 static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
7311 {
7312 	if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
7313 	    (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
7314 		struct kvm_run *kvm_run = vcpu->run;
7315 		unsigned long eip = kvm_get_linear_rip(vcpu);
7316 		u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
7317 					   vcpu->arch.guest_debug_dr7,
7318 					   vcpu->arch.eff_db);
7319 
7320 		if (dr6 != 0) {
7321 			kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
7322 			kvm_run->debug.arch.pc = eip;
7323 			kvm_run->debug.arch.exception = DB_VECTOR;
7324 			kvm_run->exit_reason = KVM_EXIT_DEBUG;
7325 			*r = 0;
7326 			return true;
7327 		}
7328 	}
7329 
7330 	if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
7331 	    !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) {
7332 		unsigned long eip = kvm_get_linear_rip(vcpu);
7333 		u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
7334 					   vcpu->arch.dr7,
7335 					   vcpu->arch.db);
7336 
7337 		if (dr6 != 0) {
7338 			kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
7339 			*r = 1;
7340 			return true;
7341 		}
7342 	}
7343 
7344 	return false;
7345 }
7346 
is_vmware_backdoor_opcode(struct x86_emulate_ctxt * ctxt)7347 static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
7348 {
7349 	switch (ctxt->opcode_len) {
7350 	case 1:
7351 		switch (ctxt->b) {
7352 		case 0xe4:	/* IN */
7353 		case 0xe5:
7354 		case 0xec:
7355 		case 0xed:
7356 		case 0xe6:	/* OUT */
7357 		case 0xe7:
7358 		case 0xee:
7359 		case 0xef:
7360 		case 0x6c:	/* INS */
7361 		case 0x6d:
7362 		case 0x6e:	/* OUTS */
7363 		case 0x6f:
7364 			return true;
7365 		}
7366 		break;
7367 	case 2:
7368 		switch (ctxt->b) {
7369 		case 0x33:	/* RDPMC */
7370 			return true;
7371 		}
7372 		break;
7373 	}
7374 
7375 	return false;
7376 }
7377 
7378 /*
7379  * Decode to be emulated instruction. Return EMULATION_OK if success.
7380  */
x86_decode_emulated_instruction(struct kvm_vcpu * vcpu,int emulation_type,void * insn,int insn_len)7381 int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
7382 				    void *insn, int insn_len)
7383 {
7384 	int r = EMULATION_OK;
7385 	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
7386 
7387 	init_emulate_ctxt(vcpu);
7388 
7389 	/*
7390 	 * We will reenter on the same instruction since we do not set
7391 	 * complete_userspace_io. This does not handle watchpoints yet,
7392 	 * those would be handled in the emulate_ops.
7393 	 */
7394 	if (!(emulation_type & EMULTYPE_SKIP) &&
7395 	    kvm_vcpu_check_breakpoint(vcpu, &r))
7396 		return r;
7397 
7398 	ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
7399 
7400 	r = x86_decode_insn(ctxt, insn, insn_len);
7401 
7402 	trace_kvm_emulate_insn_start(vcpu);
7403 	++vcpu->stat.insn_emulation;
7404 
7405 	return r;
7406 }
7407 EXPORT_SYMBOL_GPL(x86_decode_emulated_instruction);
7408 
x86_emulate_instruction(struct kvm_vcpu * vcpu,gpa_t cr2_or_gpa,int emulation_type,void * insn,int insn_len)7409 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
7410 			    int emulation_type, void *insn, int insn_len)
7411 {
7412 	int r;
7413 	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
7414 	bool writeback = true;
7415 	bool write_fault_to_spt;
7416 
7417 	if (unlikely(!kvm_x86_ops.can_emulate_instruction(vcpu, insn, insn_len)))
7418 		return 1;
7419 
7420 	vcpu->arch.l1tf_flush_l1d = true;
7421 
7422 	/*
7423 	 * Clear write_fault_to_shadow_pgtable here to ensure it is
7424 	 * never reused.
7425 	 */
7426 	write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
7427 	vcpu->arch.write_fault_to_shadow_pgtable = false;
7428 
7429 	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
7430 		kvm_clear_exception_queue(vcpu);
7431 
7432 		r = x86_decode_emulated_instruction(vcpu, emulation_type,
7433 						    insn, insn_len);
7434 		if (r != EMULATION_OK)  {
7435 			if ((emulation_type & EMULTYPE_TRAP_UD) ||
7436 			    (emulation_type & EMULTYPE_TRAP_UD_FORCED)) {
7437 				kvm_queue_exception(vcpu, UD_VECTOR);
7438 				return 1;
7439 			}
7440 			if (reexecute_instruction(vcpu, cr2_or_gpa,
7441 						  write_fault_to_spt,
7442 						  emulation_type))
7443 				return 1;
7444 			if (ctxt->have_exception) {
7445 				/*
7446 				 * #UD should result in just EMULATION_FAILED, and trap-like
7447 				 * exception should not be encountered during decode.
7448 				 */
7449 				WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR ||
7450 					     exception_type(ctxt->exception.vector) == EXCPT_TRAP);
7451 				inject_emulated_exception(vcpu);
7452 				return 1;
7453 			}
7454 			return handle_emulation_failure(vcpu, emulation_type);
7455 		}
7456 	}
7457 
7458 	if ((emulation_type & EMULTYPE_VMWARE_GP) &&
7459 	    !is_vmware_backdoor_opcode(ctxt)) {
7460 		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
7461 		return 1;
7462 	}
7463 
7464 	/*
7465 	 * Note, EMULTYPE_SKIP is intended for use *only* by vendor callbacks
7466 	 * for kvm_skip_emulated_instruction().  The caller is responsible for
7467 	 * updating interruptibility state and injecting single-step #DBs.
7468 	 */
7469 	if (emulation_type & EMULTYPE_SKIP) {
7470 		kvm_rip_write(vcpu, ctxt->_eip);
7471 		if (ctxt->eflags & X86_EFLAGS_RF)
7472 			kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
7473 		return 1;
7474 	}
7475 
7476 	if (retry_instruction(ctxt, cr2_or_gpa, emulation_type))
7477 		return 1;
7478 
7479 	/* this is needed for vmware backdoor interface to work since it
7480 	   changes registers values  during IO operation */
7481 	if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
7482 		vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
7483 		emulator_invalidate_register_cache(ctxt);
7484 	}
7485 
7486 restart:
7487 	if (emulation_type & EMULTYPE_PF) {
7488 		/* Save the faulting GPA (cr2) in the address field */
7489 		ctxt->exception.address = cr2_or_gpa;
7490 
7491 		/* With shadow page tables, cr2 contains a GVA or nGPA. */
7492 		if (vcpu->arch.mmu->direct_map) {
7493 			ctxt->gpa_available = true;
7494 			ctxt->gpa_val = cr2_or_gpa;
7495 		}
7496 	} else {
7497 		/* Sanitize the address out of an abundance of paranoia. */
7498 		ctxt->exception.address = 0;
7499 	}
7500 
7501 	r = x86_emulate_insn(ctxt);
7502 
7503 	if (r == EMULATION_INTERCEPTED)
7504 		return 1;
7505 
7506 	if (r == EMULATION_FAILED) {
7507 		if (reexecute_instruction(vcpu, cr2_or_gpa, write_fault_to_spt,
7508 					emulation_type))
7509 			return 1;
7510 
7511 		return handle_emulation_failure(vcpu, emulation_type);
7512 	}
7513 
7514 	if (ctxt->have_exception) {
7515 		r = 1;
7516 		if (inject_emulated_exception(vcpu))
7517 			return r;
7518 	} else if (vcpu->arch.pio.count) {
7519 		if (!vcpu->arch.pio.in) {
7520 			/* FIXME: return into emulator if single-stepping.  */
7521 			vcpu->arch.pio.count = 0;
7522 		} else {
7523 			writeback = false;
7524 			vcpu->arch.complete_userspace_io = complete_emulated_pio;
7525 		}
7526 		r = 0;
7527 	} else if (vcpu->mmio_needed) {
7528 		++vcpu->stat.mmio_exits;
7529 
7530 		if (!vcpu->mmio_is_write)
7531 			writeback = false;
7532 		r = 0;
7533 		vcpu->arch.complete_userspace_io = complete_emulated_mmio;
7534 	} else if (r == EMULATION_RESTART)
7535 		goto restart;
7536 	else
7537 		r = 1;
7538 
7539 	if (writeback) {
7540 		unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);
7541 		toggle_interruptibility(vcpu, ctxt->interruptibility);
7542 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
7543 		if (!ctxt->have_exception ||
7544 		    exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
7545 			kvm_rip_write(vcpu, ctxt->eip);
7546 			if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
7547 				r = kvm_vcpu_do_singlestep(vcpu);
7548 			if (kvm_x86_ops.update_emulated_instruction)
7549 				kvm_x86_ops.update_emulated_instruction(vcpu);
7550 			__kvm_set_rflags(vcpu, ctxt->eflags);
7551 		}
7552 
7553 		/*
7554 		 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
7555 		 * do nothing, and it will be requested again as soon as
7556 		 * the shadow expires.  But we still need to check here,
7557 		 * because POPF has no interrupt shadow.
7558 		 */
7559 		if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
7560 			kvm_make_request(KVM_REQ_EVENT, vcpu);
7561 	} else
7562 		vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
7563 
7564 	return r;
7565 }
7566 
kvm_emulate_instruction(struct kvm_vcpu * vcpu,int emulation_type)7567 int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
7568 {
7569 	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
7570 }
7571 EXPORT_SYMBOL_GPL(kvm_emulate_instruction);
7572 
kvm_emulate_instruction_from_buffer(struct kvm_vcpu * vcpu,void * insn,int insn_len)7573 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
7574 					void *insn, int insn_len)
7575 {
7576 	return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len);
7577 }
7578 EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
7579 
complete_fast_pio_out_port_0x7e(struct kvm_vcpu * vcpu)7580 static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu)
7581 {
7582 	vcpu->arch.pio.count = 0;
7583 	return 1;
7584 }
7585 
complete_fast_pio_out(struct kvm_vcpu * vcpu)7586 static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
7587 {
7588 	vcpu->arch.pio.count = 0;
7589 
7590 	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip)))
7591 		return 1;
7592 
7593 	return kvm_skip_emulated_instruction(vcpu);
7594 }
7595 
kvm_fast_pio_out(struct kvm_vcpu * vcpu,int size,unsigned short port)7596 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
7597 			    unsigned short port)
7598 {
7599 	unsigned long val = kvm_rax_read(vcpu);
7600 	int ret = emulator_pio_out(vcpu, size, port, &val, 1);
7601 
7602 	if (ret)
7603 		return ret;
7604 
7605 	/*
7606 	 * Workaround userspace that relies on old KVM behavior of %rip being
7607 	 * incremented prior to exiting to userspace to handle "OUT 0x7e".
7608 	 */
7609 	if (port == 0x7e &&
7610 	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) {
7611 		vcpu->arch.complete_userspace_io =
7612 			complete_fast_pio_out_port_0x7e;
7613 		kvm_skip_emulated_instruction(vcpu);
7614 	} else {
7615 		vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
7616 		vcpu->arch.complete_userspace_io = complete_fast_pio_out;
7617 	}
7618 	return 0;
7619 }
7620 
complete_fast_pio_in(struct kvm_vcpu * vcpu)7621 static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
7622 {
7623 	unsigned long val;
7624 
7625 	/* We should only ever be called with arch.pio.count equal to 1 */
7626 	BUG_ON(vcpu->arch.pio.count != 1);
7627 
7628 	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) {
7629 		vcpu->arch.pio.count = 0;
7630 		return 1;
7631 	}
7632 
7633 	/* For size less than 4 we merge, else we zero extend */
7634 	val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0;
7635 
7636 	/*
7637 	 * Since vcpu->arch.pio.count == 1 let emulator_pio_in perform
7638 	 * the copy and tracing
7639 	 */
7640 	emulator_pio_in(vcpu, vcpu->arch.pio.size, vcpu->arch.pio.port, &val, 1);
7641 	kvm_rax_write(vcpu, val);
7642 
7643 	return kvm_skip_emulated_instruction(vcpu);
7644 }
7645 
kvm_fast_pio_in(struct kvm_vcpu * vcpu,int size,unsigned short port)7646 static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
7647 			   unsigned short port)
7648 {
7649 	unsigned long val;
7650 	int ret;
7651 
7652 	/* For size less than 4 we merge, else we zero extend */
7653 	val = (size < 4) ? kvm_rax_read(vcpu) : 0;
7654 
7655 	ret = emulator_pio_in(vcpu, size, port, &val, 1);
7656 	if (ret) {
7657 		kvm_rax_write(vcpu, val);
7658 		return ret;
7659 	}
7660 
7661 	vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
7662 	vcpu->arch.complete_userspace_io = complete_fast_pio_in;
7663 
7664 	return 0;
7665 }
7666 
kvm_fast_pio(struct kvm_vcpu * vcpu,int size,unsigned short port,int in)7667 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
7668 {
7669 	int ret;
7670 
7671 	if (in)
7672 		ret = kvm_fast_pio_in(vcpu, size, port);
7673 	else
7674 		ret = kvm_fast_pio_out(vcpu, size, port);
7675 	return ret && kvm_skip_emulated_instruction(vcpu);
7676 }
7677 EXPORT_SYMBOL_GPL(kvm_fast_pio);
7678 
kvmclock_cpu_down_prep(unsigned int cpu)7679 static int kvmclock_cpu_down_prep(unsigned int cpu)
7680 {
7681 	__this_cpu_write(cpu_tsc_khz, 0);
7682 	return 0;
7683 }
7684 
tsc_khz_changed(void * data)7685 static void tsc_khz_changed(void *data)
7686 {
7687 	struct cpufreq_freqs *freq = data;
7688 	unsigned long khz = 0;
7689 
7690 	if (data)
7691 		khz = freq->new;
7692 	else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
7693 		khz = cpufreq_quick_get(raw_smp_processor_id());
7694 	if (!khz)
7695 		khz = tsc_khz;
7696 	__this_cpu_write(cpu_tsc_khz, khz);
7697 }
7698 
7699 #ifdef CONFIG_X86_64
kvm_hyperv_tsc_notifier(void)7700 static void kvm_hyperv_tsc_notifier(void)
7701 {
7702 	struct kvm *kvm;
7703 	struct kvm_vcpu *vcpu;
7704 	int cpu;
7705 
7706 	mutex_lock(&kvm_lock);
7707 	list_for_each_entry(kvm, &vm_list, vm_list)
7708 		kvm_make_mclock_inprogress_request(kvm);
7709 
7710 	hyperv_stop_tsc_emulation();
7711 
7712 	/* TSC frequency always matches when on Hyper-V */
7713 	for_each_present_cpu(cpu)
7714 		per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
7715 	kvm_max_guest_tsc_khz = tsc_khz;
7716 
7717 	list_for_each_entry(kvm, &vm_list, vm_list) {
7718 		struct kvm_arch *ka = &kvm->arch;
7719 
7720 		spin_lock(&ka->pvclock_gtod_sync_lock);
7721 
7722 		pvclock_update_vm_gtod_copy(kvm);
7723 
7724 		kvm_for_each_vcpu(cpu, vcpu, kvm)
7725 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
7726 
7727 		kvm_for_each_vcpu(cpu, vcpu, kvm)
7728 			kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
7729 
7730 		spin_unlock(&ka->pvclock_gtod_sync_lock);
7731 	}
7732 	mutex_unlock(&kvm_lock);
7733 }
7734 #endif
7735 
__kvmclock_cpufreq_notifier(struct cpufreq_freqs * freq,int cpu)7736 static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu)
7737 {
7738 	struct kvm *kvm;
7739 	struct kvm_vcpu *vcpu;
7740 	int i, send_ipi = 0;
7741 
7742 	/*
7743 	 * We allow guests to temporarily run on slowing clocks,
7744 	 * provided we notify them after, or to run on accelerating
7745 	 * clocks, provided we notify them before.  Thus time never
7746 	 * goes backwards.
7747 	 *
7748 	 * However, we have a problem.  We can't atomically update
7749 	 * the frequency of a given CPU from this function; it is
7750 	 * merely a notifier, which can be called from any CPU.
7751 	 * Changing the TSC frequency at arbitrary points in time
7752 	 * requires a recomputation of local variables related to
7753 	 * the TSC for each VCPU.  We must flag these local variables
7754 	 * to be updated and be sure the update takes place with the
7755 	 * new frequency before any guests proceed.
7756 	 *
7757 	 * Unfortunately, the combination of hotplug CPU and frequency
7758 	 * change creates an intractable locking scenario; the order
7759 	 * of when these callouts happen is undefined with respect to
7760 	 * CPU hotplug, and they can race with each other.  As such,
7761 	 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
7762 	 * undefined; you can actually have a CPU frequency change take
7763 	 * place in between the computation of X and the setting of the
7764 	 * variable.  To protect against this problem, all updates of
7765 	 * the per_cpu tsc_khz variable are done in an interrupt
7766 	 * protected IPI, and all callers wishing to update the value
7767 	 * must wait for a synchronous IPI to complete (which is trivial
7768 	 * if the caller is on the CPU already).  This establishes the
7769 	 * necessary total order on variable updates.
7770 	 *
7771 	 * Note that because a guest time update may take place
7772 	 * anytime after the setting of the VCPU's request bit, the
7773 	 * correct TSC value must be set before the request.  However,
7774 	 * to ensure the update actually makes it to any guest which
7775 	 * starts running in hardware virtualization between the set
7776 	 * and the acquisition of the spinlock, we must also ping the
7777 	 * CPU after setting the request bit.
7778 	 *
7779 	 */
7780 
7781 	smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
7782 
7783 	mutex_lock(&kvm_lock);
7784 	list_for_each_entry(kvm, &vm_list, vm_list) {
7785 		kvm_for_each_vcpu(i, vcpu, kvm) {
7786 			if (vcpu->cpu != cpu)
7787 				continue;
7788 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
7789 			if (vcpu->cpu != raw_smp_processor_id())
7790 				send_ipi = 1;
7791 		}
7792 	}
7793 	mutex_unlock(&kvm_lock);
7794 
7795 	if (freq->old < freq->new && send_ipi) {
7796 		/*
7797 		 * We upscale the frequency.  Must make the guest
7798 		 * doesn't see old kvmclock values while running with
7799 		 * the new frequency, otherwise we risk the guest sees
7800 		 * time go backwards.
7801 		 *
7802 		 * In case we update the frequency for another cpu
7803 		 * (which might be in guest context) send an interrupt
7804 		 * to kick the cpu out of guest context.  Next time
7805 		 * guest context is entered kvmclock will be updated,
7806 		 * so the guest will not see stale values.
7807 		 */
7808 		smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
7809 	}
7810 }
7811 
kvmclock_cpufreq_notifier(struct notifier_block * nb,unsigned long val,void * data)7812 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
7813 				     void *data)
7814 {
7815 	struct cpufreq_freqs *freq = data;
7816 	int cpu;
7817 
7818 	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
7819 		return 0;
7820 	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
7821 		return 0;
7822 
7823 	for_each_cpu(cpu, freq->policy->cpus)
7824 		__kvmclock_cpufreq_notifier(freq, cpu);
7825 
7826 	return 0;
7827 }
7828 
7829 static struct notifier_block kvmclock_cpufreq_notifier_block = {
7830 	.notifier_call  = kvmclock_cpufreq_notifier
7831 };
7832 
kvmclock_cpu_online(unsigned int cpu)7833 static int kvmclock_cpu_online(unsigned int cpu)
7834 {
7835 	tsc_khz_changed(NULL);
7836 	return 0;
7837 }
7838 
kvm_timer_init(void)7839 static void kvm_timer_init(void)
7840 {
7841 	max_tsc_khz = tsc_khz;
7842 
7843 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
7844 #ifdef CONFIG_CPU_FREQ
7845 		struct cpufreq_policy *policy;
7846 		int cpu;
7847 
7848 		cpu = get_cpu();
7849 		policy = cpufreq_cpu_get(cpu);
7850 		if (policy) {
7851 			if (policy->cpuinfo.max_freq)
7852 				max_tsc_khz = policy->cpuinfo.max_freq;
7853 			cpufreq_cpu_put(policy);
7854 		}
7855 		put_cpu();
7856 #endif
7857 		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
7858 					  CPUFREQ_TRANSITION_NOTIFIER);
7859 	}
7860 
7861 	cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online",
7862 			  kvmclock_cpu_online, kvmclock_cpu_down_prep);
7863 }
7864 
7865 DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
7866 EXPORT_PER_CPU_SYMBOL_GPL(current_vcpu);
7867 
kvm_is_in_guest(void)7868 int kvm_is_in_guest(void)
7869 {
7870 	return __this_cpu_read(current_vcpu) != NULL;
7871 }
7872 
kvm_is_user_mode(void)7873 static int kvm_is_user_mode(void)
7874 {
7875 	int user_mode = 3;
7876 
7877 	if (__this_cpu_read(current_vcpu))
7878 		user_mode = kvm_x86_ops.get_cpl(__this_cpu_read(current_vcpu));
7879 
7880 	return user_mode != 0;
7881 }
7882 
kvm_get_guest_ip(void)7883 static unsigned long kvm_get_guest_ip(void)
7884 {
7885 	unsigned long ip = 0;
7886 
7887 	if (__this_cpu_read(current_vcpu))
7888 		ip = kvm_rip_read(__this_cpu_read(current_vcpu));
7889 
7890 	return ip;
7891 }
7892 
kvm_handle_intel_pt_intr(void)7893 static void kvm_handle_intel_pt_intr(void)
7894 {
7895 	struct kvm_vcpu *vcpu = __this_cpu_read(current_vcpu);
7896 
7897 	kvm_make_request(KVM_REQ_PMI, vcpu);
7898 	__set_bit(MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT,
7899 			(unsigned long *)&vcpu->arch.pmu.global_status);
7900 }
7901 
7902 static struct perf_guest_info_callbacks kvm_guest_cbs = {
7903 	.is_in_guest		= kvm_is_in_guest,
7904 	.is_user_mode		= kvm_is_user_mode,
7905 	.get_guest_ip		= kvm_get_guest_ip,
7906 	.handle_intel_pt_intr	= NULL,
7907 };
7908 
7909 #ifdef CONFIG_X86_64
pvclock_gtod_update_fn(struct work_struct * work)7910 static void pvclock_gtod_update_fn(struct work_struct *work)
7911 {
7912 	struct kvm *kvm;
7913 
7914 	struct kvm_vcpu *vcpu;
7915 	int i;
7916 
7917 	mutex_lock(&kvm_lock);
7918 	list_for_each_entry(kvm, &vm_list, vm_list)
7919 		kvm_for_each_vcpu(i, vcpu, kvm)
7920 			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
7921 	atomic_set(&kvm_guest_has_master_clock, 0);
7922 	mutex_unlock(&kvm_lock);
7923 }
7924 
7925 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
7926 
7927 /*
7928  * Indirection to move queue_work() out of the tk_core.seq write held
7929  * region to prevent possible deadlocks against time accessors which
7930  * are invoked with work related locks held.
7931  */
pvclock_irq_work_fn(struct irq_work * w)7932 static void pvclock_irq_work_fn(struct irq_work *w)
7933 {
7934 	queue_work(system_long_wq, &pvclock_gtod_work);
7935 }
7936 
7937 static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn);
7938 
7939 /*
7940  * Notification about pvclock gtod data update.
7941  */
pvclock_gtod_notify(struct notifier_block * nb,unsigned long unused,void * priv)7942 static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
7943 			       void *priv)
7944 {
7945 	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
7946 	struct timekeeper *tk = priv;
7947 
7948 	update_pvclock_gtod(tk);
7949 
7950 	/*
7951 	 * Disable master clock if host does not trust, or does not use,
7952 	 * TSC based clocksource. Delegate queue_work() to irq_work as
7953 	 * this is invoked with tk_core.seq write held.
7954 	 */
7955 	if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) &&
7956 	    atomic_read(&kvm_guest_has_master_clock) != 0)
7957 		irq_work_queue(&pvclock_irq_work);
7958 	return 0;
7959 }
7960 
7961 static struct notifier_block pvclock_gtod_notifier = {
7962 	.notifier_call = pvclock_gtod_notify,
7963 };
7964 #endif
7965 
kvm_arch_init(void * opaque)7966 int kvm_arch_init(void *opaque)
7967 {
7968 	struct kvm_x86_init_ops *ops = opaque;
7969 	int r;
7970 
7971 	if (kvm_x86_ops.hardware_enable) {
7972 		printk(KERN_ERR "kvm: already loaded the other module\n");
7973 		r = -EEXIST;
7974 		goto out;
7975 	}
7976 
7977 	if (!ops->cpu_has_kvm_support()) {
7978 		pr_err_ratelimited("kvm: no hardware support\n");
7979 		r = -EOPNOTSUPP;
7980 		goto out;
7981 	}
7982 	if (ops->disabled_by_bios()) {
7983 		pr_err_ratelimited("kvm: disabled by bios\n");
7984 		r = -EOPNOTSUPP;
7985 		goto out;
7986 	}
7987 
7988 	/*
7989 	 * KVM explicitly assumes that the guest has an FPU and
7990 	 * FXSAVE/FXRSTOR. For example, the KVM_GET_FPU explicitly casts the
7991 	 * vCPU's FPU state as a fxregs_state struct.
7992 	 */
7993 	if (!boot_cpu_has(X86_FEATURE_FPU) || !boot_cpu_has(X86_FEATURE_FXSR)) {
7994 		printk(KERN_ERR "kvm: inadequate fpu\n");
7995 		r = -EOPNOTSUPP;
7996 		goto out;
7997 	}
7998 
7999 	r = -ENOMEM;
8000 	x86_fpu_cache = kmem_cache_create("x86_fpu", sizeof(struct fpu),
8001 					  __alignof__(struct fpu), SLAB_ACCOUNT,
8002 					  NULL);
8003 	if (!x86_fpu_cache) {
8004 		printk(KERN_ERR "kvm: failed to allocate cache for x86 fpu\n");
8005 		goto out;
8006 	}
8007 
8008 	x86_emulator_cache = kvm_alloc_emulator_cache();
8009 	if (!x86_emulator_cache) {
8010 		pr_err("kvm: failed to allocate cache for x86 emulator\n");
8011 		goto out_free_x86_fpu_cache;
8012 	}
8013 
8014 	user_return_msrs = alloc_percpu(struct kvm_user_return_msrs);
8015 	if (!user_return_msrs) {
8016 		printk(KERN_ERR "kvm: failed to allocate percpu kvm_user_return_msrs\n");
8017 		goto out_free_x86_emulator_cache;
8018 	}
8019 
8020 	r = kvm_mmu_module_init();
8021 	if (r)
8022 		goto out_free_percpu;
8023 
8024 	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
8025 			PT_DIRTY_MASK, PT64_NX_MASK, 0,
8026 			PT_PRESENT_MASK, 0, sme_me_mask);
8027 	kvm_timer_init();
8028 
8029 	if (ops->intel_pt_intr_in_guest && ops->intel_pt_intr_in_guest())
8030 		kvm_guest_cbs.handle_intel_pt_intr = kvm_handle_intel_pt_intr;
8031 	perf_register_guest_info_callbacks(&kvm_guest_cbs);
8032 
8033 	if (boot_cpu_has(X86_FEATURE_XSAVE)) {
8034 		host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
8035 		supported_xcr0 = host_xcr0 & KVM_SUPPORTED_XCR0;
8036 	}
8037 
8038 	kvm_lapic_init();
8039 	if (pi_inject_timer == -1)
8040 		pi_inject_timer = housekeeping_enabled(HK_FLAG_TIMER);
8041 #ifdef CONFIG_X86_64
8042 	pvclock_gtod_register_notifier(&pvclock_gtod_notifier);
8043 
8044 	if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
8045 		set_hv_tscchange_cb(kvm_hyperv_tsc_notifier);
8046 #endif
8047 
8048 	return 0;
8049 
8050 out_free_percpu:
8051 	free_percpu(user_return_msrs);
8052 out_free_x86_emulator_cache:
8053 	kmem_cache_destroy(x86_emulator_cache);
8054 out_free_x86_fpu_cache:
8055 	kmem_cache_destroy(x86_fpu_cache);
8056 out:
8057 	return r;
8058 }
8059 
kvm_arch_exit(void)8060 void kvm_arch_exit(void)
8061 {
8062 #ifdef CONFIG_X86_64
8063 	if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
8064 		clear_hv_tscchange_cb();
8065 #endif
8066 	kvm_lapic_exit();
8067 	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
8068 	kvm_guest_cbs.handle_intel_pt_intr = NULL;
8069 
8070 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
8071 		cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
8072 					    CPUFREQ_TRANSITION_NOTIFIER);
8073 	cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
8074 #ifdef CONFIG_X86_64
8075 	pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
8076 	irq_work_sync(&pvclock_irq_work);
8077 	cancel_work_sync(&pvclock_gtod_work);
8078 #endif
8079 	kvm_x86_ops.hardware_enable = NULL;
8080 	kvm_mmu_module_exit();
8081 	free_percpu(user_return_msrs);
8082 	kmem_cache_destroy(x86_emulator_cache);
8083 	kmem_cache_destroy(x86_fpu_cache);
8084 }
8085 
kvm_vcpu_halt(struct kvm_vcpu * vcpu)8086 int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
8087 {
8088 	++vcpu->stat.halt_exits;
8089 	if (lapic_in_kernel(vcpu)) {
8090 		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
8091 		return 1;
8092 	} else {
8093 		vcpu->run->exit_reason = KVM_EXIT_HLT;
8094 		return 0;
8095 	}
8096 }
8097 EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
8098 
kvm_emulate_halt(struct kvm_vcpu * vcpu)8099 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
8100 {
8101 	int ret = kvm_skip_emulated_instruction(vcpu);
8102 	/*
8103 	 * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
8104 	 * KVM_EXIT_DEBUG here.
8105 	 */
8106 	return kvm_vcpu_halt(vcpu) && ret;
8107 }
8108 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
8109 
8110 #ifdef CONFIG_X86_64
kvm_pv_clock_pairing(struct kvm_vcpu * vcpu,gpa_t paddr,unsigned long clock_type)8111 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
8112 			        unsigned long clock_type)
8113 {
8114 	struct kvm_clock_pairing clock_pairing;
8115 	struct timespec64 ts;
8116 	u64 cycle;
8117 	int ret;
8118 
8119 	if (clock_type != KVM_CLOCK_PAIRING_WALLCLOCK)
8120 		return -KVM_EOPNOTSUPP;
8121 
8122 	if (kvm_get_walltime_and_clockread(&ts, &cycle) == false)
8123 		return -KVM_EOPNOTSUPP;
8124 
8125 	clock_pairing.sec = ts.tv_sec;
8126 	clock_pairing.nsec = ts.tv_nsec;
8127 	clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle);
8128 	clock_pairing.flags = 0;
8129 	memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad));
8130 
8131 	ret = 0;
8132 	if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing,
8133 			    sizeof(struct kvm_clock_pairing)))
8134 		ret = -KVM_EFAULT;
8135 
8136 	return ret;
8137 }
8138 #endif
8139 
8140 /*
8141  * kvm_pv_kick_cpu_op:  Kick a vcpu.
8142  *
8143  * @apicid - apicid of vcpu to be kicked.
8144  */
kvm_pv_kick_cpu_op(struct kvm * kvm,unsigned long flags,int apicid)8145 static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
8146 {
8147 	struct kvm_lapic_irq lapic_irq;
8148 
8149 	lapic_irq.shorthand = APIC_DEST_NOSHORT;
8150 	lapic_irq.dest_mode = APIC_DEST_PHYSICAL;
8151 	lapic_irq.level = 0;
8152 	lapic_irq.dest_id = apicid;
8153 	lapic_irq.msi_redir_hint = false;
8154 
8155 	lapic_irq.delivery_mode = APIC_DM_REMRD;
8156 	kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
8157 }
8158 
kvm_apicv_activated(struct kvm * kvm)8159 bool kvm_apicv_activated(struct kvm *kvm)
8160 {
8161 	return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0);
8162 }
8163 EXPORT_SYMBOL_GPL(kvm_apicv_activated);
8164 
kvm_apicv_init(struct kvm * kvm,bool enable)8165 void kvm_apicv_init(struct kvm *kvm, bool enable)
8166 {
8167 	if (enable)
8168 		clear_bit(APICV_INHIBIT_REASON_DISABLE,
8169 			  &kvm->arch.apicv_inhibit_reasons);
8170 	else
8171 		set_bit(APICV_INHIBIT_REASON_DISABLE,
8172 			&kvm->arch.apicv_inhibit_reasons);
8173 }
8174 EXPORT_SYMBOL_GPL(kvm_apicv_init);
8175 
kvm_sched_yield(struct kvm * kvm,unsigned long dest_id)8176 static void kvm_sched_yield(struct kvm *kvm, unsigned long dest_id)
8177 {
8178 	struct kvm_vcpu *target = NULL;
8179 	struct kvm_apic_map *map;
8180 
8181 	rcu_read_lock();
8182 	map = rcu_dereference(kvm->arch.apic_map);
8183 
8184 	if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id])
8185 		target = map->phys_map[dest_id]->vcpu;
8186 
8187 	rcu_read_unlock();
8188 
8189 	if (target && READ_ONCE(target->ready))
8190 		kvm_vcpu_yield_to(target);
8191 }
8192 
kvm_emulate_hypercall(struct kvm_vcpu * vcpu)8193 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
8194 {
8195 	unsigned long nr, a0, a1, a2, a3, ret;
8196 	int op_64_bit;
8197 
8198 	if (kvm_hv_hypercall_enabled(vcpu->kvm))
8199 		return kvm_hv_hypercall(vcpu);
8200 
8201 	nr = kvm_rax_read(vcpu);
8202 	a0 = kvm_rbx_read(vcpu);
8203 	a1 = kvm_rcx_read(vcpu);
8204 	a2 = kvm_rdx_read(vcpu);
8205 	a3 = kvm_rsi_read(vcpu);
8206 
8207 	trace_kvm_hypercall(nr, a0, a1, a2, a3);
8208 
8209 	op_64_bit = is_64_bit_mode(vcpu);
8210 	if (!op_64_bit) {
8211 		nr &= 0xFFFFFFFF;
8212 		a0 &= 0xFFFFFFFF;
8213 		a1 &= 0xFFFFFFFF;
8214 		a2 &= 0xFFFFFFFF;
8215 		a3 &= 0xFFFFFFFF;
8216 	}
8217 
8218 	if (kvm_x86_ops.get_cpl(vcpu) != 0) {
8219 		ret = -KVM_EPERM;
8220 		goto out;
8221 	}
8222 
8223 	ret = -KVM_ENOSYS;
8224 
8225 	switch (nr) {
8226 	case KVM_HC_VAPIC_POLL_IRQ:
8227 		ret = 0;
8228 		break;
8229 	case KVM_HC_KICK_CPU:
8230 		if (!guest_pv_has(vcpu, KVM_FEATURE_PV_UNHALT))
8231 			break;
8232 
8233 		kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1);
8234 		kvm_sched_yield(vcpu->kvm, a1);
8235 		ret = 0;
8236 		break;
8237 #ifdef CONFIG_X86_64
8238 	case KVM_HC_CLOCK_PAIRING:
8239 		ret = kvm_pv_clock_pairing(vcpu, a0, a1);
8240 		break;
8241 #endif
8242 	case KVM_HC_SEND_IPI:
8243 		if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SEND_IPI))
8244 			break;
8245 
8246 		ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
8247 		break;
8248 	case KVM_HC_SCHED_YIELD:
8249 		if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SCHED_YIELD))
8250 			break;
8251 
8252 		kvm_sched_yield(vcpu->kvm, a0);
8253 		ret = 0;
8254 		break;
8255 	default:
8256 		ret = -KVM_ENOSYS;
8257 		break;
8258 	}
8259 out:
8260 	if (!op_64_bit)
8261 		ret = (u32)ret;
8262 	kvm_rax_write(vcpu, ret);
8263 
8264 	++vcpu->stat.hypercalls;
8265 	return kvm_skip_emulated_instruction(vcpu);
8266 }
8267 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
8268 
emulator_fix_hypercall(struct x86_emulate_ctxt * ctxt)8269 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
8270 {
8271 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8272 	char instruction[3];
8273 	unsigned long rip = kvm_rip_read(vcpu);
8274 
8275 	kvm_x86_ops.patch_hypercall(vcpu, instruction);
8276 
8277 	return emulator_write_emulated(ctxt, rip, instruction, 3,
8278 		&ctxt->exception);
8279 }
8280 
dm_request_for_irq_injection(struct kvm_vcpu * vcpu)8281 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
8282 {
8283 	return vcpu->run->request_interrupt_window &&
8284 		likely(!pic_in_kernel(vcpu->kvm));
8285 }
8286 
post_kvm_run_save(struct kvm_vcpu * vcpu)8287 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
8288 {
8289 	struct kvm_run *kvm_run = vcpu->run;
8290 
8291 	kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
8292 	kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0;
8293 	kvm_run->cr8 = kvm_get_cr8(vcpu);
8294 	kvm_run->apic_base = kvm_get_apic_base(vcpu);
8295 	kvm_run->ready_for_interrupt_injection =
8296 		pic_in_kernel(vcpu->kvm) ||
8297 		kvm_vcpu_ready_for_interrupt_injection(vcpu);
8298 }
8299 
update_cr8_intercept(struct kvm_vcpu * vcpu)8300 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
8301 {
8302 	int max_irr, tpr;
8303 
8304 	if (!kvm_x86_ops.update_cr8_intercept)
8305 		return;
8306 
8307 	if (!lapic_in_kernel(vcpu))
8308 		return;
8309 
8310 	if (vcpu->arch.apicv_active)
8311 		return;
8312 
8313 	if (!vcpu->arch.apic->vapic_addr)
8314 		max_irr = kvm_lapic_find_highest_irr(vcpu);
8315 	else
8316 		max_irr = -1;
8317 
8318 	if (max_irr != -1)
8319 		max_irr >>= 4;
8320 
8321 	tpr = kvm_lapic_get_cr8(vcpu);
8322 
8323 	kvm_x86_ops.update_cr8_intercept(vcpu, tpr, max_irr);
8324 }
8325 
kvm_inject_exception(struct kvm_vcpu * vcpu)8326 static void kvm_inject_exception(struct kvm_vcpu *vcpu)
8327 {
8328 	if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
8329 		vcpu->arch.exception.error_code = false;
8330 	kvm_x86_ops.queue_exception(vcpu);
8331 }
8332 
inject_pending_event(struct kvm_vcpu * vcpu,bool * req_immediate_exit)8333 static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit)
8334 {
8335 	int r;
8336 	bool can_inject = true;
8337 
8338 	/* try to reinject previous events if any */
8339 
8340 	if (vcpu->arch.exception.injected) {
8341 		kvm_inject_exception(vcpu);
8342 		can_inject = false;
8343 	}
8344 	/*
8345 	 * Do not inject an NMI or interrupt if there is a pending
8346 	 * exception.  Exceptions and interrupts are recognized at
8347 	 * instruction boundaries, i.e. the start of an instruction.
8348 	 * Trap-like exceptions, e.g. #DB, have higher priority than
8349 	 * NMIs and interrupts, i.e. traps are recognized before an
8350 	 * NMI/interrupt that's pending on the same instruction.
8351 	 * Fault-like exceptions, e.g. #GP and #PF, are the lowest
8352 	 * priority, but are only generated (pended) during instruction
8353 	 * execution, i.e. a pending fault-like exception means the
8354 	 * fault occurred on the *previous* instruction and must be
8355 	 * serviced prior to recognizing any new events in order to
8356 	 * fully complete the previous instruction.
8357 	 */
8358 	else if (!vcpu->arch.exception.pending) {
8359 		if (vcpu->arch.nmi_injected) {
8360 			kvm_x86_ops.set_nmi(vcpu);
8361 			can_inject = false;
8362 		} else if (vcpu->arch.interrupt.injected) {
8363 			kvm_x86_ops.set_irq(vcpu);
8364 			can_inject = false;
8365 		}
8366 	}
8367 
8368 	WARN_ON_ONCE(vcpu->arch.exception.injected &&
8369 		     vcpu->arch.exception.pending);
8370 
8371 	/*
8372 	 * Call check_nested_events() even if we reinjected a previous event
8373 	 * in order for caller to determine if it should require immediate-exit
8374 	 * from L2 to L1 due to pending L1 events which require exit
8375 	 * from L2 to L1.
8376 	 */
8377 	if (is_guest_mode(vcpu)) {
8378 		r = kvm_x86_ops.nested_ops->check_events(vcpu);
8379 		if (r < 0)
8380 			goto busy;
8381 	}
8382 
8383 	/* try to inject new event if pending */
8384 	if (vcpu->arch.exception.pending) {
8385 		trace_kvm_inj_exception(vcpu->arch.exception.nr,
8386 					vcpu->arch.exception.has_error_code,
8387 					vcpu->arch.exception.error_code);
8388 
8389 		vcpu->arch.exception.pending = false;
8390 		vcpu->arch.exception.injected = true;
8391 
8392 		if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT)
8393 			__kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
8394 					     X86_EFLAGS_RF);
8395 
8396 		if (vcpu->arch.exception.nr == DB_VECTOR) {
8397 			kvm_deliver_exception_payload(vcpu);
8398 			if (vcpu->arch.dr7 & DR7_GD) {
8399 				vcpu->arch.dr7 &= ~DR7_GD;
8400 				kvm_update_dr7(vcpu);
8401 			}
8402 		}
8403 
8404 		kvm_inject_exception(vcpu);
8405 		can_inject = false;
8406 	}
8407 
8408 	/*
8409 	 * Finally, inject interrupt events.  If an event cannot be injected
8410 	 * due to architectural conditions (e.g. IF=0) a window-open exit
8411 	 * will re-request KVM_REQ_EVENT.  Sometimes however an event is pending
8412 	 * and can architecturally be injected, but we cannot do it right now:
8413 	 * an interrupt could have arrived just now and we have to inject it
8414 	 * as a vmexit, or there could already an event in the queue, which is
8415 	 * indicated by can_inject.  In that case we request an immediate exit
8416 	 * in order to make progress and get back here for another iteration.
8417 	 * The kvm_x86_ops hooks communicate this by returning -EBUSY.
8418 	 */
8419 	if (vcpu->arch.smi_pending) {
8420 		r = can_inject ? kvm_x86_ops.smi_allowed(vcpu, true) : -EBUSY;
8421 		if (r < 0)
8422 			goto busy;
8423 		if (r) {
8424 			vcpu->arch.smi_pending = false;
8425 			++vcpu->arch.smi_count;
8426 			enter_smm(vcpu);
8427 			can_inject = false;
8428 		} else
8429 			kvm_x86_ops.enable_smi_window(vcpu);
8430 	}
8431 
8432 	if (vcpu->arch.nmi_pending) {
8433 		r = can_inject ? kvm_x86_ops.nmi_allowed(vcpu, true) : -EBUSY;
8434 		if (r < 0)
8435 			goto busy;
8436 		if (r) {
8437 			--vcpu->arch.nmi_pending;
8438 			vcpu->arch.nmi_injected = true;
8439 			kvm_x86_ops.set_nmi(vcpu);
8440 			can_inject = false;
8441 			WARN_ON(kvm_x86_ops.nmi_allowed(vcpu, true) < 0);
8442 		}
8443 		if (vcpu->arch.nmi_pending)
8444 			kvm_x86_ops.enable_nmi_window(vcpu);
8445 	}
8446 
8447 	if (kvm_cpu_has_injectable_intr(vcpu)) {
8448 		r = can_inject ? kvm_x86_ops.interrupt_allowed(vcpu, true) : -EBUSY;
8449 		if (r < 0)
8450 			goto busy;
8451 		if (r) {
8452 			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false);
8453 			kvm_x86_ops.set_irq(vcpu);
8454 			WARN_ON(kvm_x86_ops.interrupt_allowed(vcpu, true) < 0);
8455 		}
8456 		if (kvm_cpu_has_injectable_intr(vcpu))
8457 			kvm_x86_ops.enable_irq_window(vcpu);
8458 	}
8459 
8460 	if (is_guest_mode(vcpu) &&
8461 	    kvm_x86_ops.nested_ops->hv_timer_pending &&
8462 	    kvm_x86_ops.nested_ops->hv_timer_pending(vcpu))
8463 		*req_immediate_exit = true;
8464 
8465 	WARN_ON(vcpu->arch.exception.pending);
8466 	return;
8467 
8468 busy:
8469 	*req_immediate_exit = true;
8470 	return;
8471 }
8472 
process_nmi(struct kvm_vcpu * vcpu)8473 static void process_nmi(struct kvm_vcpu *vcpu)
8474 {
8475 	unsigned limit = 2;
8476 
8477 	/*
8478 	 * x86 is limited to one NMI running, and one NMI pending after it.
8479 	 * If an NMI is already in progress, limit further NMIs to just one.
8480 	 * Otherwise, allow two (and we'll inject the first one immediately).
8481 	 */
8482 	if (kvm_x86_ops.get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
8483 		limit = 1;
8484 
8485 	vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
8486 	vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
8487 	kvm_make_request(KVM_REQ_EVENT, vcpu);
8488 }
8489 
enter_smm_get_segment_flags(struct kvm_segment * seg)8490 static u32 enter_smm_get_segment_flags(struct kvm_segment *seg)
8491 {
8492 	u32 flags = 0;
8493 	flags |= seg->g       << 23;
8494 	flags |= seg->db      << 22;
8495 	flags |= seg->l       << 21;
8496 	flags |= seg->avl     << 20;
8497 	flags |= seg->present << 15;
8498 	flags |= seg->dpl     << 13;
8499 	flags |= seg->s       << 12;
8500 	flags |= seg->type    << 8;
8501 	return flags;
8502 }
8503 
enter_smm_save_seg_32(struct kvm_vcpu * vcpu,char * buf,int n)8504 static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n)
8505 {
8506 	struct kvm_segment seg;
8507 	int offset;
8508 
8509 	kvm_get_segment(vcpu, &seg, n);
8510 	put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector);
8511 
8512 	if (n < 3)
8513 		offset = 0x7f84 + n * 12;
8514 	else
8515 		offset = 0x7f2c + (n - 3) * 12;
8516 
8517 	put_smstate(u32, buf, offset + 8, seg.base);
8518 	put_smstate(u32, buf, offset + 4, seg.limit);
8519 	put_smstate(u32, buf, offset, enter_smm_get_segment_flags(&seg));
8520 }
8521 
8522 #ifdef CONFIG_X86_64
enter_smm_save_seg_64(struct kvm_vcpu * vcpu,char * buf,int n)8523 static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
8524 {
8525 	struct kvm_segment seg;
8526 	int offset;
8527 	u16 flags;
8528 
8529 	kvm_get_segment(vcpu, &seg, n);
8530 	offset = 0x7e00 + n * 16;
8531 
8532 	flags = enter_smm_get_segment_flags(&seg) >> 8;
8533 	put_smstate(u16, buf, offset, seg.selector);
8534 	put_smstate(u16, buf, offset + 2, flags);
8535 	put_smstate(u32, buf, offset + 4, seg.limit);
8536 	put_smstate(u64, buf, offset + 8, seg.base);
8537 }
8538 #endif
8539 
enter_smm_save_state_32(struct kvm_vcpu * vcpu,char * buf)8540 static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
8541 {
8542 	struct desc_ptr dt;
8543 	struct kvm_segment seg;
8544 	unsigned long val;
8545 	int i;
8546 
8547 	put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu));
8548 	put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu));
8549 	put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu));
8550 	put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu));
8551 
8552 	for (i = 0; i < 8; i++)
8553 		put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read(vcpu, i));
8554 
8555 	kvm_get_dr(vcpu, 6, &val);
8556 	put_smstate(u32, buf, 0x7fcc, (u32)val);
8557 	kvm_get_dr(vcpu, 7, &val);
8558 	put_smstate(u32, buf, 0x7fc8, (u32)val);
8559 
8560 	kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
8561 	put_smstate(u32, buf, 0x7fc4, seg.selector);
8562 	put_smstate(u32, buf, 0x7f64, seg.base);
8563 	put_smstate(u32, buf, 0x7f60, seg.limit);
8564 	put_smstate(u32, buf, 0x7f5c, enter_smm_get_segment_flags(&seg));
8565 
8566 	kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
8567 	put_smstate(u32, buf, 0x7fc0, seg.selector);
8568 	put_smstate(u32, buf, 0x7f80, seg.base);
8569 	put_smstate(u32, buf, 0x7f7c, seg.limit);
8570 	put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg));
8571 
8572 	kvm_x86_ops.get_gdt(vcpu, &dt);
8573 	put_smstate(u32, buf, 0x7f74, dt.address);
8574 	put_smstate(u32, buf, 0x7f70, dt.size);
8575 
8576 	kvm_x86_ops.get_idt(vcpu, &dt);
8577 	put_smstate(u32, buf, 0x7f58, dt.address);
8578 	put_smstate(u32, buf, 0x7f54, dt.size);
8579 
8580 	for (i = 0; i < 6; i++)
8581 		enter_smm_save_seg_32(vcpu, buf, i);
8582 
8583 	put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu));
8584 
8585 	/* revision id */
8586 	put_smstate(u32, buf, 0x7efc, 0x00020000);
8587 	put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
8588 }
8589 
8590 #ifdef CONFIG_X86_64
enter_smm_save_state_64(struct kvm_vcpu * vcpu,char * buf)8591 static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
8592 {
8593 	struct desc_ptr dt;
8594 	struct kvm_segment seg;
8595 	unsigned long val;
8596 	int i;
8597 
8598 	for (i = 0; i < 16; i++)
8599 		put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read(vcpu, i));
8600 
8601 	put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu));
8602 	put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu));
8603 
8604 	kvm_get_dr(vcpu, 6, &val);
8605 	put_smstate(u64, buf, 0x7f68, val);
8606 	kvm_get_dr(vcpu, 7, &val);
8607 	put_smstate(u64, buf, 0x7f60, val);
8608 
8609 	put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu));
8610 	put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu));
8611 	put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu));
8612 
8613 	put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase);
8614 
8615 	/* revision id */
8616 	put_smstate(u32, buf, 0x7efc, 0x00020064);
8617 
8618 	put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer);
8619 
8620 	kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
8621 	put_smstate(u16, buf, 0x7e90, seg.selector);
8622 	put_smstate(u16, buf, 0x7e92, enter_smm_get_segment_flags(&seg) >> 8);
8623 	put_smstate(u32, buf, 0x7e94, seg.limit);
8624 	put_smstate(u64, buf, 0x7e98, seg.base);
8625 
8626 	kvm_x86_ops.get_idt(vcpu, &dt);
8627 	put_smstate(u32, buf, 0x7e84, dt.size);
8628 	put_smstate(u64, buf, 0x7e88, dt.address);
8629 
8630 	kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
8631 	put_smstate(u16, buf, 0x7e70, seg.selector);
8632 	put_smstate(u16, buf, 0x7e72, enter_smm_get_segment_flags(&seg) >> 8);
8633 	put_smstate(u32, buf, 0x7e74, seg.limit);
8634 	put_smstate(u64, buf, 0x7e78, seg.base);
8635 
8636 	kvm_x86_ops.get_gdt(vcpu, &dt);
8637 	put_smstate(u32, buf, 0x7e64, dt.size);
8638 	put_smstate(u64, buf, 0x7e68, dt.address);
8639 
8640 	for (i = 0; i < 6; i++)
8641 		enter_smm_save_seg_64(vcpu, buf, i);
8642 }
8643 #endif
8644 
enter_smm(struct kvm_vcpu * vcpu)8645 static void enter_smm(struct kvm_vcpu *vcpu)
8646 {
8647 	struct kvm_segment cs, ds;
8648 	struct desc_ptr dt;
8649 	char buf[512];
8650 	u32 cr0;
8651 
8652 	trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
8653 	memset(buf, 0, 512);
8654 #ifdef CONFIG_X86_64
8655 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
8656 		enter_smm_save_state_64(vcpu, buf);
8657 	else
8658 #endif
8659 		enter_smm_save_state_32(vcpu, buf);
8660 
8661 	/*
8662 	 * Give pre_enter_smm() a chance to make ISA-specific changes to the
8663 	 * vCPU state (e.g. leave guest mode) after we've saved the state into
8664 	 * the SMM state-save area.
8665 	 */
8666 	kvm_x86_ops.pre_enter_smm(vcpu, buf);
8667 
8668 	vcpu->arch.hflags |= HF_SMM_MASK;
8669 	kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
8670 
8671 	if (kvm_x86_ops.get_nmi_mask(vcpu))
8672 		vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
8673 	else
8674 		kvm_x86_ops.set_nmi_mask(vcpu, true);
8675 
8676 	kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
8677 	kvm_rip_write(vcpu, 0x8000);
8678 
8679 	cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
8680 	kvm_x86_ops.set_cr0(vcpu, cr0);
8681 	vcpu->arch.cr0 = cr0;
8682 
8683 	kvm_x86_ops.set_cr4(vcpu, 0);
8684 
8685 	/* Undocumented: IDT limit is set to zero on entry to SMM.  */
8686 	dt.address = dt.size = 0;
8687 	kvm_x86_ops.set_idt(vcpu, &dt);
8688 
8689 	__kvm_set_dr(vcpu, 7, DR7_FIXED_1);
8690 
8691 	cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
8692 	cs.base = vcpu->arch.smbase;
8693 
8694 	ds.selector = 0;
8695 	ds.base = 0;
8696 
8697 	cs.limit    = ds.limit = 0xffffffff;
8698 	cs.type     = ds.type = 0x3;
8699 	cs.dpl      = ds.dpl = 0;
8700 	cs.db       = ds.db = 0;
8701 	cs.s        = ds.s = 1;
8702 	cs.l        = ds.l = 0;
8703 	cs.g        = ds.g = 1;
8704 	cs.avl      = ds.avl = 0;
8705 	cs.present  = ds.present = 1;
8706 	cs.unusable = ds.unusable = 0;
8707 	cs.padding  = ds.padding = 0;
8708 
8709 	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
8710 	kvm_set_segment(vcpu, &ds, VCPU_SREG_DS);
8711 	kvm_set_segment(vcpu, &ds, VCPU_SREG_ES);
8712 	kvm_set_segment(vcpu, &ds, VCPU_SREG_FS);
8713 	kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
8714 	kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
8715 
8716 #ifdef CONFIG_X86_64
8717 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
8718 		kvm_x86_ops.set_efer(vcpu, 0);
8719 #endif
8720 
8721 	kvm_update_cpuid_runtime(vcpu);
8722 	kvm_mmu_reset_context(vcpu);
8723 }
8724 
process_smi(struct kvm_vcpu * vcpu)8725 static void process_smi(struct kvm_vcpu *vcpu)
8726 {
8727 	vcpu->arch.smi_pending = true;
8728 	kvm_make_request(KVM_REQ_EVENT, vcpu);
8729 }
8730 
kvm_make_scan_ioapic_request_mask(struct kvm * kvm,unsigned long * vcpu_bitmap)8731 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
8732 				       unsigned long *vcpu_bitmap)
8733 {
8734 	cpumask_var_t cpus;
8735 
8736 	zalloc_cpumask_var(&cpus, GFP_ATOMIC);
8737 
8738 	kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC,
8739 				    NULL, vcpu_bitmap, cpus);
8740 
8741 	free_cpumask_var(cpus);
8742 }
8743 
kvm_make_scan_ioapic_request(struct kvm * kvm)8744 void kvm_make_scan_ioapic_request(struct kvm *kvm)
8745 {
8746 	kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
8747 }
8748 
kvm_vcpu_update_apicv(struct kvm_vcpu * vcpu)8749 void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
8750 {
8751 	if (!lapic_in_kernel(vcpu))
8752 		return;
8753 
8754 	vcpu->arch.apicv_active = kvm_apicv_activated(vcpu->kvm);
8755 	kvm_apic_update_apicv(vcpu);
8756 	kvm_x86_ops.refresh_apicv_exec_ctrl(vcpu);
8757 }
8758 EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv);
8759 
8760 /*
8761  * NOTE: Do not hold any lock prior to calling this.
8762  *
8763  * In particular, kvm_request_apicv_update() expects kvm->srcu not to be
8764  * locked, because it calls __x86_set_memory_region() which does
8765  * synchronize_srcu(&kvm->srcu).
8766  */
kvm_request_apicv_update(struct kvm * kvm,bool activate,ulong bit)8767 void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
8768 {
8769 	struct kvm_vcpu *except;
8770 	unsigned long old, new, expected;
8771 
8772 	if (!kvm_x86_ops.check_apicv_inhibit_reasons ||
8773 	    !kvm_x86_ops.check_apicv_inhibit_reasons(bit))
8774 		return;
8775 
8776 	old = READ_ONCE(kvm->arch.apicv_inhibit_reasons);
8777 	do {
8778 		expected = new = old;
8779 		if (activate)
8780 			__clear_bit(bit, &new);
8781 		else
8782 			__set_bit(bit, &new);
8783 		if (new == old)
8784 			break;
8785 		old = cmpxchg(&kvm->arch.apicv_inhibit_reasons, expected, new);
8786 	} while (old != expected);
8787 
8788 	if (!!old == !!new)
8789 		return;
8790 
8791 	trace_kvm_apicv_update_request(activate, bit);
8792 	if (kvm_x86_ops.pre_update_apicv_exec_ctrl)
8793 		kvm_x86_ops.pre_update_apicv_exec_ctrl(kvm, activate);
8794 
8795 	/*
8796 	 * Sending request to update APICV for all other vcpus,
8797 	 * while update the calling vcpu immediately instead of
8798 	 * waiting for another #VMEXIT to handle the request.
8799 	 */
8800 	except = kvm_get_running_vcpu();
8801 	kvm_make_all_cpus_request_except(kvm, KVM_REQ_APICV_UPDATE,
8802 					 except);
8803 	if (except)
8804 		kvm_vcpu_update_apicv(except);
8805 }
8806 EXPORT_SYMBOL_GPL(kvm_request_apicv_update);
8807 
vcpu_scan_ioapic(struct kvm_vcpu * vcpu)8808 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
8809 {
8810 	if (!kvm_apic_present(vcpu))
8811 		return;
8812 
8813 	bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
8814 
8815 	if (irqchip_split(vcpu->kvm))
8816 		kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
8817 	else {
8818 		if (vcpu->arch.apicv_active)
8819 			kvm_x86_ops.sync_pir_to_irr(vcpu);
8820 		if (ioapic_in_kernel(vcpu->kvm))
8821 			kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
8822 	}
8823 
8824 	if (is_guest_mode(vcpu))
8825 		vcpu->arch.load_eoi_exitmap_pending = true;
8826 	else
8827 		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
8828 }
8829 
vcpu_load_eoi_exitmap(struct kvm_vcpu * vcpu)8830 static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
8831 {
8832 	u64 eoi_exit_bitmap[4];
8833 
8834 	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
8835 		return;
8836 
8837 	bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors,
8838 		  vcpu_to_synic(vcpu)->vec_bitmap, 256);
8839 	kvm_x86_ops.load_eoi_exitmap(vcpu, eoi_exit_bitmap);
8840 }
8841 
kvm_arch_mmu_notifier_invalidate_range(struct kvm * kvm,unsigned long start,unsigned long end)8842 void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
8843 					    unsigned long start, unsigned long end)
8844 {
8845 	unsigned long apic_address;
8846 
8847 	/*
8848 	 * The physical address of apic access page is stored in the VMCS.
8849 	 * Update it when it becomes invalid.
8850 	 */
8851 	apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
8852 	if (start <= apic_address && apic_address < end)
8853 		kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
8854 }
8855 
kvm_vcpu_reload_apic_access_page(struct kvm_vcpu * vcpu)8856 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
8857 {
8858 	if (!lapic_in_kernel(vcpu))
8859 		return;
8860 
8861 	if (!kvm_x86_ops.set_apic_access_page_addr)
8862 		return;
8863 
8864 	kvm_x86_ops.set_apic_access_page_addr(vcpu);
8865 }
8866 
__kvm_request_immediate_exit(struct kvm_vcpu * vcpu)8867 void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
8868 {
8869 	smp_send_reschedule(vcpu->cpu);
8870 }
8871 EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);
8872 
8873 /*
8874  * Returns 1 to let vcpu_run() continue the guest execution loop without
8875  * exiting to the userspace.  Otherwise, the value will be returned to the
8876  * userspace.
8877  */
vcpu_enter_guest(struct kvm_vcpu * vcpu)8878 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
8879 {
8880 	int r;
8881 	bool req_int_win =
8882 		dm_request_for_irq_injection(vcpu) &&
8883 		kvm_cpu_accept_dm_intr(vcpu);
8884 	fastpath_t exit_fastpath;
8885 
8886 	bool req_immediate_exit = false;
8887 
8888 	if (kvm_request_pending(vcpu)) {
8889 		if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
8890 			if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
8891 				r = 0;
8892 				goto out;
8893 			}
8894 		}
8895 		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
8896 			kvm_mmu_unload(vcpu);
8897 		if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
8898 			__kvm_migrate_timers(vcpu);
8899 		if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
8900 			kvm_gen_update_masterclock(vcpu->kvm);
8901 		if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
8902 			kvm_gen_kvmclock_update(vcpu);
8903 		if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
8904 			r = kvm_guest_time_update(vcpu);
8905 			if (unlikely(r))
8906 				goto out;
8907 		}
8908 		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
8909 			kvm_mmu_sync_roots(vcpu);
8910 		if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu))
8911 			kvm_mmu_load_pgd(vcpu);
8912 		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
8913 			kvm_vcpu_flush_tlb_all(vcpu);
8914 
8915 			/* Flushing all ASIDs flushes the current ASID... */
8916 			kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
8917 		}
8918 		if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
8919 			kvm_vcpu_flush_tlb_current(vcpu);
8920 		if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
8921 			kvm_vcpu_flush_tlb_guest(vcpu);
8922 
8923 		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
8924 			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
8925 			r = 0;
8926 			goto out;
8927 		}
8928 		if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
8929 			vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
8930 			vcpu->mmio_needed = 0;
8931 			r = 0;
8932 			goto out;
8933 		}
8934 		if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
8935 			/* Page is swapped out. Do synthetic halt */
8936 			vcpu->arch.apf.halted = true;
8937 			r = 1;
8938 			goto out;
8939 		}
8940 		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
8941 			record_steal_time(vcpu);
8942 		if (kvm_check_request(KVM_REQ_SMI, vcpu))
8943 			process_smi(vcpu);
8944 		if (kvm_check_request(KVM_REQ_NMI, vcpu))
8945 			process_nmi(vcpu);
8946 		if (kvm_check_request(KVM_REQ_PMU, vcpu))
8947 			kvm_pmu_handle_event(vcpu);
8948 		if (kvm_check_request(KVM_REQ_PMI, vcpu))
8949 			kvm_pmu_deliver_pmi(vcpu);
8950 		if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) {
8951 			BUG_ON(vcpu->arch.pending_ioapic_eoi > 255);
8952 			if (test_bit(vcpu->arch.pending_ioapic_eoi,
8953 				     vcpu->arch.ioapic_handled_vectors)) {
8954 				vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI;
8955 				vcpu->run->eoi.vector =
8956 						vcpu->arch.pending_ioapic_eoi;
8957 				r = 0;
8958 				goto out;
8959 			}
8960 		}
8961 		if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
8962 			vcpu_scan_ioapic(vcpu);
8963 		if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu))
8964 			vcpu_load_eoi_exitmap(vcpu);
8965 		if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
8966 			kvm_vcpu_reload_apic_access_page(vcpu);
8967 		if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
8968 			vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
8969 			vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
8970 			r = 0;
8971 			goto out;
8972 		}
8973 		if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) {
8974 			vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
8975 			vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET;
8976 			r = 0;
8977 			goto out;
8978 		}
8979 		if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) {
8980 			vcpu->run->exit_reason = KVM_EXIT_HYPERV;
8981 			vcpu->run->hyperv = vcpu->arch.hyperv.exit;
8982 			r = 0;
8983 			goto out;
8984 		}
8985 
8986 		/*
8987 		 * KVM_REQ_HV_STIMER has to be processed after
8988 		 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers
8989 		 * depend on the guest clock being up-to-date
8990 		 */
8991 		if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu))
8992 			kvm_hv_process_stimers(vcpu);
8993 		if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu))
8994 			kvm_vcpu_update_apicv(vcpu);
8995 		if (kvm_check_request(KVM_REQ_APF_READY, vcpu))
8996 			kvm_check_async_pf_completion(vcpu);
8997 		if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu))
8998 			kvm_x86_ops.msr_filter_changed(vcpu);
8999 	}
9000 
9001 	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
9002 		++vcpu->stat.req_event;
9003 		kvm_apic_accept_events(vcpu);
9004 		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
9005 			r = 1;
9006 			goto out;
9007 		}
9008 
9009 		inject_pending_event(vcpu, &req_immediate_exit);
9010 		if (req_int_win)
9011 			kvm_x86_ops.enable_irq_window(vcpu);
9012 
9013 		if (kvm_lapic_enabled(vcpu)) {
9014 			update_cr8_intercept(vcpu);
9015 			kvm_lapic_sync_to_vapic(vcpu);
9016 		}
9017 	}
9018 
9019 	r = kvm_mmu_reload(vcpu);
9020 	if (unlikely(r)) {
9021 		goto cancel_injection;
9022 	}
9023 
9024 	preempt_disable();
9025 
9026 	kvm_x86_ops.prepare_guest_switch(vcpu);
9027 
9028 	/*
9029 	 * Disable IRQs before setting IN_GUEST_MODE.  Posted interrupt
9030 	 * IPI are then delayed after guest entry, which ensures that they
9031 	 * result in virtual interrupt delivery.
9032 	 */
9033 	local_irq_disable();
9034 	vcpu->mode = IN_GUEST_MODE;
9035 
9036 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
9037 
9038 	/*
9039 	 * 1) We should set ->mode before checking ->requests.  Please see
9040 	 * the comment in kvm_vcpu_exiting_guest_mode().
9041 	 *
9042 	 * 2) For APICv, we should set ->mode before checking PID.ON. This
9043 	 * pairs with the memory barrier implicit in pi_test_and_set_on
9044 	 * (see vmx_deliver_posted_interrupt).
9045 	 *
9046 	 * 3) This also orders the write to mode from any reads to the page
9047 	 * tables done while the VCPU is running.  Please see the comment
9048 	 * in kvm_flush_remote_tlbs.
9049 	 */
9050 	smp_mb__after_srcu_read_unlock();
9051 
9052 	/*
9053 	 * This handles the case where a posted interrupt was
9054 	 * notified with kvm_vcpu_kick.
9055 	 */
9056 	if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
9057 		kvm_x86_ops.sync_pir_to_irr(vcpu);
9058 
9059 	if (kvm_vcpu_exit_request(vcpu)) {
9060 		vcpu->mode = OUTSIDE_GUEST_MODE;
9061 		smp_wmb();
9062 		local_irq_enable();
9063 		preempt_enable();
9064 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
9065 		r = 1;
9066 		goto cancel_injection;
9067 	}
9068 
9069 	if (req_immediate_exit) {
9070 		kvm_make_request(KVM_REQ_EVENT, vcpu);
9071 		kvm_x86_ops.request_immediate_exit(vcpu);
9072 	}
9073 
9074 	trace_kvm_entry(vcpu);
9075 
9076 	fpregs_assert_state_consistent();
9077 	if (test_thread_flag(TIF_NEED_FPU_LOAD))
9078 		switch_fpu_return();
9079 
9080 	if (unlikely(vcpu->arch.switch_db_regs)) {
9081 		set_debugreg(0, 7);
9082 		set_debugreg(vcpu->arch.eff_db[0], 0);
9083 		set_debugreg(vcpu->arch.eff_db[1], 1);
9084 		set_debugreg(vcpu->arch.eff_db[2], 2);
9085 		set_debugreg(vcpu->arch.eff_db[3], 3);
9086 		set_debugreg(vcpu->arch.dr6, 6);
9087 		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
9088 	} else if (unlikely(hw_breakpoint_active())) {
9089 		set_debugreg(0, 7);
9090 	}
9091 
9092 	exit_fastpath = kvm_x86_ops.run(vcpu);
9093 
9094 	/*
9095 	 * Do this here before restoring debug registers on the host.  And
9096 	 * since we do this before handling the vmexit, a DR access vmexit
9097 	 * can (a) read the correct value of the debug registers, (b) set
9098 	 * KVM_DEBUGREG_WONT_EXIT again.
9099 	 */
9100 	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
9101 		WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
9102 		kvm_x86_ops.sync_dirty_debug_regs(vcpu);
9103 		kvm_update_dr0123(vcpu);
9104 		kvm_update_dr7(vcpu);
9105 		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
9106 	}
9107 
9108 	/*
9109 	 * If the guest has used debug registers, at least dr7
9110 	 * will be disabled while returning to the host.
9111 	 * If we don't have active breakpoints in the host, we don't
9112 	 * care about the messed up debug address registers. But if
9113 	 * we have some of them active, restore the old state.
9114 	 */
9115 	if (hw_breakpoint_active())
9116 		hw_breakpoint_restore();
9117 
9118 	vcpu->arch.last_vmentry_cpu = vcpu->cpu;
9119 	vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
9120 
9121 	vcpu->mode = OUTSIDE_GUEST_MODE;
9122 	smp_wmb();
9123 
9124 	kvm_x86_ops.handle_exit_irqoff(vcpu);
9125 
9126 	/*
9127 	 * Consume any pending interrupts, including the possible source of
9128 	 * VM-Exit on SVM and any ticks that occur between VM-Exit and now.
9129 	 * An instruction is required after local_irq_enable() to fully unblock
9130 	 * interrupts on processors that implement an interrupt shadow, the
9131 	 * stat.exits increment will do nicely.
9132 	 */
9133 	kvm_before_interrupt(vcpu);
9134 	local_irq_enable();
9135 	++vcpu->stat.exits;
9136 	local_irq_disable();
9137 	kvm_after_interrupt(vcpu);
9138 
9139 	/*
9140 	 * Wait until after servicing IRQs to account guest time so that any
9141 	 * ticks that occurred while running the guest are properly accounted
9142 	 * to the guest.  Waiting until IRQs are enabled degrades the accuracy
9143 	 * of accounting via context tracking, but the loss of accuracy is
9144 	 * acceptable for all known use cases.
9145 	 */
9146 	vtime_account_guest_exit();
9147 
9148 	if (lapic_in_kernel(vcpu)) {
9149 		s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta;
9150 		if (delta != S64_MIN) {
9151 			trace_kvm_wait_lapic_expire(vcpu->vcpu_id, delta);
9152 			vcpu->arch.apic->lapic_timer.advance_expire_delta = S64_MIN;
9153 		}
9154 	}
9155 
9156 	local_irq_enable();
9157 	preempt_enable();
9158 
9159 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
9160 
9161 	/*
9162 	 * Profile KVM exit RIPs:
9163 	 */
9164 	if (unlikely(prof_on == KVM_PROFILING)) {
9165 		unsigned long rip = kvm_rip_read(vcpu);
9166 		profile_hit(KVM_PROFILING, (void *)rip);
9167 	}
9168 
9169 	if (unlikely(vcpu->arch.tsc_always_catchup))
9170 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
9171 
9172 	if (vcpu->arch.apic_attention)
9173 		kvm_lapic_sync_from_vapic(vcpu);
9174 
9175 	r = kvm_x86_ops.handle_exit(vcpu, exit_fastpath);
9176 	return r;
9177 
9178 cancel_injection:
9179 	if (req_immediate_exit)
9180 		kvm_make_request(KVM_REQ_EVENT, vcpu);
9181 	kvm_x86_ops.cancel_injection(vcpu);
9182 	if (unlikely(vcpu->arch.apic_attention))
9183 		kvm_lapic_sync_from_vapic(vcpu);
9184 out:
9185 	return r;
9186 }
9187 
vcpu_block(struct kvm * kvm,struct kvm_vcpu * vcpu)9188 static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
9189 {
9190 	if (!kvm_arch_vcpu_runnable(vcpu) &&
9191 	    (!kvm_x86_ops.pre_block || kvm_x86_ops.pre_block(vcpu) == 0)) {
9192 		srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
9193 		kvm_vcpu_block(vcpu);
9194 		vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
9195 
9196 		if (kvm_x86_ops.post_block)
9197 			kvm_x86_ops.post_block(vcpu);
9198 
9199 		if (!kvm_check_request(KVM_REQ_UNHALT, vcpu))
9200 			return 1;
9201 	}
9202 
9203 	kvm_apic_accept_events(vcpu);
9204 	switch(vcpu->arch.mp_state) {
9205 	case KVM_MP_STATE_HALTED:
9206 		vcpu->arch.pv.pv_unhalted = false;
9207 		vcpu->arch.mp_state =
9208 			KVM_MP_STATE_RUNNABLE;
9209 		fallthrough;
9210 	case KVM_MP_STATE_RUNNABLE:
9211 		vcpu->arch.apf.halted = false;
9212 		break;
9213 	case KVM_MP_STATE_INIT_RECEIVED:
9214 		break;
9215 	default:
9216 		return -EINTR;
9217 	}
9218 	return 1;
9219 }
9220 
kvm_vcpu_running(struct kvm_vcpu * vcpu)9221 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
9222 {
9223 	if (is_guest_mode(vcpu))
9224 		kvm_x86_ops.nested_ops->check_events(vcpu);
9225 
9226 	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
9227 		!vcpu->arch.apf.halted);
9228 }
9229 
vcpu_run(struct kvm_vcpu * vcpu)9230 static int vcpu_run(struct kvm_vcpu *vcpu)
9231 {
9232 	int r;
9233 	struct kvm *kvm = vcpu->kvm;
9234 
9235 	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
9236 	vcpu->arch.l1tf_flush_l1d = true;
9237 
9238 	for (;;) {
9239 		/*
9240 		 * If another guest vCPU requests a PV TLB flush in the middle
9241 		 * of instruction emulation, the rest of the emulation could
9242 		 * use a stale page translation. Assume that any code after
9243 		 * this point can start executing an instruction.
9244 		 */
9245 		vcpu->arch.at_instruction_boundary = false;
9246 		if (kvm_vcpu_running(vcpu)) {
9247 			r = vcpu_enter_guest(vcpu);
9248 		} else {
9249 			r = vcpu_block(kvm, vcpu);
9250 		}
9251 
9252 		if (r <= 0)
9253 			break;
9254 
9255 		kvm_clear_request(KVM_REQ_PENDING_TIMER, vcpu);
9256 		if (kvm_cpu_has_pending_timer(vcpu))
9257 			kvm_inject_pending_timer_irqs(vcpu);
9258 
9259 		if (dm_request_for_irq_injection(vcpu) &&
9260 			kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
9261 			r = 0;
9262 			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
9263 			++vcpu->stat.request_irq_exits;
9264 			break;
9265 		}
9266 
9267 		if (__xfer_to_guest_mode_work_pending()) {
9268 			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
9269 			r = xfer_to_guest_mode_handle_work(vcpu);
9270 			if (r)
9271 				return r;
9272 			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
9273 		}
9274 	}
9275 
9276 	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
9277 
9278 	return r;
9279 }
9280 
complete_emulated_io(struct kvm_vcpu * vcpu)9281 static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
9282 {
9283 	int r;
9284 
9285 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
9286 	r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
9287 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
9288 	return r;
9289 }
9290 
complete_emulated_pio(struct kvm_vcpu * vcpu)9291 static int complete_emulated_pio(struct kvm_vcpu *vcpu)
9292 {
9293 	BUG_ON(!vcpu->arch.pio.count);
9294 
9295 	return complete_emulated_io(vcpu);
9296 }
9297 
9298 /*
9299  * Implements the following, as a state machine:
9300  *
9301  * read:
9302  *   for each fragment
9303  *     for each mmio piece in the fragment
9304  *       write gpa, len
9305  *       exit
9306  *       copy data
9307  *   execute insn
9308  *
9309  * write:
9310  *   for each fragment
9311  *     for each mmio piece in the fragment
9312  *       write gpa, len
9313  *       copy data
9314  *       exit
9315  */
complete_emulated_mmio(struct kvm_vcpu * vcpu)9316 static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
9317 {
9318 	struct kvm_run *run = vcpu->run;
9319 	struct kvm_mmio_fragment *frag;
9320 	unsigned len;
9321 
9322 	BUG_ON(!vcpu->mmio_needed);
9323 
9324 	/* Complete previous fragment */
9325 	frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
9326 	len = min(8u, frag->len);
9327 	if (!vcpu->mmio_is_write)
9328 		memcpy(frag->data, run->mmio.data, len);
9329 
9330 	if (frag->len <= 8) {
9331 		/* Switch to the next fragment. */
9332 		frag++;
9333 		vcpu->mmio_cur_fragment++;
9334 	} else {
9335 		/* Go forward to the next mmio piece. */
9336 		frag->data += len;
9337 		frag->gpa += len;
9338 		frag->len -= len;
9339 	}
9340 
9341 	if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
9342 		vcpu->mmio_needed = 0;
9343 
9344 		/* FIXME: return into emulator if single-stepping.  */
9345 		if (vcpu->mmio_is_write)
9346 			return 1;
9347 		vcpu->mmio_read_completed = 1;
9348 		return complete_emulated_io(vcpu);
9349 	}
9350 
9351 	run->exit_reason = KVM_EXIT_MMIO;
9352 	run->mmio.phys_addr = frag->gpa;
9353 	if (vcpu->mmio_is_write)
9354 		memcpy(run->mmio.data, frag->data, min(8u, frag->len));
9355 	run->mmio.len = min(8u, frag->len);
9356 	run->mmio.is_write = vcpu->mmio_is_write;
9357 	vcpu->arch.complete_userspace_io = complete_emulated_mmio;
9358 	return 0;
9359 }
9360 
kvm_save_current_fpu(struct fpu * fpu)9361 static void kvm_save_current_fpu(struct fpu *fpu)
9362 {
9363 	/*
9364 	 * If the target FPU state is not resident in the CPU registers, just
9365 	 * memcpy() from current, else save CPU state directly to the target.
9366 	 */
9367 	if (test_thread_flag(TIF_NEED_FPU_LOAD))
9368 		memcpy(&fpu->state, &current->thread.fpu.state,
9369 		       fpu_kernel_xstate_size);
9370 	else
9371 		copy_fpregs_to_fpstate(fpu);
9372 }
9373 
9374 /* Swap (qemu) user FPU context for the guest FPU context. */
kvm_load_guest_fpu(struct kvm_vcpu * vcpu)9375 static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
9376 {
9377 	fpregs_lock();
9378 
9379 	kvm_save_current_fpu(vcpu->arch.user_fpu);
9380 
9381 	/* PKRU is separately restored in kvm_x86_ops.run.  */
9382 	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state,
9383 				~XFEATURE_MASK_PKRU);
9384 
9385 	fpregs_mark_activate();
9386 	fpregs_unlock();
9387 
9388 	trace_kvm_fpu(1);
9389 }
9390 
9391 /* When vcpu_run ends, restore user space FPU context. */
kvm_put_guest_fpu(struct kvm_vcpu * vcpu)9392 static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
9393 {
9394 	fpregs_lock();
9395 
9396 	kvm_save_current_fpu(vcpu->arch.guest_fpu);
9397 
9398 	copy_kernel_to_fpregs(&vcpu->arch.user_fpu->state);
9399 
9400 	fpregs_mark_activate();
9401 	fpregs_unlock();
9402 
9403 	++vcpu->stat.fpu_reload;
9404 	trace_kvm_fpu(0);
9405 }
9406 
kvm_arch_vcpu_ioctl_run(struct kvm_vcpu * vcpu)9407 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
9408 {
9409 	struct kvm_run *kvm_run = vcpu->run;
9410 	int r;
9411 
9412 	vcpu_load(vcpu);
9413 	kvm_sigset_activate(vcpu);
9414 	kvm_load_guest_fpu(vcpu);
9415 
9416 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
9417 		if (kvm_run->immediate_exit) {
9418 			r = -EINTR;
9419 			goto out;
9420 		}
9421 		kvm_vcpu_block(vcpu);
9422 		kvm_apic_accept_events(vcpu);
9423 		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
9424 		r = -EAGAIN;
9425 		if (signal_pending(current)) {
9426 			r = -EINTR;
9427 			kvm_run->exit_reason = KVM_EXIT_INTR;
9428 			++vcpu->stat.signal_exits;
9429 		}
9430 		goto out;
9431 	}
9432 
9433 	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) {
9434 		r = -EINVAL;
9435 		goto out;
9436 	}
9437 
9438 	if (kvm_run->kvm_dirty_regs) {
9439 		r = sync_regs(vcpu);
9440 		if (r != 0)
9441 			goto out;
9442 	}
9443 
9444 	/* re-sync apic's tpr */
9445 	if (!lapic_in_kernel(vcpu)) {
9446 		if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
9447 			r = -EINVAL;
9448 			goto out;
9449 		}
9450 	}
9451 
9452 	if (unlikely(vcpu->arch.complete_userspace_io)) {
9453 		int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
9454 		vcpu->arch.complete_userspace_io = NULL;
9455 		r = cui(vcpu);
9456 		if (r <= 0)
9457 			goto out;
9458 	} else
9459 		WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
9460 
9461 	if (kvm_run->immediate_exit)
9462 		r = -EINTR;
9463 	else
9464 		r = vcpu_run(vcpu);
9465 
9466 out:
9467 	kvm_put_guest_fpu(vcpu);
9468 	if (kvm_run->kvm_valid_regs)
9469 		store_regs(vcpu);
9470 	post_kvm_run_save(vcpu);
9471 	kvm_sigset_deactivate(vcpu);
9472 
9473 	vcpu_put(vcpu);
9474 	return r;
9475 }
9476 
__get_regs(struct kvm_vcpu * vcpu,struct kvm_regs * regs)9477 static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
9478 {
9479 	if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
9480 		/*
9481 		 * We are here if userspace calls get_regs() in the middle of
9482 		 * instruction emulation. Registers state needs to be copied
9483 		 * back from emulation context to vcpu. Userspace shouldn't do
9484 		 * that usually, but some bad designed PV devices (vmware
9485 		 * backdoor interface) need this to work
9486 		 */
9487 		emulator_writeback_register_cache(vcpu->arch.emulate_ctxt);
9488 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
9489 	}
9490 	regs->rax = kvm_rax_read(vcpu);
9491 	regs->rbx = kvm_rbx_read(vcpu);
9492 	regs->rcx = kvm_rcx_read(vcpu);
9493 	regs->rdx = kvm_rdx_read(vcpu);
9494 	regs->rsi = kvm_rsi_read(vcpu);
9495 	regs->rdi = kvm_rdi_read(vcpu);
9496 	regs->rsp = kvm_rsp_read(vcpu);
9497 	regs->rbp = kvm_rbp_read(vcpu);
9498 #ifdef CONFIG_X86_64
9499 	regs->r8 = kvm_r8_read(vcpu);
9500 	regs->r9 = kvm_r9_read(vcpu);
9501 	regs->r10 = kvm_r10_read(vcpu);
9502 	regs->r11 = kvm_r11_read(vcpu);
9503 	regs->r12 = kvm_r12_read(vcpu);
9504 	regs->r13 = kvm_r13_read(vcpu);
9505 	regs->r14 = kvm_r14_read(vcpu);
9506 	regs->r15 = kvm_r15_read(vcpu);
9507 #endif
9508 
9509 	regs->rip = kvm_rip_read(vcpu);
9510 	regs->rflags = kvm_get_rflags(vcpu);
9511 }
9512 
kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu * vcpu,struct kvm_regs * regs)9513 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
9514 {
9515 	vcpu_load(vcpu);
9516 	__get_regs(vcpu, regs);
9517 	vcpu_put(vcpu);
9518 	return 0;
9519 }
9520 
__set_regs(struct kvm_vcpu * vcpu,struct kvm_regs * regs)9521 static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
9522 {
9523 	vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
9524 	vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
9525 
9526 	kvm_rax_write(vcpu, regs->rax);
9527 	kvm_rbx_write(vcpu, regs->rbx);
9528 	kvm_rcx_write(vcpu, regs->rcx);
9529 	kvm_rdx_write(vcpu, regs->rdx);
9530 	kvm_rsi_write(vcpu, regs->rsi);
9531 	kvm_rdi_write(vcpu, regs->rdi);
9532 	kvm_rsp_write(vcpu, regs->rsp);
9533 	kvm_rbp_write(vcpu, regs->rbp);
9534 #ifdef CONFIG_X86_64
9535 	kvm_r8_write(vcpu, regs->r8);
9536 	kvm_r9_write(vcpu, regs->r9);
9537 	kvm_r10_write(vcpu, regs->r10);
9538 	kvm_r11_write(vcpu, regs->r11);
9539 	kvm_r12_write(vcpu, regs->r12);
9540 	kvm_r13_write(vcpu, regs->r13);
9541 	kvm_r14_write(vcpu, regs->r14);
9542 	kvm_r15_write(vcpu, regs->r15);
9543 #endif
9544 
9545 	kvm_rip_write(vcpu, regs->rip);
9546 	kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);
9547 
9548 	vcpu->arch.exception.pending = false;
9549 
9550 	kvm_make_request(KVM_REQ_EVENT, vcpu);
9551 }
9552 
kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu * vcpu,struct kvm_regs * regs)9553 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
9554 {
9555 	vcpu_load(vcpu);
9556 	__set_regs(vcpu, regs);
9557 	vcpu_put(vcpu);
9558 	return 0;
9559 }
9560 
kvm_get_cs_db_l_bits(struct kvm_vcpu * vcpu,int * db,int * l)9561 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
9562 {
9563 	struct kvm_segment cs;
9564 
9565 	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
9566 	*db = cs.db;
9567 	*l = cs.l;
9568 }
9569 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
9570 
__get_sregs(struct kvm_vcpu * vcpu,struct kvm_sregs * sregs)9571 static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
9572 {
9573 	struct desc_ptr dt;
9574 
9575 	kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
9576 	kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
9577 	kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
9578 	kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
9579 	kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
9580 	kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
9581 
9582 	kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
9583 	kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
9584 
9585 	kvm_x86_ops.get_idt(vcpu, &dt);
9586 	sregs->idt.limit = dt.size;
9587 	sregs->idt.base = dt.address;
9588 	kvm_x86_ops.get_gdt(vcpu, &dt);
9589 	sregs->gdt.limit = dt.size;
9590 	sregs->gdt.base = dt.address;
9591 
9592 	sregs->cr0 = kvm_read_cr0(vcpu);
9593 	sregs->cr2 = vcpu->arch.cr2;
9594 	sregs->cr3 = kvm_read_cr3(vcpu);
9595 	sregs->cr4 = kvm_read_cr4(vcpu);
9596 	sregs->cr8 = kvm_get_cr8(vcpu);
9597 	sregs->efer = vcpu->arch.efer;
9598 	sregs->apic_base = kvm_get_apic_base(vcpu);
9599 
9600 	memset(sregs->interrupt_bitmap, 0, sizeof(sregs->interrupt_bitmap));
9601 
9602 	if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft)
9603 		set_bit(vcpu->arch.interrupt.nr,
9604 			(unsigned long *)sregs->interrupt_bitmap);
9605 }
9606 
kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu * vcpu,struct kvm_sregs * sregs)9607 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
9608 				  struct kvm_sregs *sregs)
9609 {
9610 	vcpu_load(vcpu);
9611 	__get_sregs(vcpu, sregs);
9612 	vcpu_put(vcpu);
9613 	return 0;
9614 }
9615 
kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu * vcpu,struct kvm_mp_state * mp_state)9616 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
9617 				    struct kvm_mp_state *mp_state)
9618 {
9619 	vcpu_load(vcpu);
9620 	if (kvm_mpx_supported())
9621 		kvm_load_guest_fpu(vcpu);
9622 
9623 	kvm_apic_accept_events(vcpu);
9624 	if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
9625 					vcpu->arch.pv.pv_unhalted)
9626 		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
9627 	else
9628 		mp_state->mp_state = vcpu->arch.mp_state;
9629 
9630 	if (kvm_mpx_supported())
9631 		kvm_put_guest_fpu(vcpu);
9632 	vcpu_put(vcpu);
9633 	return 0;
9634 }
9635 
kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu * vcpu,struct kvm_mp_state * mp_state)9636 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
9637 				    struct kvm_mp_state *mp_state)
9638 {
9639 	int ret = -EINVAL;
9640 
9641 	vcpu_load(vcpu);
9642 
9643 	if (!lapic_in_kernel(vcpu) &&
9644 	    mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
9645 		goto out;
9646 
9647 	/*
9648 	 * KVM_MP_STATE_INIT_RECEIVED means the processor is in
9649 	 * INIT state; latched init should be reported using
9650 	 * KVM_SET_VCPU_EVENTS, so reject it here.
9651 	 */
9652 	if ((kvm_vcpu_latch_init(vcpu) || vcpu->arch.smi_pending) &&
9653 	    (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
9654 	     mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
9655 		goto out;
9656 
9657 	if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
9658 		vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
9659 		set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
9660 	} else
9661 		vcpu->arch.mp_state = mp_state->mp_state;
9662 	kvm_make_request(KVM_REQ_EVENT, vcpu);
9663 
9664 	ret = 0;
9665 out:
9666 	vcpu_put(vcpu);
9667 	return ret;
9668 }
9669 
kvm_task_switch(struct kvm_vcpu * vcpu,u16 tss_selector,int idt_index,int reason,bool has_error_code,u32 error_code)9670 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
9671 		    int reason, bool has_error_code, u32 error_code)
9672 {
9673 	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
9674 	int ret;
9675 
9676 	init_emulate_ctxt(vcpu);
9677 
9678 	ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
9679 				   has_error_code, error_code);
9680 	if (ret) {
9681 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
9682 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
9683 		vcpu->run->internal.ndata = 0;
9684 		return 0;
9685 	}
9686 
9687 	kvm_rip_write(vcpu, ctxt->eip);
9688 	kvm_set_rflags(vcpu, ctxt->eflags);
9689 	return 1;
9690 }
9691 EXPORT_SYMBOL_GPL(kvm_task_switch);
9692 
kvm_valid_sregs(struct kvm_vcpu * vcpu,struct kvm_sregs * sregs)9693 static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
9694 {
9695 	if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
9696 		/*
9697 		 * When EFER.LME and CR0.PG are set, the processor is in
9698 		 * 64-bit mode (though maybe in a 32-bit code segment).
9699 		 * CR4.PAE and EFER.LMA must be set.
9700 		 */
9701 		if (!(sregs->cr4 & X86_CR4_PAE)
9702 		    || !(sregs->efer & EFER_LMA))
9703 			return -EINVAL;
9704 		if (sregs->cr3 & vcpu->arch.cr3_lm_rsvd_bits)
9705 			return -EINVAL;
9706 	} else {
9707 		/*
9708 		 * Not in 64-bit mode: EFER.LMA is clear and the code
9709 		 * segment cannot be 64-bit.
9710 		 */
9711 		if (sregs->efer & EFER_LMA || sregs->cs.l)
9712 			return -EINVAL;
9713 	}
9714 
9715 	return kvm_valid_cr4(vcpu, sregs->cr4);
9716 }
9717 
__set_sregs(struct kvm_vcpu * vcpu,struct kvm_sregs * sregs)9718 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
9719 {
9720 	struct msr_data apic_base_msr;
9721 	int mmu_reset_needed = 0;
9722 	int cpuid_update_needed = 0;
9723 	int pending_vec, max_bits, idx;
9724 	struct desc_ptr dt;
9725 	int ret = -EINVAL;
9726 
9727 	if (kvm_valid_sregs(vcpu, sregs))
9728 		goto out;
9729 
9730 	apic_base_msr.data = sregs->apic_base;
9731 	apic_base_msr.host_initiated = true;
9732 	if (kvm_set_apic_base(vcpu, &apic_base_msr))
9733 		goto out;
9734 
9735 	dt.size = sregs->idt.limit;
9736 	dt.address = sregs->idt.base;
9737 	kvm_x86_ops.set_idt(vcpu, &dt);
9738 	dt.size = sregs->gdt.limit;
9739 	dt.address = sregs->gdt.base;
9740 	kvm_x86_ops.set_gdt(vcpu, &dt);
9741 
9742 	vcpu->arch.cr2 = sregs->cr2;
9743 	mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
9744 	vcpu->arch.cr3 = sregs->cr3;
9745 	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
9746 
9747 	kvm_set_cr8(vcpu, sregs->cr8);
9748 
9749 	mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
9750 	kvm_x86_ops.set_efer(vcpu, sregs->efer);
9751 
9752 	mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
9753 	kvm_x86_ops.set_cr0(vcpu, sregs->cr0);
9754 	vcpu->arch.cr0 = sregs->cr0;
9755 
9756 	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
9757 	cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) &
9758 				(X86_CR4_OSXSAVE | X86_CR4_PKE));
9759 	kvm_x86_ops.set_cr4(vcpu, sregs->cr4);
9760 	if (cpuid_update_needed)
9761 		kvm_update_cpuid_runtime(vcpu);
9762 
9763 	idx = srcu_read_lock(&vcpu->kvm->srcu);
9764 	if (is_pae_paging(vcpu)) {
9765 		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
9766 		mmu_reset_needed = 1;
9767 	}
9768 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
9769 
9770 	if (mmu_reset_needed)
9771 		kvm_mmu_reset_context(vcpu);
9772 
9773 	max_bits = KVM_NR_INTERRUPTS;
9774 	pending_vec = find_first_bit(
9775 		(const unsigned long *)sregs->interrupt_bitmap, max_bits);
9776 	if (pending_vec < max_bits) {
9777 		kvm_queue_interrupt(vcpu, pending_vec, false);
9778 		pr_debug("Set back pending irq %d\n", pending_vec);
9779 	}
9780 
9781 	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
9782 	kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
9783 	kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
9784 	kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
9785 	kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
9786 	kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
9787 
9788 	kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
9789 	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
9790 
9791 	update_cr8_intercept(vcpu);
9792 
9793 	/* Older userspace won't unhalt the vcpu on reset. */
9794 	if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
9795 	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
9796 	    !is_protmode(vcpu))
9797 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
9798 
9799 	kvm_make_request(KVM_REQ_EVENT, vcpu);
9800 
9801 	ret = 0;
9802 out:
9803 	return ret;
9804 }
9805 
kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu * vcpu,struct kvm_sregs * sregs)9806 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
9807 				  struct kvm_sregs *sregs)
9808 {
9809 	int ret;
9810 
9811 	vcpu_load(vcpu);
9812 	ret = __set_sregs(vcpu, sregs);
9813 	vcpu_put(vcpu);
9814 	return ret;
9815 }
9816 
kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu * vcpu,struct kvm_guest_debug * dbg)9817 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
9818 					struct kvm_guest_debug *dbg)
9819 {
9820 	unsigned long rflags;
9821 	int i, r;
9822 
9823 	vcpu_load(vcpu);
9824 
9825 	if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
9826 		r = -EBUSY;
9827 		if (vcpu->arch.exception.pending)
9828 			goto out;
9829 		if (dbg->control & KVM_GUESTDBG_INJECT_DB)
9830 			kvm_queue_exception(vcpu, DB_VECTOR);
9831 		else
9832 			kvm_queue_exception(vcpu, BP_VECTOR);
9833 	}
9834 
9835 	/*
9836 	 * Read rflags as long as potentially injected trace flags are still
9837 	 * filtered out.
9838 	 */
9839 	rflags = kvm_get_rflags(vcpu);
9840 
9841 	vcpu->guest_debug = dbg->control;
9842 	if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
9843 		vcpu->guest_debug = 0;
9844 
9845 	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
9846 		for (i = 0; i < KVM_NR_DB_REGS; ++i)
9847 			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
9848 		vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
9849 	} else {
9850 		for (i = 0; i < KVM_NR_DB_REGS; i++)
9851 			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
9852 	}
9853 	kvm_update_dr7(vcpu);
9854 
9855 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
9856 		vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
9857 			get_segment_base(vcpu, VCPU_SREG_CS);
9858 
9859 	/*
9860 	 * Trigger an rflags update that will inject or remove the trace
9861 	 * flags.
9862 	 */
9863 	kvm_set_rflags(vcpu, rflags);
9864 
9865 	kvm_x86_ops.update_exception_bitmap(vcpu);
9866 
9867 	r = 0;
9868 
9869 out:
9870 	vcpu_put(vcpu);
9871 	return r;
9872 }
9873 
9874 /*
9875  * Translate a guest virtual address to a guest physical address.
9876  */
kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu * vcpu,struct kvm_translation * tr)9877 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
9878 				    struct kvm_translation *tr)
9879 {
9880 	unsigned long vaddr = tr->linear_address;
9881 	gpa_t gpa;
9882 	int idx;
9883 
9884 	vcpu_load(vcpu);
9885 
9886 	idx = srcu_read_lock(&vcpu->kvm->srcu);
9887 	gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
9888 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
9889 	tr->physical_address = gpa;
9890 	tr->valid = gpa != UNMAPPED_GVA;
9891 	tr->writeable = 1;
9892 	tr->usermode = 0;
9893 
9894 	vcpu_put(vcpu);
9895 	return 0;
9896 }
9897 
kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu * vcpu,struct kvm_fpu * fpu)9898 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
9899 {
9900 	struct fxregs_state *fxsave;
9901 
9902 	vcpu_load(vcpu);
9903 
9904 	fxsave = &vcpu->arch.guest_fpu->state.fxsave;
9905 	memcpy(fpu->fpr, fxsave->st_space, 128);
9906 	fpu->fcw = fxsave->cwd;
9907 	fpu->fsw = fxsave->swd;
9908 	fpu->ftwx = fxsave->twd;
9909 	fpu->last_opcode = fxsave->fop;
9910 	fpu->last_ip = fxsave->rip;
9911 	fpu->last_dp = fxsave->rdp;
9912 	memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space));
9913 
9914 	vcpu_put(vcpu);
9915 	return 0;
9916 }
9917 
kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu * vcpu,struct kvm_fpu * fpu)9918 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
9919 {
9920 	struct fxregs_state *fxsave;
9921 
9922 	vcpu_load(vcpu);
9923 
9924 	fxsave = &vcpu->arch.guest_fpu->state.fxsave;
9925 
9926 	memcpy(fxsave->st_space, fpu->fpr, 128);
9927 	fxsave->cwd = fpu->fcw;
9928 	fxsave->swd = fpu->fsw;
9929 	fxsave->twd = fpu->ftwx;
9930 	fxsave->fop = fpu->last_opcode;
9931 	fxsave->rip = fpu->last_ip;
9932 	fxsave->rdp = fpu->last_dp;
9933 	memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space));
9934 
9935 	vcpu_put(vcpu);
9936 	return 0;
9937 }
9938 
store_regs(struct kvm_vcpu * vcpu)9939 static void store_regs(struct kvm_vcpu *vcpu)
9940 {
9941 	BUILD_BUG_ON(sizeof(struct kvm_sync_regs) > SYNC_REGS_SIZE_BYTES);
9942 
9943 	if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS)
9944 		__get_regs(vcpu, &vcpu->run->s.regs.regs);
9945 
9946 	if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS)
9947 		__get_sregs(vcpu, &vcpu->run->s.regs.sregs);
9948 
9949 	if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS)
9950 		kvm_vcpu_ioctl_x86_get_vcpu_events(
9951 				vcpu, &vcpu->run->s.regs.events);
9952 }
9953 
sync_regs(struct kvm_vcpu * vcpu)9954 static int sync_regs(struct kvm_vcpu *vcpu)
9955 {
9956 	if (vcpu->run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)
9957 		return -EINVAL;
9958 
9959 	if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) {
9960 		__set_regs(vcpu, &vcpu->run->s.regs.regs);
9961 		vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS;
9962 	}
9963 	if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) {
9964 		if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs))
9965 			return -EINVAL;
9966 		vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS;
9967 	}
9968 	if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) {
9969 		if (kvm_vcpu_ioctl_x86_set_vcpu_events(
9970 				vcpu, &vcpu->run->s.regs.events))
9971 			return -EINVAL;
9972 		vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS;
9973 	}
9974 
9975 	return 0;
9976 }
9977 
fx_init(struct kvm_vcpu * vcpu)9978 static void fx_init(struct kvm_vcpu *vcpu)
9979 {
9980 	fpstate_init(&vcpu->arch.guest_fpu->state);
9981 	if (boot_cpu_has(X86_FEATURE_XSAVES))
9982 		vcpu->arch.guest_fpu->state.xsave.header.xcomp_bv =
9983 			host_xcr0 | XSTATE_COMPACTION_ENABLED;
9984 
9985 	/*
9986 	 * Ensure guest xcr0 is valid for loading
9987 	 */
9988 	vcpu->arch.xcr0 = XFEATURE_MASK_FP;
9989 
9990 	vcpu->arch.cr0 |= X86_CR0_ET;
9991 }
9992 
kvm_arch_vcpu_precreate(struct kvm * kvm,unsigned int id)9993 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
9994 {
9995 	if (kvm_check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
9996 		pr_warn_once("kvm: SMP vm created on host with unstable TSC; "
9997 			     "guest TSC will not be reliable\n");
9998 
9999 	return 0;
10000 }
10001 
kvm_arch_vcpu_create(struct kvm_vcpu * vcpu)10002 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
10003 {
10004 	struct page *page;
10005 	int r;
10006 
10007 	if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
10008 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
10009 	else
10010 		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
10011 
10012 	kvm_set_tsc_khz(vcpu, max_tsc_khz);
10013 
10014 	r = kvm_mmu_create(vcpu);
10015 	if (r < 0)
10016 		return r;
10017 
10018 	if (irqchip_in_kernel(vcpu->kvm)) {
10019 		r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
10020 		if (r < 0)
10021 			goto fail_mmu_destroy;
10022 		if (kvm_apicv_activated(vcpu->kvm))
10023 			vcpu->arch.apicv_active = true;
10024 	} else
10025 		static_key_slow_inc(&kvm_no_apic_vcpu);
10026 
10027 	r = -ENOMEM;
10028 
10029 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
10030 	if (!page)
10031 		goto fail_free_lapic;
10032 	vcpu->arch.pio_data = page_address(page);
10033 
10034 	vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
10035 				       GFP_KERNEL_ACCOUNT);
10036 	if (!vcpu->arch.mce_banks)
10037 		goto fail_free_pio_data;
10038 	vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
10039 
10040 	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask,
10041 				GFP_KERNEL_ACCOUNT))
10042 		goto fail_free_mce_banks;
10043 
10044 	if (!alloc_emulate_ctxt(vcpu))
10045 		goto free_wbinvd_dirty_mask;
10046 
10047 	vcpu->arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
10048 						GFP_KERNEL_ACCOUNT);
10049 	if (!vcpu->arch.user_fpu) {
10050 		pr_err("kvm: failed to allocate userspace's fpu\n");
10051 		goto free_emulate_ctxt;
10052 	}
10053 
10054 	vcpu->arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
10055 						 GFP_KERNEL_ACCOUNT);
10056 	if (!vcpu->arch.guest_fpu) {
10057 		pr_err("kvm: failed to allocate vcpu's fpu\n");
10058 		goto free_user_fpu;
10059 	}
10060 	fx_init(vcpu);
10061 
10062 	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
10063 	vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
10064 
10065 	vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
10066 
10067 	kvm_async_pf_hash_reset(vcpu);
10068 	kvm_pmu_init(vcpu);
10069 
10070 	vcpu->arch.pending_external_vector = -1;
10071 	vcpu->arch.preempted_in_kernel = false;
10072 
10073 	kvm_hv_vcpu_init(vcpu);
10074 
10075 	r = kvm_x86_ops.vcpu_create(vcpu);
10076 	if (r)
10077 		goto free_guest_fpu;
10078 
10079 	vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
10080 	vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
10081 	kvm_vcpu_mtrr_init(vcpu);
10082 	vcpu_load(vcpu);
10083 	kvm_vcpu_reset(vcpu, false);
10084 	kvm_init_mmu(vcpu, false);
10085 	vcpu_put(vcpu);
10086 	return 0;
10087 
10088 free_guest_fpu:
10089 	kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
10090 free_user_fpu:
10091 	kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
10092 free_emulate_ctxt:
10093 	kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt);
10094 free_wbinvd_dirty_mask:
10095 	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
10096 fail_free_mce_banks:
10097 	kfree(vcpu->arch.mce_banks);
10098 fail_free_pio_data:
10099 	free_page((unsigned long)vcpu->arch.pio_data);
10100 fail_free_lapic:
10101 	kvm_free_lapic(vcpu);
10102 fail_mmu_destroy:
10103 	kvm_mmu_destroy(vcpu);
10104 	return r;
10105 }
10106 
kvm_arch_vcpu_postcreate(struct kvm_vcpu * vcpu)10107 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
10108 {
10109 	struct kvm *kvm = vcpu->kvm;
10110 
10111 	kvm_hv_vcpu_postcreate(vcpu);
10112 
10113 	if (mutex_lock_killable(&vcpu->mutex))
10114 		return;
10115 	vcpu_load(vcpu);
10116 	kvm_synchronize_tsc(vcpu, 0);
10117 	vcpu_put(vcpu);
10118 
10119 	/* poll control enabled by default */
10120 	vcpu->arch.msr_kvm_poll_control = 1;
10121 
10122 	mutex_unlock(&vcpu->mutex);
10123 
10124 	if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0)
10125 		schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
10126 						KVMCLOCK_SYNC_PERIOD);
10127 }
10128 
kvm_arch_vcpu_destroy(struct kvm_vcpu * vcpu)10129 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
10130 {
10131 	struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;
10132 	int idx;
10133 
10134 	kvm_release_pfn(cache->pfn, cache->dirty, cache);
10135 
10136 	kvmclock_reset(vcpu);
10137 
10138 	kvm_x86_ops.vcpu_free(vcpu);
10139 
10140 	kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt);
10141 	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
10142 	kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
10143 	kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
10144 
10145 	kvm_hv_vcpu_uninit(vcpu);
10146 	kvm_pmu_destroy(vcpu);
10147 	kfree(vcpu->arch.mce_banks);
10148 	kvm_free_lapic(vcpu);
10149 	idx = srcu_read_lock(&vcpu->kvm->srcu);
10150 	kvm_mmu_destroy(vcpu);
10151 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
10152 	free_page((unsigned long)vcpu->arch.pio_data);
10153 	kvfree(vcpu->arch.cpuid_entries);
10154 	if (!lapic_in_kernel(vcpu))
10155 		static_key_slow_dec(&kvm_no_apic_vcpu);
10156 }
10157 
kvm_vcpu_reset(struct kvm_vcpu * vcpu,bool init_event)10158 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
10159 {
10160 	kvm_lapic_reset(vcpu, init_event);
10161 
10162 	vcpu->arch.hflags = 0;
10163 
10164 	vcpu->arch.smi_pending = 0;
10165 	vcpu->arch.smi_count = 0;
10166 	atomic_set(&vcpu->arch.nmi_queued, 0);
10167 	vcpu->arch.nmi_pending = 0;
10168 	vcpu->arch.nmi_injected = false;
10169 	kvm_clear_interrupt_queue(vcpu);
10170 	kvm_clear_exception_queue(vcpu);
10171 
10172 	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
10173 	kvm_update_dr0123(vcpu);
10174 	vcpu->arch.dr6 = DR6_INIT;
10175 	vcpu->arch.dr7 = DR7_FIXED_1;
10176 	kvm_update_dr7(vcpu);
10177 
10178 	vcpu->arch.cr2 = 0;
10179 
10180 	kvm_make_request(KVM_REQ_EVENT, vcpu);
10181 	vcpu->arch.apf.msr_en_val = 0;
10182 	vcpu->arch.apf.msr_int_val = 0;
10183 	vcpu->arch.st.msr_val = 0;
10184 
10185 	kvmclock_reset(vcpu);
10186 
10187 	kvm_clear_async_pf_completion_queue(vcpu);
10188 	kvm_async_pf_hash_reset(vcpu);
10189 	vcpu->arch.apf.halted = false;
10190 
10191 	if (kvm_mpx_supported()) {
10192 		void *mpx_state_buffer;
10193 
10194 		/*
10195 		 * To avoid have the INIT path from kvm_apic_has_events() that be
10196 		 * called with loaded FPU and does not let userspace fix the state.
10197 		 */
10198 		if (init_event)
10199 			kvm_put_guest_fpu(vcpu);
10200 		mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave,
10201 					XFEATURE_BNDREGS);
10202 		if (mpx_state_buffer)
10203 			memset(mpx_state_buffer, 0, sizeof(struct mpx_bndreg_state));
10204 		mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave,
10205 					XFEATURE_BNDCSR);
10206 		if (mpx_state_buffer)
10207 			memset(mpx_state_buffer, 0, sizeof(struct mpx_bndcsr));
10208 		if (init_event)
10209 			kvm_load_guest_fpu(vcpu);
10210 	}
10211 
10212 	if (!init_event) {
10213 		kvm_pmu_reset(vcpu);
10214 		vcpu->arch.smbase = 0x30000;
10215 
10216 		vcpu->arch.msr_misc_features_enables = 0;
10217 
10218 		vcpu->arch.xcr0 = XFEATURE_MASK_FP;
10219 	}
10220 
10221 	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
10222 	vcpu->arch.regs_avail = ~0;
10223 	vcpu->arch.regs_dirty = ~0;
10224 
10225 	vcpu->arch.ia32_xss = 0;
10226 
10227 	kvm_x86_ops.vcpu_reset(vcpu, init_event);
10228 }
10229 
kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu * vcpu,u8 vector)10230 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
10231 {
10232 	struct kvm_segment cs;
10233 
10234 	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
10235 	cs.selector = vector << 8;
10236 	cs.base = vector << 12;
10237 	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
10238 	kvm_rip_write(vcpu, 0);
10239 }
10240 
kvm_arch_hardware_enable(void)10241 int kvm_arch_hardware_enable(void)
10242 {
10243 	struct kvm *kvm;
10244 	struct kvm_vcpu *vcpu;
10245 	int i;
10246 	int ret;
10247 	u64 local_tsc;
10248 	u64 max_tsc = 0;
10249 	bool stable, backwards_tsc = false;
10250 
10251 	kvm_user_return_msr_cpu_online();
10252 	ret = kvm_x86_ops.hardware_enable();
10253 	if (ret != 0)
10254 		return ret;
10255 
10256 	local_tsc = rdtsc();
10257 	stable = !kvm_check_tsc_unstable();
10258 	list_for_each_entry(kvm, &vm_list, vm_list) {
10259 		kvm_for_each_vcpu(i, vcpu, kvm) {
10260 			if (!stable && vcpu->cpu == smp_processor_id())
10261 				kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
10262 			if (stable && vcpu->arch.last_host_tsc > local_tsc) {
10263 				backwards_tsc = true;
10264 				if (vcpu->arch.last_host_tsc > max_tsc)
10265 					max_tsc = vcpu->arch.last_host_tsc;
10266 			}
10267 		}
10268 	}
10269 
10270 	/*
10271 	 * Sometimes, even reliable TSCs go backwards.  This happens on
10272 	 * platforms that reset TSC during suspend or hibernate actions, but
10273 	 * maintain synchronization.  We must compensate.  Fortunately, we can
10274 	 * detect that condition here, which happens early in CPU bringup,
10275 	 * before any KVM threads can be running.  Unfortunately, we can't
10276 	 * bring the TSCs fully up to date with real time, as we aren't yet far
10277 	 * enough into CPU bringup that we know how much real time has actually
10278 	 * elapsed; our helper function, ktime_get_boottime_ns() will be using boot
10279 	 * variables that haven't been updated yet.
10280 	 *
10281 	 * So we simply find the maximum observed TSC above, then record the
10282 	 * adjustment to TSC in each VCPU.  When the VCPU later gets loaded,
10283 	 * the adjustment will be applied.  Note that we accumulate
10284 	 * adjustments, in case multiple suspend cycles happen before some VCPU
10285 	 * gets a chance to run again.  In the event that no KVM threads get a
10286 	 * chance to run, we will miss the entire elapsed period, as we'll have
10287 	 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
10288 	 * loose cycle time.  This isn't too big a deal, since the loss will be
10289 	 * uniform across all VCPUs (not to mention the scenario is extremely
10290 	 * unlikely). It is possible that a second hibernate recovery happens
10291 	 * much faster than a first, causing the observed TSC here to be
10292 	 * smaller; this would require additional padding adjustment, which is
10293 	 * why we set last_host_tsc to the local tsc observed here.
10294 	 *
10295 	 * N.B. - this code below runs only on platforms with reliable TSC,
10296 	 * as that is the only way backwards_tsc is set above.  Also note
10297 	 * that this runs for ALL vcpus, which is not a bug; all VCPUs should
10298 	 * have the same delta_cyc adjustment applied if backwards_tsc
10299 	 * is detected.  Note further, this adjustment is only done once,
10300 	 * as we reset last_host_tsc on all VCPUs to stop this from being
10301 	 * called multiple times (one for each physical CPU bringup).
10302 	 *
10303 	 * Platforms with unreliable TSCs don't have to deal with this, they
10304 	 * will be compensated by the logic in vcpu_load, which sets the TSC to
10305 	 * catchup mode.  This will catchup all VCPUs to real time, but cannot
10306 	 * guarantee that they stay in perfect synchronization.
10307 	 */
10308 	if (backwards_tsc) {
10309 		u64 delta_cyc = max_tsc - local_tsc;
10310 		list_for_each_entry(kvm, &vm_list, vm_list) {
10311 			kvm->arch.backwards_tsc_observed = true;
10312 			kvm_for_each_vcpu(i, vcpu, kvm) {
10313 				vcpu->arch.tsc_offset_adjustment += delta_cyc;
10314 				vcpu->arch.last_host_tsc = local_tsc;
10315 				kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
10316 			}
10317 
10318 			/*
10319 			 * We have to disable TSC offset matching.. if you were
10320 			 * booting a VM while issuing an S4 host suspend....
10321 			 * you may have some problem.  Solving this issue is
10322 			 * left as an exercise to the reader.
10323 			 */
10324 			kvm->arch.last_tsc_nsec = 0;
10325 			kvm->arch.last_tsc_write = 0;
10326 		}
10327 
10328 	}
10329 	return 0;
10330 }
10331 
kvm_arch_hardware_disable(void)10332 void kvm_arch_hardware_disable(void)
10333 {
10334 	kvm_x86_ops.hardware_disable();
10335 	drop_user_return_notifiers();
10336 }
10337 
kvm_arch_hardware_setup(void * opaque)10338 int kvm_arch_hardware_setup(void *opaque)
10339 {
10340 	struct kvm_x86_init_ops *ops = opaque;
10341 	int r;
10342 
10343 	rdmsrl_safe(MSR_EFER, &host_efer);
10344 
10345 	if (boot_cpu_has(X86_FEATURE_XSAVES))
10346 		rdmsrl(MSR_IA32_XSS, host_xss);
10347 
10348 	r = ops->hardware_setup();
10349 	if (r != 0)
10350 		return r;
10351 
10352 	memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
10353 
10354 	if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
10355 		supported_xss = 0;
10356 
10357 #define __kvm_cpu_cap_has(UNUSED_, f) kvm_cpu_cap_has(f)
10358 	cr4_reserved_bits = __cr4_reserved_bits(__kvm_cpu_cap_has, UNUSED_);
10359 #undef __kvm_cpu_cap_has
10360 
10361 	if (kvm_has_tsc_control) {
10362 		/*
10363 		 * Make sure the user can only configure tsc_khz values that
10364 		 * fit into a signed integer.
10365 		 * A min value is not calculated because it will always
10366 		 * be 1 on all machines.
10367 		 */
10368 		u64 max = min(0x7fffffffULL,
10369 			      __scale_tsc(kvm_max_tsc_scaling_ratio, tsc_khz));
10370 		kvm_max_guest_tsc_khz = max;
10371 
10372 		kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
10373 	}
10374 
10375 	kvm_init_msr_list();
10376 	return 0;
10377 }
10378 
kvm_arch_hardware_unsetup(void)10379 void kvm_arch_hardware_unsetup(void)
10380 {
10381 	kvm_x86_ops.hardware_unsetup();
10382 }
10383 
kvm_arch_check_processor_compat(void * opaque)10384 int kvm_arch_check_processor_compat(void *opaque)
10385 {
10386 	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
10387 	struct kvm_x86_init_ops *ops = opaque;
10388 
10389 	WARN_ON(!irqs_disabled());
10390 
10391 	if (__cr4_reserved_bits(cpu_has, c) !=
10392 	    __cr4_reserved_bits(cpu_has, &boot_cpu_data))
10393 		return -EIO;
10394 
10395 	return ops->check_processor_compatibility();
10396 }
10397 
kvm_vcpu_is_reset_bsp(struct kvm_vcpu * vcpu)10398 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
10399 {
10400 	return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
10401 }
10402 EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp);
10403 
kvm_vcpu_is_bsp(struct kvm_vcpu * vcpu)10404 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
10405 {
10406 	return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
10407 }
10408 
10409 struct static_key kvm_no_apic_vcpu __read_mostly;
10410 EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu);
10411 
kvm_arch_sched_in(struct kvm_vcpu * vcpu,int cpu)10412 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
10413 {
10414 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
10415 
10416 	vcpu->arch.l1tf_flush_l1d = true;
10417 	if (pmu->version && unlikely(pmu->event_count)) {
10418 		pmu->need_cleanup = true;
10419 		kvm_make_request(KVM_REQ_PMU, vcpu);
10420 	}
10421 	kvm_x86_ops.sched_in(vcpu, cpu);
10422 }
10423 
kvm_arch_free_vm(struct kvm * kvm)10424 void kvm_arch_free_vm(struct kvm *kvm)
10425 {
10426 	kfree(kvm->arch.hyperv.hv_pa_pg);
10427 	vfree(kvm);
10428 }
10429 
10430 
kvm_arch_init_vm(struct kvm * kvm,unsigned long type)10431 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
10432 {
10433 	int ret;
10434 
10435 	if (type)
10436 		return -EINVAL;
10437 
10438 	ret = kvm_page_track_init(kvm);
10439 	if (ret)
10440 		return ret;
10441 
10442 	INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
10443 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
10444 	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
10445 	INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages);
10446 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
10447 	atomic_set(&kvm->arch.noncoherent_dma_count, 0);
10448 
10449 	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
10450 	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
10451 	/* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */
10452 	set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
10453 		&kvm->arch.irq_sources_bitmap);
10454 
10455 	raw_spin_lock_init(&kvm->arch.tsc_write_lock);
10456 	mutex_init(&kvm->arch.apic_map_lock);
10457 	spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
10458 
10459 	kvm->arch.kvmclock_offset = -get_kvmclock_base_ns();
10460 	pvclock_update_vm_gtod_copy(kvm);
10461 
10462 	kvm->arch.guest_can_read_msr_platform_info = true;
10463 
10464 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
10465 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
10466 
10467 	kvm_hv_init_vm(kvm);
10468 	kvm_mmu_init_vm(kvm);
10469 
10470 	return kvm_x86_ops.vm_init(kvm);
10471 }
10472 
kvm_arch_post_init_vm(struct kvm * kvm)10473 int kvm_arch_post_init_vm(struct kvm *kvm)
10474 {
10475 	return kvm_mmu_post_init_vm(kvm);
10476 }
10477 
kvm_unload_vcpu_mmu(struct kvm_vcpu * vcpu)10478 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
10479 {
10480 	vcpu_load(vcpu);
10481 	kvm_mmu_unload(vcpu);
10482 	vcpu_put(vcpu);
10483 }
10484 
kvm_free_vcpus(struct kvm * kvm)10485 static void kvm_free_vcpus(struct kvm *kvm)
10486 {
10487 	unsigned int i;
10488 	struct kvm_vcpu *vcpu;
10489 
10490 	/*
10491 	 * Unpin any mmu pages first.
10492 	 */
10493 	kvm_for_each_vcpu(i, vcpu, kvm) {
10494 		kvm_clear_async_pf_completion_queue(vcpu);
10495 		kvm_unload_vcpu_mmu(vcpu);
10496 	}
10497 	kvm_for_each_vcpu(i, vcpu, kvm)
10498 		kvm_vcpu_destroy(vcpu);
10499 
10500 	mutex_lock(&kvm->lock);
10501 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
10502 		kvm->vcpus[i] = NULL;
10503 
10504 	atomic_set(&kvm->online_vcpus, 0);
10505 	mutex_unlock(&kvm->lock);
10506 }
10507 
kvm_arch_sync_events(struct kvm * kvm)10508 void kvm_arch_sync_events(struct kvm *kvm)
10509 {
10510 	cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
10511 	cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
10512 	kvm_free_pit(kvm);
10513 }
10514 
__x86_set_memory_region(struct kvm * kvm,int id,gpa_t gpa,u32 size)10515 int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
10516 {
10517 	int i, r;
10518 	unsigned long hva, old_npages;
10519 	struct kvm_memslots *slots = kvm_memslots(kvm);
10520 	struct kvm_memory_slot *slot;
10521 
10522 	/* Called with kvm->slots_lock held.  */
10523 	if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
10524 		return -EINVAL;
10525 
10526 	slot = id_to_memslot(slots, id);
10527 	if (size) {
10528 		if (slot && slot->npages)
10529 			return -EEXIST;
10530 
10531 		/*
10532 		 * MAP_SHARED to prevent internal slot pages from being moved
10533 		 * by fork()/COW.
10534 		 */
10535 		hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
10536 			      MAP_SHARED | MAP_ANONYMOUS, 0);
10537 		if (IS_ERR((void *)hva))
10538 			return PTR_ERR((void *)hva);
10539 	} else {
10540 		if (!slot || !slot->npages)
10541 			return 0;
10542 
10543 		old_npages = slot->npages;
10544 		hva = 0;
10545 	}
10546 
10547 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
10548 		struct kvm_userspace_memory_region m;
10549 
10550 		m.slot = id | (i << 16);
10551 		m.flags = 0;
10552 		m.guest_phys_addr = gpa;
10553 		m.userspace_addr = hva;
10554 		m.memory_size = size;
10555 		r = __kvm_set_memory_region(kvm, &m);
10556 		if (r < 0)
10557 			return r;
10558 	}
10559 
10560 	if (!size)
10561 		vm_munmap(hva, old_npages * PAGE_SIZE);
10562 
10563 	return 0;
10564 }
10565 EXPORT_SYMBOL_GPL(__x86_set_memory_region);
10566 
kvm_arch_pre_destroy_vm(struct kvm * kvm)10567 void kvm_arch_pre_destroy_vm(struct kvm *kvm)
10568 {
10569 	kvm_mmu_pre_destroy_vm(kvm);
10570 }
10571 
kvm_arch_destroy_vm(struct kvm * kvm)10572 void kvm_arch_destroy_vm(struct kvm *kvm)
10573 {
10574 	if (current->mm == kvm->mm) {
10575 		/*
10576 		 * Free memory regions allocated on behalf of userspace,
10577 		 * unless the the memory map has changed due to process exit
10578 		 * or fd copying.
10579 		 */
10580 		mutex_lock(&kvm->slots_lock);
10581 		__x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
10582 					0, 0);
10583 		__x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
10584 					0, 0);
10585 		__x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
10586 		mutex_unlock(&kvm->slots_lock);
10587 	}
10588 	if (kvm_x86_ops.vm_destroy)
10589 		kvm_x86_ops.vm_destroy(kvm);
10590 	kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
10591 	kvm_pic_destroy(kvm);
10592 	kvm_ioapic_destroy(kvm);
10593 	kvm_free_vcpus(kvm);
10594 	kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
10595 	kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
10596 	kvm_mmu_uninit_vm(kvm);
10597 	kvm_page_track_cleanup(kvm);
10598 	kvm_hv_destroy_vm(kvm);
10599 }
10600 
kvm_arch_free_memslot(struct kvm * kvm,struct kvm_memory_slot * slot)10601 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
10602 {
10603 	int i;
10604 
10605 	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
10606 		kvfree(slot->arch.rmap[i]);
10607 		slot->arch.rmap[i] = NULL;
10608 
10609 		if (i == 0)
10610 			continue;
10611 
10612 		kvfree(slot->arch.lpage_info[i - 1]);
10613 		slot->arch.lpage_info[i - 1] = NULL;
10614 	}
10615 
10616 	kvm_page_track_free_memslot(slot);
10617 }
10618 
kvm_alloc_memslot_metadata(struct kvm_memory_slot * slot,unsigned long npages)10619 static int kvm_alloc_memslot_metadata(struct kvm_memory_slot *slot,
10620 				      unsigned long npages)
10621 {
10622 	int i;
10623 
10624 	/*
10625 	 * Clear out the previous array pointers for the KVM_MR_MOVE case.  The
10626 	 * old arrays will be freed by __kvm_set_memory_region() if installing
10627 	 * the new memslot is successful.
10628 	 */
10629 	memset(&slot->arch, 0, sizeof(slot->arch));
10630 
10631 	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
10632 		struct kvm_lpage_info *linfo;
10633 		unsigned long ugfn;
10634 		int lpages;
10635 		int level = i + 1;
10636 
10637 		lpages = gfn_to_index(slot->base_gfn + npages - 1,
10638 				      slot->base_gfn, level) + 1;
10639 
10640 		slot->arch.rmap[i] =
10641 			kvcalloc(lpages, sizeof(*slot->arch.rmap[i]),
10642 				 GFP_KERNEL_ACCOUNT);
10643 		if (!slot->arch.rmap[i])
10644 			goto out_free;
10645 		if (i == 0)
10646 			continue;
10647 
10648 		linfo = kvcalloc(lpages, sizeof(*linfo), GFP_KERNEL_ACCOUNT);
10649 		if (!linfo)
10650 			goto out_free;
10651 
10652 		slot->arch.lpage_info[i - 1] = linfo;
10653 
10654 		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
10655 			linfo[0].disallow_lpage = 1;
10656 		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
10657 			linfo[lpages - 1].disallow_lpage = 1;
10658 		ugfn = slot->userspace_addr >> PAGE_SHIFT;
10659 		/*
10660 		 * If the gfn and userspace address are not aligned wrt each
10661 		 * other, disable large page support for this slot.
10662 		 */
10663 		if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) {
10664 			unsigned long j;
10665 
10666 			for (j = 0; j < lpages; ++j)
10667 				linfo[j].disallow_lpage = 1;
10668 		}
10669 	}
10670 
10671 	if (kvm_page_track_create_memslot(slot, npages))
10672 		goto out_free;
10673 
10674 	return 0;
10675 
10676 out_free:
10677 	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
10678 		kvfree(slot->arch.rmap[i]);
10679 		slot->arch.rmap[i] = NULL;
10680 		if (i == 0)
10681 			continue;
10682 
10683 		kvfree(slot->arch.lpage_info[i - 1]);
10684 		slot->arch.lpage_info[i - 1] = NULL;
10685 	}
10686 	return -ENOMEM;
10687 }
10688 
kvm_arch_memslots_updated(struct kvm * kvm,u64 gen)10689 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
10690 {
10691 	struct kvm_vcpu *vcpu;
10692 	int i;
10693 
10694 	/*
10695 	 * memslots->generation has been incremented.
10696 	 * mmio generation may have reached its maximum value.
10697 	 */
10698 	kvm_mmu_invalidate_mmio_sptes(kvm, gen);
10699 
10700 	/* Force re-initialization of steal_time cache */
10701 	kvm_for_each_vcpu(i, vcpu, kvm)
10702 		kvm_vcpu_kick(vcpu);
10703 }
10704 
kvm_arch_prepare_memory_region(struct kvm * kvm,struct kvm_memory_slot * memslot,const struct kvm_userspace_memory_region * mem,enum kvm_mr_change change)10705 int kvm_arch_prepare_memory_region(struct kvm *kvm,
10706 				struct kvm_memory_slot *memslot,
10707 				const struct kvm_userspace_memory_region *mem,
10708 				enum kvm_mr_change change)
10709 {
10710 	if (change == KVM_MR_CREATE || change == KVM_MR_MOVE)
10711 		return kvm_alloc_memslot_metadata(memslot,
10712 						  mem->memory_size >> PAGE_SHIFT);
10713 	return 0;
10714 }
10715 
kvm_mmu_slot_apply_flags(struct kvm * kvm,struct kvm_memory_slot * old,struct kvm_memory_slot * new,enum kvm_mr_change change)10716 static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
10717 				     struct kvm_memory_slot *old,
10718 				     struct kvm_memory_slot *new,
10719 				     enum kvm_mr_change change)
10720 {
10721 	/*
10722 	 * Nothing to do for RO slots or CREATE/MOVE/DELETE of a slot.
10723 	 * See comments below.
10724 	 */
10725 	if ((change != KVM_MR_FLAGS_ONLY) || (new->flags & KVM_MEM_READONLY))
10726 		return;
10727 
10728 	/*
10729 	 * Dirty logging tracks sptes in 4k granularity, meaning that large
10730 	 * sptes have to be split.  If live migration is successful, the guest
10731 	 * in the source machine will be destroyed and large sptes will be
10732 	 * created in the destination. However, if the guest continues to run
10733 	 * in the source machine (for example if live migration fails), small
10734 	 * sptes will remain around and cause bad performance.
10735 	 *
10736 	 * Scan sptes if dirty logging has been stopped, dropping those
10737 	 * which can be collapsed into a single large-page spte.  Later
10738 	 * page faults will create the large-page sptes.
10739 	 *
10740 	 * There is no need to do this in any of the following cases:
10741 	 * CREATE:      No dirty mappings will already exist.
10742 	 * MOVE/DELETE: The old mappings will already have been cleaned up by
10743 	 *		kvm_arch_flush_shadow_memslot()
10744 	 */
10745 	if ((old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
10746 	    !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
10747 		kvm_mmu_zap_collapsible_sptes(kvm, new);
10748 
10749 	/*
10750 	 * Enable or disable dirty logging for the slot.
10751 	 *
10752 	 * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of the old
10753 	 * slot have been zapped so no dirty logging updates are needed for
10754 	 * the old slot.
10755 	 * For KVM_MR_CREATE and KVM_MR_MOVE, once the new slot is visible
10756 	 * any mappings that might be created in it will consume the
10757 	 * properties of the new slot and do not need to be updated here.
10758 	 *
10759 	 * When PML is enabled, the kvm_x86_ops dirty logging hooks are
10760 	 * called to enable/disable dirty logging.
10761 	 *
10762 	 * When disabling dirty logging with PML enabled, the D-bit is set
10763 	 * for sptes in the slot in order to prevent unnecessary GPA
10764 	 * logging in the PML buffer (and potential PML buffer full VMEXIT).
10765 	 * This guarantees leaving PML enabled for the guest's lifetime
10766 	 * won't have any additional overhead from PML when the guest is
10767 	 * running with dirty logging disabled.
10768 	 *
10769 	 * When enabling dirty logging, large sptes are write-protected
10770 	 * so they can be split on first write.  New large sptes cannot
10771 	 * be created for this slot until the end of the logging.
10772 	 * See the comments in fast_page_fault().
10773 	 * For small sptes, nothing is done if the dirty log is in the
10774 	 * initial-all-set state.  Otherwise, depending on whether pml
10775 	 * is enabled the D-bit or the W-bit will be cleared.
10776 	 */
10777 	if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
10778 		if (kvm_x86_ops.slot_enable_log_dirty) {
10779 			kvm_x86_ops.slot_enable_log_dirty(kvm, new);
10780 		} else {
10781 			int level =
10782 				kvm_dirty_log_manual_protect_and_init_set(kvm) ?
10783 				PG_LEVEL_2M : PG_LEVEL_4K;
10784 
10785 			/*
10786 			 * If we're with initial-all-set, we don't need
10787 			 * to write protect any small page because
10788 			 * they're reported as dirty already.  However
10789 			 * we still need to write-protect huge pages
10790 			 * so that the page split can happen lazily on
10791 			 * the first write to the huge page.
10792 			 */
10793 			kvm_mmu_slot_remove_write_access(kvm, new, level);
10794 		}
10795 	} else {
10796 		if (kvm_x86_ops.slot_disable_log_dirty)
10797 			kvm_x86_ops.slot_disable_log_dirty(kvm, new);
10798 	}
10799 }
10800 
kvm_arch_commit_memory_region(struct kvm * kvm,const struct kvm_userspace_memory_region * mem,struct kvm_memory_slot * old,const struct kvm_memory_slot * new,enum kvm_mr_change change)10801 void kvm_arch_commit_memory_region(struct kvm *kvm,
10802 				const struct kvm_userspace_memory_region *mem,
10803 				struct kvm_memory_slot *old,
10804 				const struct kvm_memory_slot *new,
10805 				enum kvm_mr_change change)
10806 {
10807 	if (!kvm->arch.n_requested_mmu_pages)
10808 		kvm_mmu_change_mmu_pages(kvm,
10809 				kvm_mmu_calculate_default_mmu_pages(kvm));
10810 
10811 	/*
10812 	 * FIXME: const-ify all uses of struct kvm_memory_slot.
10813 	 */
10814 	kvm_mmu_slot_apply_flags(kvm, old, (struct kvm_memory_slot *) new, change);
10815 
10816 	/* Free the arrays associated with the old memslot. */
10817 	if (change == KVM_MR_MOVE)
10818 		kvm_arch_free_memslot(kvm, old);
10819 }
10820 
kvm_arch_flush_shadow_all(struct kvm * kvm)10821 void kvm_arch_flush_shadow_all(struct kvm *kvm)
10822 {
10823 	kvm_mmu_zap_all(kvm);
10824 }
10825 
kvm_arch_flush_shadow_memslot(struct kvm * kvm,struct kvm_memory_slot * slot)10826 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
10827 				   struct kvm_memory_slot *slot)
10828 {
10829 	kvm_page_track_flush_slot(kvm, slot);
10830 }
10831 
kvm_guest_apic_has_interrupt(struct kvm_vcpu * vcpu)10832 static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
10833 {
10834 	return (is_guest_mode(vcpu) &&
10835 			kvm_x86_ops.guest_apic_has_interrupt &&
10836 			kvm_x86_ops.guest_apic_has_interrupt(vcpu));
10837 }
10838 
kvm_vcpu_has_events(struct kvm_vcpu * vcpu)10839 static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
10840 {
10841 	if (!list_empty_careful(&vcpu->async_pf.done))
10842 		return true;
10843 
10844 	if (kvm_apic_has_events(vcpu))
10845 		return true;
10846 
10847 	if (vcpu->arch.pv.pv_unhalted)
10848 		return true;
10849 
10850 	if (vcpu->arch.exception.pending)
10851 		return true;
10852 
10853 	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
10854 	    (vcpu->arch.nmi_pending &&
10855 	     kvm_x86_ops.nmi_allowed(vcpu, false)))
10856 		return true;
10857 
10858 	if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
10859 	    (vcpu->arch.smi_pending &&
10860 	     kvm_x86_ops.smi_allowed(vcpu, false)))
10861 		return true;
10862 
10863 	if (kvm_arch_interrupt_allowed(vcpu) &&
10864 	    (kvm_cpu_has_interrupt(vcpu) ||
10865 	    kvm_guest_apic_has_interrupt(vcpu)))
10866 		return true;
10867 
10868 	if (kvm_hv_has_stimer_pending(vcpu))
10869 		return true;
10870 
10871 	if (is_guest_mode(vcpu) &&
10872 	    kvm_x86_ops.nested_ops->hv_timer_pending &&
10873 	    kvm_x86_ops.nested_ops->hv_timer_pending(vcpu))
10874 		return true;
10875 
10876 	return false;
10877 }
10878 
kvm_arch_vcpu_runnable(struct kvm_vcpu * vcpu)10879 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
10880 {
10881 	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
10882 }
10883 
kvm_arch_dy_runnable(struct kvm_vcpu * vcpu)10884 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
10885 {
10886 	if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
10887 		return true;
10888 
10889 	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
10890 		kvm_test_request(KVM_REQ_SMI, vcpu) ||
10891 		 kvm_test_request(KVM_REQ_EVENT, vcpu))
10892 		return true;
10893 
10894 	if (vcpu->arch.apicv_active && kvm_x86_ops.dy_apicv_has_pending_interrupt(vcpu))
10895 		return true;
10896 
10897 	return false;
10898 }
10899 
kvm_arch_vcpu_in_kernel(struct kvm_vcpu * vcpu)10900 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
10901 {
10902 	return vcpu->arch.preempted_in_kernel;
10903 }
10904 
kvm_arch_vcpu_should_kick(struct kvm_vcpu * vcpu)10905 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
10906 {
10907 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
10908 }
10909 
kvm_arch_interrupt_allowed(struct kvm_vcpu * vcpu)10910 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
10911 {
10912 	return kvm_x86_ops.interrupt_allowed(vcpu, false);
10913 }
10914 
kvm_get_linear_rip(struct kvm_vcpu * vcpu)10915 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
10916 {
10917 	if (is_64_bit_mode(vcpu))
10918 		return kvm_rip_read(vcpu);
10919 	return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
10920 		     kvm_rip_read(vcpu));
10921 }
10922 EXPORT_SYMBOL_GPL(kvm_get_linear_rip);
10923 
kvm_is_linear_rip(struct kvm_vcpu * vcpu,unsigned long linear_rip)10924 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
10925 {
10926 	return kvm_get_linear_rip(vcpu) == linear_rip;
10927 }
10928 EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
10929 
kvm_get_rflags(struct kvm_vcpu * vcpu)10930 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
10931 {
10932 	unsigned long rflags;
10933 
10934 	rflags = kvm_x86_ops.get_rflags(vcpu);
10935 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
10936 		rflags &= ~X86_EFLAGS_TF;
10937 	return rflags;
10938 }
10939 EXPORT_SYMBOL_GPL(kvm_get_rflags);
10940 
__kvm_set_rflags(struct kvm_vcpu * vcpu,unsigned long rflags)10941 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
10942 {
10943 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
10944 	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
10945 		rflags |= X86_EFLAGS_TF;
10946 	kvm_x86_ops.set_rflags(vcpu, rflags);
10947 }
10948 
kvm_set_rflags(struct kvm_vcpu * vcpu,unsigned long rflags)10949 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
10950 {
10951 	__kvm_set_rflags(vcpu, rflags);
10952 	kvm_make_request(KVM_REQ_EVENT, vcpu);
10953 }
10954 EXPORT_SYMBOL_GPL(kvm_set_rflags);
10955 
kvm_arch_async_page_ready(struct kvm_vcpu * vcpu,struct kvm_async_pf * work)10956 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
10957 {
10958 	int r;
10959 
10960 	if ((vcpu->arch.mmu->direct_map != work->arch.direct_map) ||
10961 	      work->wakeup_all)
10962 		return;
10963 
10964 	r = kvm_mmu_reload(vcpu);
10965 	if (unlikely(r))
10966 		return;
10967 
10968 	if (!vcpu->arch.mmu->direct_map &&
10969 	      work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu))
10970 		return;
10971 
10972 	kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
10973 }
10974 
kvm_async_pf_hash_fn(gfn_t gfn)10975 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
10976 {
10977 	BUILD_BUG_ON(!is_power_of_2(ASYNC_PF_PER_VCPU));
10978 
10979 	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
10980 }
10981 
kvm_async_pf_next_probe(u32 key)10982 static inline u32 kvm_async_pf_next_probe(u32 key)
10983 {
10984 	return (key + 1) & (ASYNC_PF_PER_VCPU - 1);
10985 }
10986 
kvm_add_async_pf_gfn(struct kvm_vcpu * vcpu,gfn_t gfn)10987 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
10988 {
10989 	u32 key = kvm_async_pf_hash_fn(gfn);
10990 
10991 	while (vcpu->arch.apf.gfns[key] != ~0)
10992 		key = kvm_async_pf_next_probe(key);
10993 
10994 	vcpu->arch.apf.gfns[key] = gfn;
10995 }
10996 
kvm_async_pf_gfn_slot(struct kvm_vcpu * vcpu,gfn_t gfn)10997 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
10998 {
10999 	int i;
11000 	u32 key = kvm_async_pf_hash_fn(gfn);
11001 
11002 	for (i = 0; i < ASYNC_PF_PER_VCPU &&
11003 		     (vcpu->arch.apf.gfns[key] != gfn &&
11004 		      vcpu->arch.apf.gfns[key] != ~0); i++)
11005 		key = kvm_async_pf_next_probe(key);
11006 
11007 	return key;
11008 }
11009 
kvm_find_async_pf_gfn(struct kvm_vcpu * vcpu,gfn_t gfn)11010 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
11011 {
11012 	return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
11013 }
11014 
kvm_del_async_pf_gfn(struct kvm_vcpu * vcpu,gfn_t gfn)11015 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
11016 {
11017 	u32 i, j, k;
11018 
11019 	i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
11020 
11021 	if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn))
11022 		return;
11023 
11024 	while (true) {
11025 		vcpu->arch.apf.gfns[i] = ~0;
11026 		do {
11027 			j = kvm_async_pf_next_probe(j);
11028 			if (vcpu->arch.apf.gfns[j] == ~0)
11029 				return;
11030 			k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
11031 			/*
11032 			 * k lies cyclically in ]i,j]
11033 			 * |    i.k.j |
11034 			 * |....j i.k.| or  |.k..j i...|
11035 			 */
11036 		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
11037 		vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
11038 		i = j;
11039 	}
11040 }
11041 
apf_put_user_notpresent(struct kvm_vcpu * vcpu)11042 static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu)
11043 {
11044 	u32 reason = KVM_PV_REASON_PAGE_NOT_PRESENT;
11045 
11046 	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason,
11047 				      sizeof(reason));
11048 }
11049 
apf_put_user_ready(struct kvm_vcpu * vcpu,u32 token)11050 static inline int apf_put_user_ready(struct kvm_vcpu *vcpu, u32 token)
11051 {
11052 	unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token);
11053 
11054 	return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data,
11055 					     &token, offset, sizeof(token));
11056 }
11057 
apf_pageready_slot_free(struct kvm_vcpu * vcpu)11058 static inline bool apf_pageready_slot_free(struct kvm_vcpu *vcpu)
11059 {
11060 	unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token);
11061 	u32 val;
11062 
11063 	if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data,
11064 					 &val, offset, sizeof(val)))
11065 		return false;
11066 
11067 	return !val;
11068 }
11069 
kvm_can_deliver_async_pf(struct kvm_vcpu * vcpu)11070 static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu)
11071 {
11072 	if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu))
11073 		return false;
11074 
11075 	if (!kvm_pv_async_pf_enabled(vcpu) ||
11076 	    (vcpu->arch.apf.send_user_only && kvm_x86_ops.get_cpl(vcpu) == 0))
11077 		return false;
11078 
11079 	return true;
11080 }
11081 
kvm_can_do_async_pf(struct kvm_vcpu * vcpu)11082 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
11083 {
11084 	if (unlikely(!lapic_in_kernel(vcpu) ||
11085 		     kvm_event_needs_reinjection(vcpu) ||
11086 		     vcpu->arch.exception.pending))
11087 		return false;
11088 
11089 	if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu))
11090 		return false;
11091 
11092 	/*
11093 	 * If interrupts are off we cannot even use an artificial
11094 	 * halt state.
11095 	 */
11096 	return kvm_arch_interrupt_allowed(vcpu);
11097 }
11098 
kvm_arch_async_page_not_present(struct kvm_vcpu * vcpu,struct kvm_async_pf * work)11099 bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
11100 				     struct kvm_async_pf *work)
11101 {
11102 	struct x86_exception fault;
11103 
11104 	trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa);
11105 	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
11106 
11107 	if (kvm_can_deliver_async_pf(vcpu) &&
11108 	    !apf_put_user_notpresent(vcpu)) {
11109 		fault.vector = PF_VECTOR;
11110 		fault.error_code_valid = true;
11111 		fault.error_code = 0;
11112 		fault.nested_page_fault = false;
11113 		fault.address = work->arch.token;
11114 		fault.async_page_fault = true;
11115 		kvm_inject_page_fault(vcpu, &fault);
11116 		return true;
11117 	} else {
11118 		/*
11119 		 * It is not possible to deliver a paravirtualized asynchronous
11120 		 * page fault, but putting the guest in an artificial halt state
11121 		 * can be beneficial nevertheless: if an interrupt arrives, we
11122 		 * can deliver it timely and perhaps the guest will schedule
11123 		 * another process.  When the instruction that triggered a page
11124 		 * fault is retried, hopefully the page will be ready in the host.
11125 		 */
11126 		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
11127 		return false;
11128 	}
11129 }
11130 
kvm_arch_async_page_present(struct kvm_vcpu * vcpu,struct kvm_async_pf * work)11131 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
11132 				 struct kvm_async_pf *work)
11133 {
11134 	struct kvm_lapic_irq irq = {
11135 		.delivery_mode = APIC_DM_FIXED,
11136 		.vector = vcpu->arch.apf.vec
11137 	};
11138 
11139 	if (work->wakeup_all)
11140 		work->arch.token = ~0; /* broadcast wakeup */
11141 	else
11142 		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
11143 	trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
11144 
11145 	if ((work->wakeup_all || work->notpresent_injected) &&
11146 	    kvm_pv_async_pf_enabled(vcpu) &&
11147 	    !apf_put_user_ready(vcpu, work->arch.token)) {
11148 		vcpu->arch.apf.pageready_pending = true;
11149 		kvm_apic_set_irq(vcpu, &irq, NULL);
11150 	}
11151 
11152 	vcpu->arch.apf.halted = false;
11153 	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
11154 }
11155 
kvm_arch_async_page_present_queued(struct kvm_vcpu * vcpu)11156 void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu)
11157 {
11158 	kvm_make_request(KVM_REQ_APF_READY, vcpu);
11159 	if (!vcpu->arch.apf.pageready_pending)
11160 		kvm_vcpu_kick(vcpu);
11161 }
11162 
kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu * vcpu)11163 bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
11164 {
11165 	if (!kvm_pv_async_pf_enabled(vcpu))
11166 		return true;
11167 	else
11168 		return apf_pageready_slot_free(vcpu);
11169 }
11170 
kvm_arch_start_assignment(struct kvm * kvm)11171 void kvm_arch_start_assignment(struct kvm *kvm)
11172 {
11173 	atomic_inc(&kvm->arch.assigned_device_count);
11174 }
11175 EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
11176 
kvm_arch_end_assignment(struct kvm * kvm)11177 void kvm_arch_end_assignment(struct kvm *kvm)
11178 {
11179 	atomic_dec(&kvm->arch.assigned_device_count);
11180 }
11181 EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);
11182 
kvm_arch_has_assigned_device(struct kvm * kvm)11183 bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm)
11184 {
11185 	return arch_atomic_read(&kvm->arch.assigned_device_count);
11186 }
11187 EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
11188 
kvm_arch_register_noncoherent_dma(struct kvm * kvm)11189 void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
11190 {
11191 	atomic_inc(&kvm->arch.noncoherent_dma_count);
11192 }
11193 EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);
11194 
kvm_arch_unregister_noncoherent_dma(struct kvm * kvm)11195 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
11196 {
11197 	atomic_dec(&kvm->arch.noncoherent_dma_count);
11198 }
11199 EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);
11200 
kvm_arch_has_noncoherent_dma(struct kvm * kvm)11201 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
11202 {
11203 	return atomic_read(&kvm->arch.noncoherent_dma_count);
11204 }
11205 EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);
11206 
kvm_arch_has_irq_bypass(void)11207 bool kvm_arch_has_irq_bypass(void)
11208 {
11209 	return true;
11210 }
11211 
kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer * cons,struct irq_bypass_producer * prod)11212 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
11213 				      struct irq_bypass_producer *prod)
11214 {
11215 	struct kvm_kernel_irqfd *irqfd =
11216 		container_of(cons, struct kvm_kernel_irqfd, consumer);
11217 	int ret;
11218 
11219 	irqfd->producer = prod;
11220 	kvm_arch_start_assignment(irqfd->kvm);
11221 	ret = kvm_x86_ops.update_pi_irte(irqfd->kvm,
11222 					 prod->irq, irqfd->gsi, 1);
11223 
11224 	if (ret)
11225 		kvm_arch_end_assignment(irqfd->kvm);
11226 
11227 	return ret;
11228 }
11229 
kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer * cons,struct irq_bypass_producer * prod)11230 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
11231 				      struct irq_bypass_producer *prod)
11232 {
11233 	int ret;
11234 	struct kvm_kernel_irqfd *irqfd =
11235 		container_of(cons, struct kvm_kernel_irqfd, consumer);
11236 
11237 	WARN_ON(irqfd->producer != prod);
11238 	irqfd->producer = NULL;
11239 
11240 	/*
11241 	 * When producer of consumer is unregistered, we change back to
11242 	 * remapped mode, so we can re-use the current implementation
11243 	 * when the irq is masked/disabled or the consumer side (KVM
11244 	 * int this case doesn't want to receive the interrupts.
11245 	*/
11246 	ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0);
11247 	if (ret)
11248 		printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
11249 		       " fails: %d\n", irqfd->consumer.token, ret);
11250 
11251 	kvm_arch_end_assignment(irqfd->kvm);
11252 }
11253 
kvm_arch_update_irqfd_routing(struct kvm * kvm,unsigned int host_irq,uint32_t guest_irq,bool set)11254 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
11255 				   uint32_t guest_irq, bool set)
11256 {
11257 	return kvm_x86_ops.update_pi_irte(kvm, host_irq, guest_irq, set);
11258 }
11259 
kvm_vector_hashing_enabled(void)11260 bool kvm_vector_hashing_enabled(void)
11261 {
11262 	return vector_hashing;
11263 }
11264 
kvm_arch_no_poll(struct kvm_vcpu * vcpu)11265 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
11266 {
11267 	return (vcpu->arch.msr_kvm_poll_control & 1) == 0;
11268 }
11269 EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
11270 
11271 
kvm_spec_ctrl_test_value(u64 value)11272 int kvm_spec_ctrl_test_value(u64 value)
11273 {
11274 	/*
11275 	 * test that setting IA32_SPEC_CTRL to given value
11276 	 * is allowed by the host processor
11277 	 */
11278 
11279 	u64 saved_value;
11280 	unsigned long flags;
11281 	int ret = 0;
11282 
11283 	local_irq_save(flags);
11284 
11285 	if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value))
11286 		ret = 1;
11287 	else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value))
11288 		ret = 1;
11289 	else
11290 		wrmsrl(MSR_IA32_SPEC_CTRL, saved_value);
11291 
11292 	local_irq_restore(flags);
11293 
11294 	return ret;
11295 }
11296 EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);
11297 
kvm_fixup_and_inject_pf_error(struct kvm_vcpu * vcpu,gva_t gva,u16 error_code)11298 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
11299 {
11300 	struct x86_exception fault;
11301 	u32 access = error_code &
11302 		(PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);
11303 
11304 	if (!(error_code & PFERR_PRESENT_MASK) ||
11305 	    vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, &fault) != UNMAPPED_GVA) {
11306 		/*
11307 		 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
11308 		 * tables probably do not match the TLB.  Just proceed
11309 		 * with the error code that the processor gave.
11310 		 */
11311 		fault.vector = PF_VECTOR;
11312 		fault.error_code_valid = true;
11313 		fault.error_code = error_code;
11314 		fault.nested_page_fault = false;
11315 		fault.address = gva;
11316 	}
11317 	vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault);
11318 }
11319 EXPORT_SYMBOL_GPL(kvm_fixup_and_inject_pf_error);
11320 
11321 /*
11322  * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
11323  * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value
11324  * indicates whether exit to userspace is needed.
11325  */
kvm_handle_memory_failure(struct kvm_vcpu * vcpu,int r,struct x86_exception * e)11326 int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
11327 			      struct x86_exception *e)
11328 {
11329 	if (r == X86EMUL_PROPAGATE_FAULT) {
11330 		kvm_inject_emulated_page_fault(vcpu, e);
11331 		return 1;
11332 	}
11333 
11334 	/*
11335 	 * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
11336 	 * while handling a VMX instruction KVM could've handled the request
11337 	 * correctly by exiting to userspace and performing I/O but there
11338 	 * doesn't seem to be a real use-case behind such requests, just return
11339 	 * KVM_EXIT_INTERNAL_ERROR for now.
11340 	 */
11341 	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
11342 	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
11343 	vcpu->run->internal.ndata = 0;
11344 
11345 	return 0;
11346 }
11347 EXPORT_SYMBOL_GPL(kvm_handle_memory_failure);
11348 
kvm_handle_invpcid(struct kvm_vcpu * vcpu,unsigned long type,gva_t gva)11349 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
11350 {
11351 	bool pcid_enabled;
11352 	struct x86_exception e;
11353 	unsigned i;
11354 	unsigned long roots_to_free = 0;
11355 	struct {
11356 		u64 pcid;
11357 		u64 gla;
11358 	} operand;
11359 	int r;
11360 
11361 	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
11362 	if (r != X86EMUL_CONTINUE)
11363 		return kvm_handle_memory_failure(vcpu, r, &e);
11364 
11365 	if (operand.pcid >> 12 != 0) {
11366 		kvm_inject_gp(vcpu, 0);
11367 		return 1;
11368 	}
11369 
11370 	pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
11371 
11372 	switch (type) {
11373 	case INVPCID_TYPE_INDIV_ADDR:
11374 		if ((!pcid_enabled && (operand.pcid != 0)) ||
11375 		    is_noncanonical_address(operand.gla, vcpu)) {
11376 			kvm_inject_gp(vcpu, 0);
11377 			return 1;
11378 		}
11379 		kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid);
11380 		return kvm_skip_emulated_instruction(vcpu);
11381 
11382 	case INVPCID_TYPE_SINGLE_CTXT:
11383 		if (!pcid_enabled && (operand.pcid != 0)) {
11384 			kvm_inject_gp(vcpu, 0);
11385 			return 1;
11386 		}
11387 
11388 		if (kvm_get_active_pcid(vcpu) == operand.pcid) {
11389 			kvm_mmu_sync_roots(vcpu);
11390 			kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
11391 		}
11392 
11393 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
11394 			if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].pgd)
11395 			    == operand.pcid)
11396 				roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
11397 
11398 		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free);
11399 		/*
11400 		 * If neither the current cr3 nor any of the prev_roots use the
11401 		 * given PCID, then nothing needs to be done here because a
11402 		 * resync will happen anyway before switching to any other CR3.
11403 		 */
11404 
11405 		return kvm_skip_emulated_instruction(vcpu);
11406 
11407 	case INVPCID_TYPE_ALL_NON_GLOBAL:
11408 		/*
11409 		 * Currently, KVM doesn't mark global entries in the shadow
11410 		 * page tables, so a non-global flush just degenerates to a
11411 		 * global flush. If needed, we could optimize this later by
11412 		 * keeping track of global entries in shadow page tables.
11413 		 */
11414 
11415 		fallthrough;
11416 	case INVPCID_TYPE_ALL_INCL_GLOBAL:
11417 		kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
11418 		return kvm_skip_emulated_instruction(vcpu);
11419 
11420 	default:
11421 		BUG(); /* We have already checked above that type <= 3 */
11422 	}
11423 }
11424 EXPORT_SYMBOL_GPL(kvm_handle_invpcid);
11425 
11426 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
11427 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
11428 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
11429 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
11430 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
11431 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
11432 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
11433 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
11434 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
11435 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
11436 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter_failed);
11437 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
11438 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
11439 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
11440 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
11441 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window_update);
11442 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
11443 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
11444 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
11445 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
11446 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log);
11447 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_update_request);
11448