/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>

#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/microcode.h>
#include <asm/spec-ctrl.h>

#include <asm/virtext.h>
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static const struct x86_cpu_id svm_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_SVM),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

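/*
 * SVM feature flags, mirroring the bit layout reported by CPUID
 * Fn8000_000A:EDX (Nested Paging, LBR virtualization, NRIP save,
 * TSC rate control, VMCB clean bits, flush-by-ASID, decode assists,
 * PAUSE filter).  Only a few of them are consumed in this file.
 */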
#define SVM_FEATURE_NPT            (1 <<  0)
#define SVM_FEATURE_LBRV           (1 <<  1)
#define SVM_FEATURE_SVML           (1 <<  2)
#define SVM_FEATURE_NRIP           (1 <<  3)
#define SVM_FEATURE_TSC_RATE       (1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD		0xffffff0000000000ULL
#define TSC_RATIO_MIN		0x0000000000000001ULL
#define TSC_RATIO_MAX		0x000000ffffffffffULL

static bool erratum_383_found __read_mostly;

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
	MSR_FS_BASE,
#endif
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

struct kvm_vcpu;

struct nested_state {
	struct vmcb *hsave;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb;

	/* These are the merged vectors */
	u32 *msrpm;

	/* gpa pointers to the real vectors */
	u64 vmcb_msrpm;
	u64 vmcb_iopm;

	/* A VMEXIT is required but not yet emulated */
	bool exit_required;

	/* cache for intercepts of the guest */
	u32 intercept_cr;
	u32 intercept_dr;
	u32 intercept_exceptions;
	u64 intercept;

	/* Nested Paging related state */
	u64 nested_cr3;
};

#define MSRPM_OFFSETS	16
static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to higher value when updated Revision Guides
 * are published and we know what the new status bits are
 */
static uint64_t osvw_len = 4, osvw_status;

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	struct {
		u16 fs;
		u16 gs;
		u16 ldt;
		u64 gs_base;
	} host;

	u64 spec_ctrl;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct nested_state nested;

	bool nmi_singlestep;

	unsigned int3_injected;
	unsigned long int3_rip;
	u32 apf_reason;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;
};

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT	0x0100000000ULL

#define MSR_INVALID			0xffffffffU

static const struct svm_direct_access_msrs {
	u32 index;   /* Index of the MSR */
	bool always; /* True if intercept is always on */
} direct_access_msrs[] = {
	{ .index = MSR_STAR,				.always = true  },
	{ .index = MSR_IA32_SYSENTER_CS,		.always = true  },
#ifdef CONFIG_X86_64
	{ .index = MSR_GS_BASE,				.always = true  },
	{ .index = MSR_FS_BASE,				.always = true  },
	{ .index = MSR_KERNEL_GS_BASE,			.always = true  },
	{ .index = MSR_LSTAR,				.always = true  },
	{ .index = MSR_CSTAR,				.always = true  },
	{ .index = MSR_SYSCALL_MASK,			.always = true  },
#endif
	{ .index = MSR_IA32_SPEC_CTRL,			.always = false },
	{ .index = MSR_IA32_PRED_CMD,			.always = false },
	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTINTTOIP,		.always = false },
	{ .index = MSR_INVALID,				.always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled;
#endif

/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);

static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code);

enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

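/*
 * VMCB "clean" bits: each bit tells the CPU that the corresponding group
 * of VMCB fields has not been modified since the last VMRUN, so cached
 * copies may be reused.  Whenever software touches one of those fields it
 * must clear the matching bit (mark_dirty) before the next VMRUN.
 */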
static inline void mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

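/*
 * While a nested guest is running, the active VMCB must intercept
 * everything KVM itself needs (saved in nested.hsave at VMRUN emulation)
 * as well as everything L1 asked to intercept for its guest, so the
 * effective masks below are the OR of both sets.
 */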
static void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct nested_state *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested;

	c->intercept_cr = h->intercept_cr | g->intercept_cr;
	c->intercept_dr = h->intercept_dr | g->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
	c->intercept = h->intercept | g->intercept;
}

static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}

static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	return vmcb->control.intercept_cr & (1U << bit);
}

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = (1 << INTERCEPT_DR0_READ)
		| (1 << INTERCEPT_DR1_READ)
		| (1 << INTERCEPT_DR2_READ)
		| (1 << INTERCEPT_DR3_READ)
		| (1 << INTERCEPT_DR4_READ)
		| (1 << INTERCEPT_DR5_READ)
		| (1 << INTERCEPT_DR6_READ)
		| (1 << INTERCEPT_DR7_READ)
		| (1 << INTERCEPT_DR0_WRITE)
		| (1 << INTERCEPT_DR1_WRITE)
		| (1 << INTERCEPT_DR2_WRITE)
		| (1 << INTERCEPT_DR3_WRITE)
		| (1 << INTERCEPT_DR4_WRITE)
		| (1 << INTERCEPT_DR5_WRITE)
		| (1 << INTERCEPT_DR6_WRITE)
		| (1 << INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = 0;

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept |= (1ULL << bit);

	recalc_intercepts(svm);
}

static inline void clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept &= ~(1ULL << bit);

	recalc_intercepts(svm);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:5, dpl:2, p:1;
	unsigned limit1:4, zero0:3, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	struct vmcb *current_vmcb;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

struct svm_init_data {
	int cpu;
	int r;
};

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

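/*
 * The MSR permission map uses two bits per MSR (read intercept, then
 * write intercept) and is split into three 2K regions covering the MSR
 * ranges listed in msrpm_ranges[].
 * Example: MSR_STAR (0xc0000081) lands in range 1 at byte
 * (0x81 / 4) + 2048 = 2080, i.e. u32 offset 520, with bit 2 controlling
 * reads and bit 3 controlling writes (see set_msr_interception()).
 */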
static u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8   */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}

#define MAX_INST_SIZE 15

static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}

static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	vcpu->arch.efer = efer;
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;

}

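/*
 * On CPUs with the NRIPS feature the hardware stores the rIP of the next
 * instruction in the VMCB on an intercept, so skipping the intercepted
 * instruction is a simple RIP update.  Without it, KVM has to decode the
 * instruction (EMULTYPE_SKIP) to find its length.
 */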
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.next_rip != 0) {
		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
		svm->next_rip = svm->vmcb->control.next_rip;
	}

	if (!svm->next_rip) {
		if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
				EMULATE_DONE)
			printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
		       __func__, kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm_set_interrupt_shadow(vcpu, 0);
}

static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * If we are within a nested VM we'd better #VMEXIT and let the guest
	 * handle the exception
	 */
	if (!reinject &&
	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
		return;

	if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

		/*
		 * For guest debugging where we have to reinject #BP if some
		 * INT3 is guest-owned:
		 * Emulate nRIP by moving RIP forward. Will fail if injection
		 * raises a fault that is not intercepted. Still better than
		 * failing in all cases.
		 */
		skip_emulated_instruction(&svm->vcpu);
		rip = kvm_rip_read(&svm->vcpu);
		svm->int3_rip = rip + svm->vmcb->save.cs.base;
		svm->int3_injected = rip - old_rip;
	}

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

static void svm_init_erratum_383(void)
{
	u32 low, high;
	int err;
	u64 val;

	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
		return;

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
	if (err)
		return;

	val |= (1ULL << 47);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
	/*
	 * Guests should see errata 400 and 415 as fixed (assuming that
	 * HLT and IO instructions are intercepted).
	 */
	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
	vcpu->arch.osvw.status = osvw_status & ~(6ULL);

	/*
	 * By increasing VCPU's osvw.length to 3 we are telling the guest that
	 * all osvw.status bits inside that length, including bit 0 (which is
	 * reserved for erratum 298), are valid. However, if host processor's
	 * osvw_len is 0 then osvw_status[0] carries no information. We need to
	 * be conservative here and therefore we tell the guest that erratum 298
	 * is present (because we really don't know).
	 */
	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void)
{
	/* Make sure we clean up behind us */
	if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

	cpu_svm_disable();

	amd_pmu_disable_virt();
}

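/*
 * Per-CPU SVM enable: set EFER.SVME and point MSR_VM_HSAVE_PA at a page
 * where the CPU saves host state across VMRUN/#VMEXIT.  A non-default
 * TSC ratio left behind by a previous user is also reset here.
 */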
static int svm_hardware_enable(void)
{

	struct svm_cpu_data *sd;
	uint64_t efer;
	struct desc_ptr gdt_descr;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	if (!has_svm()) {
		pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
		return -EINVAL;
	}
	sd = per_cpu(svm_data, me);
	if (!sd) {
		pr_err("%s: svm_data is NULL on %d\n", __func__, me);
		return -EINVAL;
	}

	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;

	native_store_gdt(&gdt_descr);
	gdt = (struct desc_struct *)gdt_descr.address;
	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
		__this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
	}


	/*
	 * Get OSVW bits.
	 *
	 * Note that it is possible to have a system with mixed processor
	 * revisions and therefore different OSVW bits. If bits are not the same
	 * on different processors then choose the worst case (i.e. if erratum
	 * is present on one processor and not on another then assume that the
	 * erratum is present everywhere).
	 */
	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
		uint64_t len, status = 0;
		int err;

		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
		if (!err)
			status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
						      &err);

		if (err)
			osvw_status = osvw_len = 0;
		else {
			if (len < osvw_len)
				osvw_len = len;
			osvw_status |= status;
			osvw_status &= (1ULL << osvw_len) - 1;
		}
	} else
		osvw_status = osvw_len = 0;

	svm_init_erratum_383();

	amd_pmu_enable_virt();

	return 0;
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

	if (!sd)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	__free_page(sd->save_area);
	kfree(sd);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *sd;
	int r;

	sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;
	sd->cpu = cpu;
	sd->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!sd->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = sd;

	return 0;

err_1:
	kfree(sd);
	return r;

}

static bool valid_msr_intercept(u32 index)
{
	int i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
		if (direct_access_msrs[i].index == index)
			return true;

	return false;
}

static bool msr_write_intercepted(struct kvm_vcpu *vcpu, unsigned msr)
{
	u8 bit_write;
	unsigned long tmp;
	u32 offset;
	u32 *msrpm;

	msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
				      to_svm(vcpu)->msrpm;

	offset    = svm_msrpm_offset(msr);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	return !!test_bit(bit_write,  &tmp);
}

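/*
 * In the MSR permission map a set bit means "intercept"; passing
 * read/write == 1 here therefore *clears* the corresponding bit so the
 * guest can access the MSR directly without a #VMEXIT.
 */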
static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	u8 bit_read, bit_write;
	unsigned long tmp;
	u32 offset;

	/*
	 * If this warning triggers, extend the direct_access_msrs list at the
	 * beginning of the file.
	 */
	WARN_ON(!valid_msr_intercept(msr));

	offset    = svm_msrpm_offset(msr);
	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	msrpm[offset] = tmp;
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	int i;

	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)
			continue;

		set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
	}
}

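/*
 * msrpm_offsets[] caches the u32 offsets of all directly-accessed MSRs.
 * When a nested guest's MSRPM has to be merged with KVM's own, only
 * these few words need to be combined instead of walking the whole
 * 8K bitmap on every nested VMRUN.
 */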
static void add_msr_offset(u32 offset)
{
	int i;

	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)
			return;

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)
			continue;

		/* Add offset to list */
		msrpm_offsets[i] = offset;

		return;
	}

	/*
	 * If this BUG triggers, the msrpm_offsets table has overflowed.  Just
	 * increase MSRPM_OFFSETS in this case.
	 */
	BUG();
}

static void init_msrpm_offsets(void)
{
	int i;

	memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 offset;

		offset = svm_msrpm_offset(direct_access_msrs[i].index);
		BUG_ON(offset == MSR_INVALID);

		add_msr_offset(offset);
	}
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 1;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 0;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

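/*
 * Global setup.  The I/O permission map below is one bit per port and is
 * filled with all ones, so every port access by a guest exits; the
 * order-2 allocation (16K) comfortably covers the architectural 12K map.
 */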
static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		kvm_has_tsc_control = true;
		kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
		kvm_tsc_scaling_ratio_frac_bits = 32;
	}

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
	}

	for_each_possible_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

static u64 svm_read_tsc_offset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->vmcb->control.tsc_offset;
}

static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 g_tsc_offset = 0;

	if (is_guest_mode(vcpu)) {
		g_tsc_offset = svm->vmcb->control.tsc_offset -
			       svm->nested.hsave->control.tsc_offset;
		svm->nested.hsave->control.tsc_offset = offset;
	} else
		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
					   svm->vmcb->control.tsc_offset,
					   offset);

	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

static void svm_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.tsc_offset += adjustment;
	if (is_guest_mode(vcpu))
		svm->nested.hsave->control.tsc_offset += adjustment;
	else
		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
				     svm->vmcb->control.tsc_offset - adjustment,
				     svm->vmcb->control.tsc_offset);

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

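/*
 * Build the initial VMCB: intercept the control registers, debug
 * registers, exceptions and instructions KVM has to mediate, point the
 * CPU at the I/O and MSR permission maps, and load the architectural
 * reset state (CS 0xf000 with base 0xffff0000, RIP 0xfff0, RFLAGS bit 1).
 * When nested paging is available, the CR3 and #PF intercepts are
 * dropped again because the NPT handles guest paging.
 */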
static void init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	svm->vcpu.fpu_active = 1;
	svm->vcpu.arch.hflags = 0;

	set_cr_intercept(svm, INTERCEPT_CR0_READ);
	set_cr_intercept(svm, INTERCEPT_CR3_READ);
	set_cr_intercept(svm, INTERCEPT_CR4_READ);
	set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR8_WRITE);

	set_dr_intercepts(svm);

	set_exception_intercept(svm, PF_VECTOR);
	set_exception_intercept(svm, UD_VECTOR);
	set_exception_intercept(svm, MC_VECTOR);
	set_exception_intercept(svm, AC_VECTOR);
	set_exception_intercept(svm, DB_VECTOR);

	set_intercept(svm, INTERCEPT_INTR);
	set_intercept(svm, INTERCEPT_NMI);
	set_intercept(svm, INTERCEPT_SMI);
	set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
	set_intercept(svm, INTERCEPT_RDPMC);
	set_intercept(svm, INTERCEPT_CPUID);
	set_intercept(svm, INTERCEPT_INVD);
	set_intercept(svm, INTERCEPT_HLT);
	set_intercept(svm, INTERCEPT_INVLPG);
	set_intercept(svm, INTERCEPT_INVLPGA);
	set_intercept(svm, INTERCEPT_IOIO_PROT);
	set_intercept(svm, INTERCEPT_MSR_PROT);
	set_intercept(svm, INTERCEPT_TASK_SWITCH);
	set_intercept(svm, INTERCEPT_SHUTDOWN);
	set_intercept(svm, INTERCEPT_VMRUN);
	set_intercept(svm, INTERCEPT_VMMCALL);
	set_intercept(svm, INTERCEPT_VMLOAD);
	set_intercept(svm, INTERCEPT_VMSAVE);
	set_intercept(svm, INTERCEPT_STGI);
	set_intercept(svm, INTERCEPT_CLGI);
	set_intercept(svm, INTERCEPT_SKINIT);
	set_intercept(svm, INTERCEPT_WBINVD);
	set_intercept(svm, INTERCEPT_MONITOR);
	set_intercept(svm, INTERCEPT_MWAIT);
	set_intercept(svm, INTERCEPT_XSETBV);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = __pa(svm->msrpm);
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	save->cs.base = 0xffff0000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	svm_set_efer(&svm->vcpu, 0);
	save->dr6 = 0xffff0ff0;
	kvm_set_rflags(&svm->vcpu, 2);
	save->rip = 0x0000fff0;
	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

	/*
	 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
	 * It also updates the guest-visible cr0 value.
	 */
	svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
	kvm_mmu_reset_context(&svm->vcpu);

	save->cr4 = X86_CR4_PAE;
	/* rdx = ?? */

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl = 1;
		clr_intercept(svm, INTERCEPT_INVLPG);
		clr_exception_intercept(svm, PF_VECTOR);
		clr_cr_intercept(svm, INTERCEPT_CR3_READ);
		clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
		save->g_pat = svm->vcpu.arch.pat;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	svm->asid_generation = 0;

	svm->nested.vmcb = 0;
	svm->vcpu.arch.hflags = 0;

	if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
		control->pause_filter_count = 3000;
		set_intercept(svm, INTERCEPT_PAUSE);
	}

	mark_all_dirty(svm->vmcb);

	enable_gif(svm);
}

static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 dummy;
	u32 eax = 1;

	svm->spec_ctrl = 0;
	svm->virt_spec_ctrl = 0;

	if (!init_event) {
		svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
					   MSR_IA32_APICBASE_ENABLE;
		if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
			svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
	}
	init_vmcb(svm);

	kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy);
	kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
}

static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
	struct vcpu_svm *svm;
	struct page *page;
	struct page *msrpm_pages;
	struct page *hsave_page;
	struct page *nested_msrpm_pages;
	int err;

	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!svm) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
	if (err)
		goto free_svm;

	err = -ENOMEM;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		goto uninit;

	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!msrpm_pages)
		goto free_page1;

	nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!nested_msrpm_pages)
		goto free_page2;

	hsave_page = alloc_page(GFP_KERNEL);
	if (!hsave_page)
		goto free_page3;

	svm->nested.hsave = page_address(hsave_page);

	svm->msrpm = page_address(msrpm_pages);
	svm_vcpu_init_msrpm(svm->msrpm);

	svm->nested.msrpm = page_address(nested_msrpm_pages);
	svm_vcpu_init_msrpm(svm->nested.msrpm);

	svm->vmcb = page_address(page);
	clear_page(svm->vmcb);
	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	svm->asid_generation = 0;
	init_vmcb(svm);

	svm_init_osvw(&svm->vcpu);

	return &svm->vcpu;

free_page3:
	__free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
free_page2:
	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
free_page1:
	__free_page(page);
uninit:
	kvm_vcpu_uninit(&svm->vcpu);
free_svm:
	kmem_cache_free(kvm_vcpu_cache, svm);
out:
	return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
	__free_page(virt_to_page(svm->nested.hsave));
	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, svm);
	/*
	 * The vmcb page can be recycled, causing a false negative in
	 * svm_vcpu_load(). So do a full IBPB now.
	 */
	indirect_branch_prediction_barrier();
}

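/*
 * Called when the vcpu is scheduled onto a physical CPU.  Moving to a
 * different CPU invalidates the ASID and all VMCB clean bits (the new
 * CPU has nothing cached), host segment and MSR state is snapshotted for
 * restore in svm_vcpu_put(), the per-CPU TSC ratio is reprogrammed if
 * needed, and an IBPB is issued when a different VMCB ran last so branch
 * predictions cannot leak between guests.
 */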
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
	int i;

	if (unlikely(cpu != vcpu->cpu)) {
		svm->asid_generation = 0;
		mark_all_dirty(svm->vmcb);
	}

#ifdef CONFIG_X86_64
	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
#endif
	savesegment(fs, svm->host.fs);
	savesegment(gs, svm->host.gs);
	svm->host.ldt = kvm_read_ldt();

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
		if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
			__this_cpu_write(current_tsc_ratio, tsc_ratio);
			wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
		}
	}
	if (sd->current_vmcb != svm->vmcb) {
		sd->current_vmcb = svm->vmcb;
		indirect_branch_prediction_barrier();
	}
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	++vcpu->stat.host_state_reload;
	kvm_load_ldt(svm->host.ldt);
#ifdef CONFIG_X86_64
	loadsegment(fs, svm->host.fs);
	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
	load_gs_index(svm->host.gs);
#else
#ifdef CONFIG_X86_32_LAZY_GS
	loadsegment(gs, svm->host.gs);
#endif
#endif
	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	/*
	 * Any change of EFLAGS.VM is accompanied by a reload of SS
	 * (caused by either a task switch or an inter-privilege IRET),
	 * so we do not need to update the CPL here.
	 */
	to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	switch (reg) {
	case VCPU_EXREG_PDPTR:
		BUG_ON(!npt_enabled);
		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
		break;
	default:
		BUG();
	}
}

static void svm_set_vintr(struct vcpu_svm *svm)
{
	set_intercept(svm, INTERCEPT_VINTR);
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
	clr_intercept(svm, INTERCEPT_VINTR);
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;

	/*
	 * AMD CPUs circa 2014 track the G bit for all segments except CS.
	 * However, the SVM spec states that the G bit is not observed by the
	 * CPU, and some VMware virtual CPUs drop the G bit for all segments.
	 * So let's synthesize a legal G bit for all segments, this helps
	 * running KVM nested. It also helps cross-vendor migration, because
	 * Intel's vmentry has a check on the 'G' bit.
	 */
	var->g = s->limit > 0xfffff;

	/*
	 * AMD's VMCB does not have an explicit unusable field, so emulate it
	 * for cross vendor migration purposes by "not present"
	 */
	var->unusable = !var->present || (var->type == 0);

	switch (seg) {
	case VCPU_SREG_TR:
		/*
		 * Work around a bug where the busy flag in the tr selector
		 * isn't exposed
		 */
		var->type |= 0x2;
		break;
	case VCPU_SREG_DS:
	case VCPU_SREG_ES:
	case VCPU_SREG_FS:
	case VCPU_SREG_GS:
		/*
		 * The accessed bit must always be set in the segment
		 * descriptor cache, although it can be cleared in the
		 * descriptor, the cached bit always remains at 1. Since
		 * Intel has a check on this, set it here to support
		 * cross-vendor migration.
		 */
		if (!var->unusable)
			var->type |= 0x1;
		break;
	case VCPU_SREG_SS:
		/*
		 * On AMD CPUs sometimes the DB bit in the segment
		 * descriptor is left as 1, although the whole segment has
		 * been made unusable. Clear it here to pass an Intel VMX
		 * entry check when cross vendor migrating.
		 */
		if (var->unusable)
			var->db = 0;
		/* This is symmetric with svm_set_segment() */
		var->dpl = to_svm(vcpu)->vmcb->save.cpl;
		break;
	}
}

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	return save->cpl;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->size = svm->vmcb->save.idtr.limit;
	dt->address = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->size;
	svm->vmcb->save.idtr.base = dt->address;
	mark_dirty(svm->vmcb, VMCB_DT);
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->size = svm->vmcb->save.gdtr.limit;
	dt->address = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->size;
	svm->vmcb->save.gdtr.base = dt->address;
	mark_dirty(svm->vmcb, VMCB_DT);
}

static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_decache_cr3(struct kvm_vcpu *vcpu)
{
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

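/*
 * CR0 handling: if the guest's view of CR0 matches the value the
 * hardware actually runs with (and the FPU is active), ordinary CR0
 * reads/writes need not exit; only the selective CR0-write intercept
 * set up in init_vmcb() remains to catch changes to bits other than
 * TS and MP.  Otherwise full CR0 read/write intercepts are enabled so
 * KVM can keep the two views consistent.
 */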
static void update_cr0_intercept(struct vcpu_svm *svm)
{
	ulong gcr0 = svm->vcpu.arch.cr0;
	u64 *hcr0 = &svm->vmcb->save.cr0;

	if (!svm->vcpu.fpu_active)
		*hcr0 |= SVM_CR0_SELECTIVE_MASK;
	else
		*hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
			| (gcr0 & SVM_CR0_SELECTIVE_MASK);

	mark_dirty(svm->vmcb, VMCB_CR);

	if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
		clr_cr_intercept(svm, INTERCEPT_CR0_READ);
		clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	} else {
		set_cr_intercept(svm, INTERCEPT_CR0_READ);
		set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	}
}

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->arch.efer |= EFER_LMA;
			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->arch.efer &= ~EFER_LMA;
			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
		}
	}
#endif
	vcpu->arch.cr0 = cr0;

	if (!npt_enabled)
		cr0 |= X86_CR0_PG | X86_CR0_WP;

	if (!vcpu->fpu_active)
		cr0 |= X86_CR0_TS;
	/*
	 * re-enable caching here because the QEMU bios
	 * does not do it - this results in some delay at
	 * reboot
	 */
	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
		cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
	svm->vmcb->save.cr0 = cr0;
	mark_dirty(svm->vmcb, VMCB_CR);
	update_cr0_intercept(svm);
}

static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
	unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;

	if (cr4 & X86_CR4_VMXE)
		return 1;

	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
		svm_flush_tlb(vcpu);

	vcpu->arch.cr4 = cr4;
	if (!npt_enabled)
		cr4 |= X86_CR4_PAE;
	cr4 |= host_cr4_mce;
	to_svm(vcpu)->vmcb->save.cr4 = cr4;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
	return 0;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
	s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
	s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
	s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
	s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
	s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
	s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
	s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;

	/*
	 * This is always accurate, except if SYSRET returned to a segment
	 * with SS.DPL != 3. Intel does not have this quirk, and always
	 * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
	 * would entail passing the CPL to userspace and back.
	 */
	if (seg == VCPU_SREG_SS)
		/* This is symmetric with svm_get_segment() */
		svm->vmcb->save.cpl = (var->dpl & 3);

	mark_dirty(svm->vmcb, VMCB_SEG);
}

static void update_bp_intercept(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	clr_exception_intercept(svm, BP_VECTOR);

	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			set_exception_intercept(svm, BP_VECTOR);
	} else
		vcpu->guest_debug = 0;
}

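/*
 * ASIDs tag guest TLB entries so a world switch does not require a
 * flush.  Each physical CPU hands out ASIDs from its own pool; once the
 * pool is exhausted the generation counter is bumped and the next VMRUN
 * asks the CPU to flush all ASIDs before reusing them from 1.
 */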
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
{
	if (sd->next_asid > sd->max_asid) {
		++sd->asid_generation;
		sd->next_asid = 1;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	svm->asid_generation = sd->asid_generation;
	svm->vmcb->control.asid = sd->next_asid++;

	mark_dirty(svm->vmcb, VMCB_ASID);
}

static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.dr6;
}

static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.dr6 = value;
	mark_dirty(svm->vmcb, VMCB_DR);
}

static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	get_debugreg(vcpu->arch.db[0], 0);
	get_debugreg(vcpu->arch.db[1], 1);
	get_debugreg(vcpu->arch.db[2], 2);
	get_debugreg(vcpu->arch.db[3], 3);
	vcpu->arch.dr6 = svm_get_dr6(vcpu);
	vcpu->arch.dr7 = svm->vmcb->save.dr7;

	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
	set_dr_intercepts(svm);
}

static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.dr7 = value;
	mark_dirty(svm->vmcb, VMCB_DR);
}

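/*
 * #PF intercept.  Besides ordinary guest page faults (handled by the
 * MMU), the paravirtual async-PF protocol also reuses the #PF vector:
 * apf_reason carries a "page not present" or "page ready" token and the
 * fault address is then the token rather than a real faulting address.
 */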
pf_interception(struct vcpu_svm * svm)1664 static int pf_interception(struct vcpu_svm *svm)
1665 {
1666 u64 fault_address = svm->vmcb->control.exit_info_2;
1667 u32 error_code;
1668 int r = 1;
1669
1670 switch (svm->apf_reason) {
1671 default:
1672 error_code = svm->vmcb->control.exit_info_1;
1673
1674 trace_kvm_page_fault(fault_address, error_code);
1675 if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
1676 kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
1677 r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
1678 svm->vmcb->control.insn_bytes,
1679 svm->vmcb->control.insn_len);
1680 break;
1681 case KVM_PV_REASON_PAGE_NOT_PRESENT:
1682 svm->apf_reason = 0;
1683 local_irq_disable();
1684 kvm_async_pf_task_wait(fault_address);
1685 local_irq_enable();
1686 break;
1687 case KVM_PV_REASON_PAGE_READY:
1688 svm->apf_reason = 0;
1689 local_irq_disable();
1690 kvm_async_pf_task_wake(fault_address);
1691 local_irq_enable();
1692 break;
1693 }
1694 return r;
1695 }
1696
db_interception(struct vcpu_svm * svm)1697 static int db_interception(struct vcpu_svm *svm)
1698 {
1699 struct kvm_run *kvm_run = svm->vcpu.run;
1700
1701 if (!(svm->vcpu.guest_debug &
1702 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
1703 !svm->nmi_singlestep) {
1704 kvm_queue_exception(&svm->vcpu, DB_VECTOR);
1705 return 1;
1706 }
1707
1708 if (svm->nmi_singlestep) {
1709 svm->nmi_singlestep = false;
1710 if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
1711 svm->vmcb->save.rflags &=
1712 ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
1713 }
1714
1715 if (svm->vcpu.guest_debug &
1716 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
1717 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1718 kvm_run->debug.arch.pc =
1719 svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1720 kvm_run->debug.arch.exception = DB_VECTOR;
1721 return 0;
1722 }
1723
1724 return 1;
1725 }
1726
bp_interception(struct vcpu_svm * svm)1727 static int bp_interception(struct vcpu_svm *svm)
1728 {
1729 struct kvm_run *kvm_run = svm->vcpu.run;
1730
1731 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1732 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1733 kvm_run->debug.arch.exception = BP_VECTOR;
1734 return 0;
1735 }
1736
ud_interception(struct vcpu_svm * svm)1737 static int ud_interception(struct vcpu_svm *svm)
1738 {
1739 int er;
1740
1741 er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
1742 if (er == EMULATE_USER_EXIT)
1743 return 0;
1744 if (er != EMULATE_DONE)
1745 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
1746 return 1;
1747 }
1748
ac_interception(struct vcpu_svm * svm)1749 static int ac_interception(struct vcpu_svm *svm)
1750 {
1751 kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
1752 return 1;
1753 }
1754
svm_fpu_activate(struct kvm_vcpu * vcpu)1755 static void svm_fpu_activate(struct kvm_vcpu *vcpu)
1756 {
1757 struct vcpu_svm *svm = to_svm(vcpu);
1758
1759 clr_exception_intercept(svm, NM_VECTOR);
1760
1761 svm->vcpu.fpu_active = 1;
1762 update_cr0_intercept(svm);
1763 }
1764
nm_interception(struct vcpu_svm * svm)1765 static int nm_interception(struct vcpu_svm *svm)
1766 {
1767 svm_fpu_activate(&svm->vcpu);
1768 return 1;
1769 }
1770
is_erratum_383(void)1771 static bool is_erratum_383(void)
1772 {
1773 int err, i;
1774 u64 value;
1775
1776 if (!erratum_383_found)
1777 return false;
1778
1779 value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
1780 if (err)
1781 return false;
1782
1783 /* Bit 62 may or may not be set for this mce */
1784 value &= ~(1ULL << 62);
1785
1786 if (value != 0xb600000000010015ULL)
1787 return false;
1788
1789 /* Clear MCi_STATUS registers */
1790 for (i = 0; i < 6; ++i)
1791 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
1792
1793 value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
1794 if (!err) {
1795 u32 low, high;
1796
1797 value &= ~(1ULL << 2);
1798 low = lower_32_bits(value);
1799 high = upper_32_bits(value);
1800
1801 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
1802 }
1803
1804 /* Flush tlb to evict multi-match entries */
1805 __flush_tlb_all();
1806
1807 return true;
1808 }
1809
svm_handle_mce(struct vcpu_svm * svm)1810 static void svm_handle_mce(struct vcpu_svm *svm)
1811 {
1812 if (is_erratum_383()) {
1813 /*
1814 * Erratum 383 triggered. Guest state is corrupt so kill the
1815 * guest.
1816 */
1817 pr_err("KVM: Guest triggered AMD Erratum 383\n");
1818
1819 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
1820
1821 return;
1822 }
1823
1824 /*
1825 * On an #MC intercept the MCE handler is not called automatically in
1826 * the host. So do it by hand here.
1827 */
1828 asm volatile (
1829 "int $0x12\n");
1830 /* not sure if we ever come back to this point */
1831
1832 return;
1833 }
1834
mc_interception(struct vcpu_svm * svm)1835 static int mc_interception(struct vcpu_svm *svm)
1836 {
1837 return 1;
1838 }
1839
shutdown_interception(struct vcpu_svm * svm)1840 static int shutdown_interception(struct vcpu_svm *svm)
1841 {
1842 struct kvm_run *kvm_run = svm->vcpu.run;
1843
1844 /*
1845 * VMCB is undefined after a SHUTDOWN intercept
1846 * so reinitialize it.
1847 */
1848 clear_page(svm->vmcb);
1849 init_vmcb(svm);
1850
1851 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
1852 return 0;
1853 }
1854
io_interception(struct vcpu_svm * svm)1855 static int io_interception(struct vcpu_svm *svm)
1856 {
1857 struct kvm_vcpu *vcpu = &svm->vcpu;
1858 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
1859 int size, in, string;
1860 unsigned port;
1861
1862 ++svm->vcpu.stat.io_exits;
1863 string = (io_info & SVM_IOIO_STR_MASK) != 0;
1864 in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
1865 if (string || in)
1866 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
1867
1868 port = io_info >> 16;
1869 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
1870 svm->next_rip = svm->vmcb->control.exit_info_2;
1871 skip_emulated_instruction(&svm->vcpu);
1872
1873 return kvm_fast_pio_out(vcpu, size, port);
1874 }
1875
nmi_interception(struct vcpu_svm * svm)1876 static int nmi_interception(struct vcpu_svm *svm)
1877 {
1878 return 1;
1879 }
1880
intr_interception(struct vcpu_svm * svm)1881 static int intr_interception(struct vcpu_svm *svm)
1882 {
1883 ++svm->vcpu.stat.irq_exits;
1884 return 1;
1885 }
1886
nop_on_interception(struct vcpu_svm * svm)1887 static int nop_on_interception(struct vcpu_svm *svm)
1888 {
1889 return 1;
1890 }
1891
halt_interception(struct vcpu_svm * svm)1892 static int halt_interception(struct vcpu_svm *svm)
1893 {
1894 svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
1895 return kvm_emulate_halt(&svm->vcpu);
1896 }
1897
vmmcall_interception(struct vcpu_svm * svm)1898 static int vmmcall_interception(struct vcpu_svm *svm)
1899 {
1900 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1901 kvm_emulate_hypercall(&svm->vcpu);
1902 return 1;
1903 }
1904
nested_svm_get_tdp_cr3(struct kvm_vcpu * vcpu)1905 static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
1906 {
1907 struct vcpu_svm *svm = to_svm(vcpu);
1908
1909 return svm->nested.nested_cr3;
1910 }
1911
nested_svm_get_tdp_pdptr(struct kvm_vcpu * vcpu,int index)1912 static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
1913 {
1914 struct vcpu_svm *svm = to_svm(vcpu);
1915 u64 cr3 = svm->nested.nested_cr3;
1916 u64 pdpte;
1917 int ret;
1918
1919 ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
1920 offset_in_page(cr3) + index * 8, 8);
1921 if (ret)
1922 return 0;
1923 return pdpte;
1924 }
1925
1926 static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
1927 unsigned long root)
1928 {
1929 struct vcpu_svm *svm = to_svm(vcpu);
1930
1931 svm->vmcb->control.nested_cr3 = root;
1932 mark_dirty(svm->vmcb, VMCB_NPT);
1933 svm_flush_tlb(vcpu);
1934 }
1935
1936 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
1937 struct x86_exception *fault)
1938 {
1939 struct vcpu_svm *svm = to_svm(vcpu);
1940
1941 if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
1942 /*
1943 * TODO: track the cause of the nested page fault, and
1944 * correctly fill in the high bits of exit_info_1.
1945 */
1946 svm->vmcb->control.exit_code = SVM_EXIT_NPF;
1947 svm->vmcb->control.exit_code_hi = 0;
1948 svm->vmcb->control.exit_info_1 = (1ULL << 32);
1949 svm->vmcb->control.exit_info_2 = fault->address;
1950 }
1951
1952 svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
1953 svm->vmcb->control.exit_info_1 |= fault->error_code;
1954
1955 /*
1956 * The present bit is always zero for page structure faults on real
1957 * hardware.
1958 */
1959 if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
1960 svm->vmcb->control.exit_info_1 &= ~1;
1961
1962 nested_svm_vmexit(svm);
1963 }
1964
1965 static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
1966 {
1967 WARN_ON(mmu_is_nested(vcpu));
1968 kvm_init_shadow_mmu(vcpu);
1969 vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
1970 vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
1971 vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
1972 vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
1973 vcpu->arch.mmu.shadow_root_level = get_npt_level();
1974 reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu);
1975 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
1976 }
1977
1978 static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
1979 {
1980 vcpu->arch.walk_mmu = &vcpu->arch.mmu;
1981 }
1982
1983 static int nested_svm_check_permissions(struct vcpu_svm *svm)
1984 {
1985 if (!(svm->vcpu.arch.efer & EFER_SVME)
1986 || !is_paging(&svm->vcpu)) {
1987 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
1988 return 1;
1989 }
1990
1991 if (svm->vmcb->save.cpl) {
1992 kvm_inject_gp(&svm->vcpu, 0);
1993 return 1;
1994 }
1995
1996 return 0;
1997 }
1998
1999 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
2000 bool has_error_code, u32 error_code)
2001 {
2002 int vmexit;
2003
2004 if (!is_guest_mode(&svm->vcpu))
2005 return 0;
2006
2007 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
2008 svm->vmcb->control.exit_code_hi = 0;
2009 svm->vmcb->control.exit_info_1 = error_code;
2010 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
2011
2012 vmexit = nested_svm_intercept(svm);
2013 if (vmexit == NESTED_EXIT_DONE)
2014 svm->nested.exit_required = true;
2015
2016 return vmexit;
2017 }
2018
2019 /* This function returns true if it is safe to enable the irq window */
2020 static inline bool nested_svm_intr(struct vcpu_svm *svm)
2021 {
2022 if (!is_guest_mode(&svm->vcpu))
2023 return true;
2024
2025 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
2026 return true;
2027
2028 if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
2029 return false;
2030
2031 /*
2032 * if vmexit was already requested (by intercepted exception
2033 * for instance) do not overwrite it with "external interrupt"
2034 * vmexit.
2035 */
2036 if (svm->nested.exit_required)
2037 return false;
2038
2039 svm->vmcb->control.exit_code = SVM_EXIT_INTR;
2040 svm->vmcb->control.exit_info_1 = 0;
2041 svm->vmcb->control.exit_info_2 = 0;
2042
2043 if (svm->nested.intercept & 1ULL) {
2044 /*
2045 * The #vmexit can't be emulated here directly because this
2046 * code path runs with irqs and preemption disabled. A
2047 * #vmexit emulation might sleep. Only signal request for
2048 * the #vmexit here.
2049 */
2050 svm->nested.exit_required = true;
2051 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
2052 return false;
2053 }
2054
2055 return true;
2056 }
2057
2058 /* This function returns true if it is safe to enable the nmi window */
2059 static inline bool nested_svm_nmi(struct vcpu_svm *svm)
2060 {
2061 if (!is_guest_mode(&svm->vcpu))
2062 return true;
2063
2064 if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
2065 return true;
2066
2067 svm->vmcb->control.exit_code = SVM_EXIT_NMI;
2068 svm->nested.exit_required = true;
2069
2070 return false;
2071 }
2072
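/*
 * Map the page at guest-physical address @gpa (typically L1's VMCB or one
 * of its permission bitmaps) into the host.  May sleep.  On failure a #GP
 * is injected into the guest and NULL is returned; otherwise the caller
 * must undo the mapping with nested_svm_unmap().
 */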
2073 static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
2074 {
2075 struct page *page;
2076
2077 might_sleep();
2078
2079 page = kvm_vcpu_gfn_to_page(&svm->vcpu, gpa >> PAGE_SHIFT);
2080 if (is_error_page(page))
2081 goto error;
2082
2083 *_page = page;
2084
2085 return kmap(page);
2086
2087 error:
2088 kvm_inject_gp(&svm->vcpu, 0);
2089
2090 return NULL;
2091 }
2092
2093 static void nested_svm_unmap(struct page *page)
2094 {
2095 kunmap(page);
2096 kvm_release_page_dirty(page);
2097 }
2098
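/*
 * Decide whether an IOIO exit from L2 must be reflected to L1 by consulting
 * L1's I/O permission bitmap: one bit per port, and an access of @size bytes
 * covers @size consecutive bits, which may straddle a byte boundary.
 * For illustration: port 0x3f9, size 2 -> byte offset 0x7f in the IOPM,
 * start_bit 1, mask 0b110, and a single byte needs to be read.
 */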
2099 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
2100 {
2101 unsigned port, size, iopm_len;
2102 u16 val, mask;
2103 u8 start_bit;
2104 u64 gpa;
2105
2106 if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
2107 return NESTED_EXIT_HOST;
2108
2109 port = svm->vmcb->control.exit_info_1 >> 16;
2110 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
2111 SVM_IOIO_SIZE_SHIFT;
2112 gpa = svm->nested.vmcb_iopm + (port / 8);
2113 start_bit = port % 8;
2114 iopm_len = (start_bit + size > 8) ? 2 : 1;
2115 mask = (0xf >> (4 - size)) << start_bit;
2116 val = 0;
2117
2118 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
2119 return NESTED_EXIT_DONE;
2120
2121 return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
2122 }
2123
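/*
 * Decide whether an MSR access from L2 must be reflected to L1.  L1's MSR
 * permission bitmap uses two bits per MSR (read intercept, then write
 * intercept), 16 MSRs per 32-bit word.  svm_msrpm_offset() returns the
 * offset of that word in 32-bit units, hence the scaling by 4 below.
 */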
2124 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
2125 {
2126 u32 offset, msr, value;
2127 int write, mask;
2128
2129 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
2130 return NESTED_EXIT_HOST;
2131
2132 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
2133 offset = svm_msrpm_offset(msr);
2134 write = svm->vmcb->control.exit_info_1 & 1;
2135 mask = 1 << ((2 * (msr & 0xf)) + write);
2136
2137 if (offset == MSR_INVALID)
2138 return NESTED_EXIT_DONE;
2139
2140 /* Offset is in 32 bit units but we need it in 8 bit units */
2141 offset *= 4;
2142
2143 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
2144 return NESTED_EXIT_DONE;
2145
2146 return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
2147 }
2148
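/*
 * Exits the host handles itself no matter what L1 intercepts: physical
 * INTR/NMI, machine-check exceptions, nested page faults while NPT is
 * enabled and ordinary #PF while shadow paging (async page faults
 * excepted).  Everything else returns NESTED_EXIT_CONTINUE so the normal
 * intercept check runs.
 */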
2149 static int nested_svm_exit_special(struct vcpu_svm *svm)
2150 {
2151 u32 exit_code = svm->vmcb->control.exit_code;
2152
2153 switch (exit_code) {
2154 case SVM_EXIT_INTR:
2155 case SVM_EXIT_NMI:
2156 case SVM_EXIT_EXCP_BASE + MC_VECTOR:
2157 return NESTED_EXIT_HOST;
2158 case SVM_EXIT_NPF:
2159 /* For now we always handle NPFs in the host when NPT is in use */
2160 if (npt_enabled)
2161 return NESTED_EXIT_HOST;
2162 break;
2163 case SVM_EXIT_EXCP_BASE + PF_VECTOR:
2164 /* When we're shadowing, trap PFs, but not async PF */
2165 if (!npt_enabled && svm->apf_reason == 0)
2166 return NESTED_EXIT_HOST;
2167 break;
2168 case SVM_EXIT_EXCP_BASE + NM_VECTOR:
2169 nm_interception(svm);
2170 break;
2171 default:
2172 break;
2173 }
2174
2175 return NESTED_EXIT_CONTINUE;
2176 }
2177
2178 /*
2179 * Returns NESTED_EXIT_DONE if this #vmexit is to be handled by (reflected into) the L1 guest
2180 */
2181 static int nested_svm_intercept(struct vcpu_svm *svm)
2182 {
2183 u32 exit_code = svm->vmcb->control.exit_code;
2184 int vmexit = NESTED_EXIT_HOST;
2185
2186 switch (exit_code) {
2187 case SVM_EXIT_MSR:
2188 vmexit = nested_svm_exit_handled_msr(svm);
2189 break;
2190 case SVM_EXIT_IOIO:
2191 vmexit = nested_svm_intercept_ioio(svm);
2192 break;
2193 case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
2194 u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
2195 if (svm->nested.intercept_cr & bit)
2196 vmexit = NESTED_EXIT_DONE;
2197 break;
2198 }
2199 case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
2200 u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
2201 if (svm->nested.intercept_dr & bit)
2202 vmexit = NESTED_EXIT_DONE;
2203 break;
2204 }
2205 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
2206 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
2207 if (svm->nested.intercept_exceptions & excp_bits)
2208 vmexit = NESTED_EXIT_DONE;
2209 /* async page faults always cause a vmexit */
2210 else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
2211 svm->apf_reason != 0)
2212 vmexit = NESTED_EXIT_DONE;
2213 break;
2214 }
2215 case SVM_EXIT_ERR: {
2216 vmexit = NESTED_EXIT_DONE;
2217 break;
2218 }
2219 default: {
2220 u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
2221 if (svm->nested.intercept & exit_bits)
2222 vmexit = NESTED_EXIT_DONE;
2223 }
2224 }
2225
2226 return vmexit;
2227 }
2228
2229 static int nested_svm_exit_handled(struct vcpu_svm *svm)
2230 {
2231 int vmexit;
2232
2233 vmexit = nested_svm_intercept(svm);
2234
2235 if (vmexit == NESTED_EXIT_DONE)
2236 nested_svm_vmexit(svm);
2237
2238 return vmexit;
2239 }
2240
2241 static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
2242 {
2243 struct vmcb_control_area *dst = &dst_vmcb->control;
2244 struct vmcb_control_area *from = &from_vmcb->control;
2245
2246 dst->intercept_cr = from->intercept_cr;
2247 dst->intercept_dr = from->intercept_dr;
2248 dst->intercept_exceptions = from->intercept_exceptions;
2249 dst->intercept = from->intercept;
2250 dst->iopm_base_pa = from->iopm_base_pa;
2251 dst->msrpm_base_pa = from->msrpm_base_pa;
2252 dst->tsc_offset = from->tsc_offset;
2253 /* asid not copied, it is handled manually for svm->vmcb. */
2254 dst->tlb_ctl = from->tlb_ctl;
2255 dst->int_ctl = from->int_ctl;
2256 dst->int_vector = from->int_vector;
2257 dst->int_state = from->int_state;
2258 dst->exit_code = from->exit_code;
2259 dst->exit_code_hi = from->exit_code_hi;
2260 dst->exit_info_1 = from->exit_info_1;
2261 dst->exit_info_2 = from->exit_info_2;
2262 dst->exit_int_info = from->exit_int_info;
2263 dst->exit_int_info_err = from->exit_int_info_err;
2264 dst->nested_ctl = from->nested_ctl;
2265 dst->event_inj = from->event_inj;
2266 dst->event_inj_err = from->event_inj_err;
2267 dst->nested_cr3 = from->nested_cr3;
2268 dst->lbr_ctl = from->lbr_ctl;
2269 }
2270
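/*
 * Emulate a #VMEXIT from L2 to L1: copy the current (L2) state back into
 * L1's nested VMCB, restore L1's state from the host save area and leave
 * guest mode with GIF cleared, just as real hardware would.
 */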
2271 static int nested_svm_vmexit(struct vcpu_svm *svm)
2272 {
2273 struct vmcb *nested_vmcb;
2274 struct vmcb *hsave = svm->nested.hsave;
2275 struct vmcb *vmcb = svm->vmcb;
2276 struct page *page;
2277
2278 trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
2279 vmcb->control.exit_info_1,
2280 vmcb->control.exit_info_2,
2281 vmcb->control.exit_int_info,
2282 vmcb->control.exit_int_info_err,
2283 KVM_ISA_SVM);
2284
2285 nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
2286 if (!nested_vmcb)
2287 return 1;
2288
2289 /* Exit Guest-Mode */
2290 leave_guest_mode(&svm->vcpu);
2291 svm->nested.vmcb = 0;
2292
2293 /* Give the current vmcb to the guest */
2294 disable_gif(svm);
2295
2296 nested_vmcb->save.es = vmcb->save.es;
2297 nested_vmcb->save.cs = vmcb->save.cs;
2298 nested_vmcb->save.ss = vmcb->save.ss;
2299 nested_vmcb->save.ds = vmcb->save.ds;
2300 nested_vmcb->save.gdtr = vmcb->save.gdtr;
2301 nested_vmcb->save.idtr = vmcb->save.idtr;
2302 nested_vmcb->save.efer = svm->vcpu.arch.efer;
2303 nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
2304 nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
2305 nested_vmcb->save.cr2 = vmcb->save.cr2;
2306 nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
2307 nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
2308 nested_vmcb->save.rip = vmcb->save.rip;
2309 nested_vmcb->save.rsp = vmcb->save.rsp;
2310 nested_vmcb->save.rax = vmcb->save.rax;
2311 nested_vmcb->save.dr7 = vmcb->save.dr7;
2312 nested_vmcb->save.dr6 = vmcb->save.dr6;
2313 nested_vmcb->save.cpl = vmcb->save.cpl;
2314
2315 nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
2316 nested_vmcb->control.int_vector = vmcb->control.int_vector;
2317 nested_vmcb->control.int_state = vmcb->control.int_state;
2318 nested_vmcb->control.exit_code = vmcb->control.exit_code;
2319 nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
2320 nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
2321 nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
2322 nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
2323 nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
2324
2325 if (svm->nrips_enabled)
2326 nested_vmcb->control.next_rip = vmcb->control.next_rip;
2327
2328 /*
2329 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
2330 * to make sure that we do not lose injected events. So check event_inj
2331 * here and copy it to exit_int_info if it is valid.
2332 * Exit_int_info and event_inj can't both be valid because the case
2333 * below only happens on a VMRUN instruction intercept which has
2334 * no valid exit_int_info set.
2335 */
2336 if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
2337 struct vmcb_control_area *nc = &nested_vmcb->control;
2338
2339 nc->exit_int_info = vmcb->control.event_inj;
2340 nc->exit_int_info_err = vmcb->control.event_inj_err;
2341 }
2342
2343 nested_vmcb->control.tlb_ctl = 0;
2344 nested_vmcb->control.event_inj = 0;
2345 nested_vmcb->control.event_inj_err = 0;
2346
2347 /* We always set V_INTR_MASKING and remember the old value in hflags */
2348 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
2349 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
2350
2351 /* Restore the original control entries */
2352 copy_vmcb_control_area(vmcb, hsave);
2353
2354 kvm_clear_exception_queue(&svm->vcpu);
2355 kvm_clear_interrupt_queue(&svm->vcpu);
2356
2357 svm->nested.nested_cr3 = 0;
2358
2359 /* Restore selected save entries */
2360 svm->vmcb->save.es = hsave->save.es;
2361 svm->vmcb->save.cs = hsave->save.cs;
2362 svm->vmcb->save.ss = hsave->save.ss;
2363 svm->vmcb->save.ds = hsave->save.ds;
2364 svm->vmcb->save.gdtr = hsave->save.gdtr;
2365 svm->vmcb->save.idtr = hsave->save.idtr;
2366 kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
2367 svm_set_efer(&svm->vcpu, hsave->save.efer);
2368 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
2369 svm_set_cr4(&svm->vcpu, hsave->save.cr4);
2370 if (npt_enabled) {
2371 svm->vmcb->save.cr3 = hsave->save.cr3;
2372 svm->vcpu.arch.cr3 = hsave->save.cr3;
2373 } else {
2374 (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
2375 }
2376 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
2377 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
2378 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
2379 svm->vmcb->save.dr7 = 0;
2380 svm->vmcb->save.cpl = 0;
2381 svm->vmcb->control.exit_int_info = 0;
2382
2383 mark_all_dirty(svm->vmcb);
2384
2385 nested_svm_unmap(page);
2386
2387 nested_svm_uninit_mmu_context(&svm->vcpu);
2388 kvm_mmu_reset_context(&svm->vcpu);
2389 kvm_mmu_load(&svm->vcpu);
2390
2391 /*
2392 * Drop what we picked up for L2 via svm_complete_interrupts() so it
2393 * doesn't end up in L1.
2394 */
2395 svm->vcpu.arch.nmi_injected = false;
2396 kvm_clear_exception_queue(&svm->vcpu);
2397 kvm_clear_interrupt_queue(&svm->vcpu);
2398
2399 return 0;
2400 }
2401
2402 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
2403 {
2404 /*
2405 * This function merges the msr permission bitmaps of kvm and the
2406 * nested vmcb. It is optimized in that it only merges the parts where
2407 * the kvm msr permission bitmap may contain zero bits
2408 */
2409 int i;
2410
2411 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
2412 return true;
2413
2414 for (i = 0; i < MSRPM_OFFSETS; i++) {
2415 u32 value, p;
2416 u64 offset;
2417
2418 if (msrpm_offsets[i] == 0xffffffff)
2419 break;
2420
2421 p = msrpm_offsets[i];
2422 offset = svm->nested.vmcb_msrpm + (p * 4);
2423
2424 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
2425 return false;
2426
2427 svm->nested.msrpm[p] = svm->msrpm[p] | value;
2428 }
2429
2430 svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
2431
2432 return true;
2433 }
2434
2435 static bool nested_vmcb_checks(struct vmcb *vmcb)
2436 {
2437 if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
2438 return false;
2439
2440 if (vmcb->control.asid == 0)
2441 return false;
2442
2443 if (vmcb->control.nested_ctl && !npt_enabled)
2444 return false;
2445
2446 return true;
2447 }
2448
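/*
 * Emulate VMRUN: map the VMCB whose guest-physical address is in RAX,
 * sanity-check it, stash L1's state in the host save area, load L2's state
 * and control fields, merge the intercepts and enter guest mode.  Returns
 * false if the VMCB cannot be mapped or fails its consistency checks.
 */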
2449 static bool nested_svm_vmrun(struct vcpu_svm *svm)
2450 {
2451 struct vmcb *nested_vmcb;
2452 struct vmcb *hsave = svm->nested.hsave;
2453 struct vmcb *vmcb = svm->vmcb;
2454 struct page *page;
2455 u64 vmcb_gpa;
2456
2457 vmcb_gpa = svm->vmcb->save.rax;
2458
2459 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
2460 if (!nested_vmcb)
2461 return false;
2462
2463 if (!nested_vmcb_checks(nested_vmcb)) {
2464 nested_vmcb->control.exit_code = SVM_EXIT_ERR;
2465 nested_vmcb->control.exit_code_hi = 0;
2466 nested_vmcb->control.exit_info_1 = 0;
2467 nested_vmcb->control.exit_info_2 = 0;
2468
2469 nested_svm_unmap(page);
2470
2471 return false;
2472 }
2473
2474 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
2475 nested_vmcb->save.rip,
2476 nested_vmcb->control.int_ctl,
2477 nested_vmcb->control.event_inj,
2478 nested_vmcb->control.nested_ctl);
2479
2480 trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
2481 nested_vmcb->control.intercept_cr >> 16,
2482 nested_vmcb->control.intercept_exceptions,
2483 nested_vmcb->control.intercept);
2484
2485 /* Clear internal status */
2486 kvm_clear_exception_queue(&svm->vcpu);
2487 kvm_clear_interrupt_queue(&svm->vcpu);
2488
2489 /*
2490 * Save the old vmcb, so we don't need to pick what we save, but can
2491 * restore everything when a VMEXIT occurs
2492 */
2493 hsave->save.es = vmcb->save.es;
2494 hsave->save.cs = vmcb->save.cs;
2495 hsave->save.ss = vmcb->save.ss;
2496 hsave->save.ds = vmcb->save.ds;
2497 hsave->save.gdtr = vmcb->save.gdtr;
2498 hsave->save.idtr = vmcb->save.idtr;
2499 hsave->save.efer = svm->vcpu.arch.efer;
2500 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
2501 hsave->save.cr4 = svm->vcpu.arch.cr4;
2502 hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
2503 hsave->save.rip = kvm_rip_read(&svm->vcpu);
2504 hsave->save.rsp = vmcb->save.rsp;
2505 hsave->save.rax = vmcb->save.rax;
2506 if (npt_enabled)
2507 hsave->save.cr3 = vmcb->save.cr3;
2508 else
2509 hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
2510
2511 copy_vmcb_control_area(hsave, vmcb);
2512
2513 if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
2514 svm->vcpu.arch.hflags |= HF_HIF_MASK;
2515 else
2516 svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
2517
2518 if (nested_vmcb->control.nested_ctl) {
2519 kvm_mmu_unload(&svm->vcpu);
2520 svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
2521 nested_svm_init_mmu_context(&svm->vcpu);
2522 }
2523
2524 /* Load the nested guest state */
2525 svm->vmcb->save.es = nested_vmcb->save.es;
2526 svm->vmcb->save.cs = nested_vmcb->save.cs;
2527 svm->vmcb->save.ss = nested_vmcb->save.ss;
2528 svm->vmcb->save.ds = nested_vmcb->save.ds;
2529 svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
2530 svm->vmcb->save.idtr = nested_vmcb->save.idtr;
2531 kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
2532 svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
2533 svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
2534 svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
2535 if (npt_enabled) {
2536 svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
2537 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
2538 } else
2539 (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
2540
2541 /* Guest paging mode is active - reset mmu */
2542 kvm_mmu_reset_context(&svm->vcpu);
2543
2544 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
2545 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
2546 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
2547 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
2548
2549 /* In case we don't even reach vcpu_run, the fields are not updated */
2550 svm->vmcb->save.rax = nested_vmcb->save.rax;
2551 svm->vmcb->save.rsp = nested_vmcb->save.rsp;
2552 svm->vmcb->save.rip = nested_vmcb->save.rip;
2553 svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
2554 svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
2555 svm->vmcb->save.cpl = nested_vmcb->save.cpl;
2556
2557 svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
2558 svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
2559
2560 /* cache intercepts */
2561 svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
2562 svm->nested.intercept_dr = nested_vmcb->control.intercept_dr;
2563 svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
2564 svm->nested.intercept = nested_vmcb->control.intercept;
2565
2566 svm_flush_tlb(&svm->vcpu);
2567 svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl &
2568 (V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK);
2569
2570 svm->vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
2571
2572 if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
2573 svm->vcpu.arch.hflags |= HF_VINTR_MASK;
2574 else
2575 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
2576
2577 if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
2578 /* We only want the cr8 intercept bits of the guest */
2579 clr_cr_intercept(svm, INTERCEPT_CR8_READ);
2580 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
2581 }
2582
2583 /* We don't want to see VMMCALLs from a nested guest */
2584 clr_intercept(svm, INTERCEPT_VMMCALL);
2585
2586 svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
2587 svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
2588 svm->vmcb->control.int_state = nested_vmcb->control.int_state;
2589 svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
2590 svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
2591 svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
2592
2593 nested_svm_unmap(page);
2594
2595 /* Enter Guest-Mode */
2596 enter_guest_mode(&svm->vcpu);
2597
2598 /*
2599 * Merge guest and host intercepts - must be called with vcpu in
2600 * guest-mode to take effect here
2601 */
2602 recalc_intercepts(svm);
2603
2604 svm->nested.vmcb = vmcb_gpa;
2605
2606 enable_gif(svm);
2607
2608 mark_all_dirty(svm->vmcb);
2609
2610 return true;
2611 }
2612
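/*
 * Copy the state that VMLOAD/VMSAVE operate on: FS/GS/TR/LDTR including
 * their hidden parts, KERNEL_GS_BASE, the SYSCALL MSRs (STAR/LSTAR/CSTAR/
 * SFMASK) and the SYSENTER MSRs.
 */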
2613 static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
2614 {
2615 to_vmcb->save.fs = from_vmcb->save.fs;
2616 to_vmcb->save.gs = from_vmcb->save.gs;
2617 to_vmcb->save.tr = from_vmcb->save.tr;
2618 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
2619 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
2620 to_vmcb->save.star = from_vmcb->save.star;
2621 to_vmcb->save.lstar = from_vmcb->save.lstar;
2622 to_vmcb->save.cstar = from_vmcb->save.cstar;
2623 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
2624 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
2625 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
2626 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
2627 }
2628
2629 static int vmload_interception(struct vcpu_svm *svm)
2630 {
2631 struct vmcb *nested_vmcb;
2632 struct page *page;
2633
2634 if (nested_svm_check_permissions(svm))
2635 return 1;
2636
2637 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
2638 if (!nested_vmcb)
2639 return 1;
2640
2641 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2642 skip_emulated_instruction(&svm->vcpu);
2643
2644 nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
2645 nested_svm_unmap(page);
2646
2647 return 1;
2648 }
2649
2650 static int vmsave_interception(struct vcpu_svm *svm)
2651 {
2652 struct vmcb *nested_vmcb;
2653 struct page *page;
2654
2655 if (nested_svm_check_permissions(svm))
2656 return 1;
2657
2658 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
2659 if (!nested_vmcb)
2660 return 1;
2661
2662 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2663 skip_emulated_instruction(&svm->vcpu);
2664
2665 nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
2666 nested_svm_unmap(page);
2667
2668 return 1;
2669 }
2670
2671 static int vmrun_interception(struct vcpu_svm *svm)
2672 {
2673 if (nested_svm_check_permissions(svm))
2674 return 1;
2675
2676 /* Save rip after vmrun instruction */
2677 kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);
2678
2679 if (!nested_svm_vmrun(svm))
2680 return 1;
2681
2682 if (!nested_svm_vmrun_msrpm(svm))
2683 goto failed;
2684
2685 return 1;
2686
2687 failed:
2688
2689 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
2690 svm->vmcb->control.exit_code_hi = 0;
2691 svm->vmcb->control.exit_info_1 = 0;
2692 svm->vmcb->control.exit_info_2 = 0;
2693
2694 nested_svm_vmexit(svm);
2695
2696 return 1;
2697 }
2698
2699 static int stgi_interception(struct vcpu_svm *svm)
2700 {
2701 if (nested_svm_check_permissions(svm))
2702 return 1;
2703
2704 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2705 skip_emulated_instruction(&svm->vcpu);
2706 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2707
2708 enable_gif(svm);
2709
2710 return 1;
2711 }
2712
2713 static int clgi_interception(struct vcpu_svm *svm)
2714 {
2715 if (nested_svm_check_permissions(svm))
2716 return 1;
2717
2718 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2719 skip_emulated_instruction(&svm->vcpu);
2720
2721 disable_gif(svm);
2722
2723 /* After a CLGI no interrupts should come */
2724 svm_clear_vintr(svm);
2725 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
2726
2727 mark_dirty(svm->vmcb, VMCB_INTR);
2728
2729 return 1;
2730 }
2731
2732 static int invlpga_interception(struct vcpu_svm *svm)
2733 {
2734 struct kvm_vcpu *vcpu = &svm->vcpu;
2735
2736 trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX),
2737 kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
2738
2739 /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
2740 kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
2741
2742 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2743 skip_emulated_instruction(&svm->vcpu);
2744 return 1;
2745 }
2746
2747 static int skinit_interception(struct vcpu_svm *svm)
2748 {
2749 trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
2750
2751 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2752 return 1;
2753 }
2754
2755 static int wbinvd_interception(struct vcpu_svm *svm)
2756 {
2757 kvm_emulate_wbinvd(&svm->vcpu);
2758 return 1;
2759 }
2760
2761 static int xsetbv_interception(struct vcpu_svm *svm)
2762 {
2763 u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
2764 u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
2765
2766 if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
2767 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2768 skip_emulated_instruction(&svm->vcpu);
2769 }
2770
2771 return 1;
2772 }
2773
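/*
 * For task-switch exits, exit_info_1 carries the new TSS selector in its
 * low 16 bits, exit_info_2 encodes whether the switch came from IRET or
 * JMP (plus an optional error code), and exit_int_info describes an event
 * that was being delivered through a task gate.
 */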
2774 static int task_switch_interception(struct vcpu_svm *svm)
2775 {
2776 u16 tss_selector;
2777 int reason;
2778 int int_type = svm->vmcb->control.exit_int_info &
2779 SVM_EXITINTINFO_TYPE_MASK;
2780 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
2781 uint32_t type =
2782 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
2783 uint32_t idt_v =
2784 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
2785 bool has_error_code = false;
2786 u32 error_code = 0;
2787
2788 tss_selector = (u16)svm->vmcb->control.exit_info_1;
2789
2790 if (svm->vmcb->control.exit_info_2 &
2791 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
2792 reason = TASK_SWITCH_IRET;
2793 else if (svm->vmcb->control.exit_info_2 &
2794 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
2795 reason = TASK_SWITCH_JMP;
2796 else if (idt_v)
2797 reason = TASK_SWITCH_GATE;
2798 else
2799 reason = TASK_SWITCH_CALL;
2800
2801 if (reason == TASK_SWITCH_GATE) {
2802 switch (type) {
2803 case SVM_EXITINTINFO_TYPE_NMI:
2804 svm->vcpu.arch.nmi_injected = false;
2805 break;
2806 case SVM_EXITINTINFO_TYPE_EXEPT:
2807 if (svm->vmcb->control.exit_info_2 &
2808 (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
2809 has_error_code = true;
2810 error_code =
2811 (u32)svm->vmcb->control.exit_info_2;
2812 }
2813 kvm_clear_exception_queue(&svm->vcpu);
2814 break;
2815 case SVM_EXITINTINFO_TYPE_INTR:
2816 kvm_clear_interrupt_queue(&svm->vcpu);
2817 break;
2818 default:
2819 break;
2820 }
2821 }
2822
2823 if (reason != TASK_SWITCH_GATE ||
2824 int_type == SVM_EXITINTINFO_TYPE_SOFT ||
2825 (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
2826 (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
2827 skip_emulated_instruction(&svm->vcpu);
2828
2829 if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
2830 int_vec = -1;
2831
2832 if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
2833 has_error_code, error_code) == EMULATE_FAIL) {
2834 svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2835 svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
2836 svm->vcpu.run->internal.ndata = 0;
2837 return 0;
2838 }
2839 return 1;
2840 }
2841
2842 static int cpuid_interception(struct vcpu_svm *svm)
2843 {
2844 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
2845 kvm_emulate_cpuid(&svm->vcpu);
2846 return 1;
2847 }
2848
2849 static int iret_interception(struct vcpu_svm *svm)
2850 {
2851 ++svm->vcpu.stat.nmi_window_exits;
2852 clr_intercept(svm, INTERCEPT_IRET);
2853 svm->vcpu.arch.hflags |= HF_IRET_MASK;
2854 svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
2855 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2856 return 1;
2857 }
2858
2859 static int invlpg_interception(struct vcpu_svm *svm)
2860 {
2861 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2862 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
2863
2864 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
2865 skip_emulated_instruction(&svm->vcpu);
2866 return 1;
2867 }
2868
2869 static int emulate_on_interception(struct vcpu_svm *svm)
2870 {
2871 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
2872 }
2873
2874 static int rdpmc_interception(struct vcpu_svm *svm)
2875 {
2876 int err;
2877
2878 if (!static_cpu_has(X86_FEATURE_NRIPS))
2879 return emulate_on_interception(svm);
2880
2881 err = kvm_rdpmc(&svm->vcpu);
2882 kvm_complete_insn_gp(&svm->vcpu, err);
2883
2884 return 1;
2885 }
2886
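/*
 * With L1's selective CR0 write intercept active, a CR0 write by L2 only
 * causes a #VMEXIT to L1 if it changes bits outside SVM_CR0_SELECTIVE_MASK
 * (CR0.TS and CR0.MP); writes touching only those bits are handled here.
 */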
2887 static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
2888 unsigned long val)
2889 {
2890 unsigned long cr0 = svm->vcpu.arch.cr0;
2891 bool ret = false;
2892 u64 intercept;
2893
2894 intercept = svm->nested.intercept;
2895
2896 if (!is_guest_mode(&svm->vcpu) ||
2897 (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
2898 return false;
2899
2900 cr0 &= ~SVM_CR0_SELECTIVE_MASK;
2901 val &= ~SVM_CR0_SELECTIVE_MASK;
2902
2903 if (cr0 ^ val) {
2904 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
2905 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
2906 }
2907
2908 return ret;
2909 }
2910
2911 #define CR_VALID (1ULL << 63)
2912
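/*
 * With decode assists the CPU tells us what was accessed: bit 63 of
 * exit_info_1 (CR_VALID) marks the information as valid and the low bits
 * name the general-purpose register involved.  Without that we fall back
 * to full instruction emulation.
 */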
2913 static int cr_interception(struct vcpu_svm *svm)
2914 {
2915 int reg, cr;
2916 unsigned long val;
2917 int err;
2918
2919 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2920 return emulate_on_interception(svm);
2921
2922 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
2923 return emulate_on_interception(svm);
2924
2925 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2926 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
2927 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
2928 else
2929 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
2930
2931 err = 0;
2932 if (cr >= 16) { /* mov to cr */
2933 cr -= 16;
2934 val = kvm_register_readl(&svm->vcpu, reg);
2935 switch (cr) {
2936 case 0:
2937 if (!check_selective_cr0_intercepted(svm, val))
2938 err = kvm_set_cr0(&svm->vcpu, val);
2939 else
2940 return 1;
2941
2942 break;
2943 case 3:
2944 err = kvm_set_cr3(&svm->vcpu, val);
2945 break;
2946 case 4:
2947 err = kvm_set_cr4(&svm->vcpu, val);
2948 break;
2949 case 8:
2950 err = kvm_set_cr8(&svm->vcpu, val);
2951 break;
2952 default:
2953 WARN(1, "unhandled write to CR%d", cr);
2954 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2955 return 1;
2956 }
2957 } else { /* mov from cr */
2958 switch (cr) {
2959 case 0:
2960 val = kvm_read_cr0(&svm->vcpu);
2961 break;
2962 case 2:
2963 val = svm->vcpu.arch.cr2;
2964 break;
2965 case 3:
2966 val = kvm_read_cr3(&svm->vcpu);
2967 break;
2968 case 4:
2969 val = kvm_read_cr4(&svm->vcpu);
2970 break;
2971 case 8:
2972 val = kvm_get_cr8(&svm->vcpu);
2973 break;
2974 default:
2975 WARN(1, "unhandled read from CR%d", cr);
2976 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2977 return 1;
2978 }
2979 kvm_register_writel(&svm->vcpu, reg, val);
2980 }
2981 kvm_complete_insn_gp(&svm->vcpu, err);
2982
2983 return 1;
2984 }
2985
2986 static int dr_interception(struct vcpu_svm *svm)
2987 {
2988 int reg, dr;
2989 unsigned long val;
2990
2991 if (svm->vcpu.guest_debug == 0) {
2992 /*
2993 * No more DR vmexits; force a reload of the debug registers
2994 * and reenter on this instruction. The next vmexit will
2995 * retrieve the full state of the debug registers.
2996 */
2997 clr_dr_intercepts(svm);
2998 svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
2999 return 1;
3000 }
3001
3002 if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
3003 return emulate_on_interception(svm);
3004
3005 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
3006 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
3007
3008 if (dr >= 16) { /* mov to DRn */
3009 if (!kvm_require_dr(&svm->vcpu, dr - 16))
3010 return 1;
3011 val = kvm_register_readl(&svm->vcpu, reg);
3012 kvm_set_dr(&svm->vcpu, dr - 16, val);
3013 } else {
3014 if (!kvm_require_dr(&svm->vcpu, dr))
3015 return 1;
3016 kvm_get_dr(&svm->vcpu, dr, &val);
3017 kvm_register_writel(&svm->vcpu, reg, val);
3018 }
3019
3020 skip_emulated_instruction(&svm->vcpu);
3021
3022 return 1;
3023 }
3024
3025 static int cr8_write_interception(struct vcpu_svm *svm)
3026 {
3027 struct kvm_run *kvm_run = svm->vcpu.run;
3028 int r;
3029
3030 u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
3031 /* instruction emulation calls kvm_set_cr8() */
3032 r = cr_interception(svm);
3033 if (lapic_in_kernel(&svm->vcpu))
3034 return r;
3035 if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
3036 return r;
3037 kvm_run->exit_reason = KVM_EXIT_SET_TPR;
3038 return 0;
3039 }
3040
3041 static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
3042 {
3043 struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
3044 return vmcb->control.tsc_offset + host_tsc;
3045 }
3046
3047 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3048 {
3049 struct vcpu_svm *svm = to_svm(vcpu);
3050
3051 switch (msr_info->index) {
3052 case MSR_IA32_TSC: {
3053 msr_info->data = svm->vmcb->control.tsc_offset +
3054 kvm_scale_tsc(vcpu, rdtsc());
3055
3056 break;
3057 }
3058 case MSR_STAR:
3059 msr_info->data = svm->vmcb->save.star;
3060 break;
3061 #ifdef CONFIG_X86_64
3062 case MSR_LSTAR:
3063 msr_info->data = svm->vmcb->save.lstar;
3064 break;
3065 case MSR_CSTAR:
3066 msr_info->data = svm->vmcb->save.cstar;
3067 break;
3068 case MSR_KERNEL_GS_BASE:
3069 msr_info->data = svm->vmcb->save.kernel_gs_base;
3070 break;
3071 case MSR_SYSCALL_MASK:
3072 msr_info->data = svm->vmcb->save.sfmask;
3073 break;
3074 #endif
3075 case MSR_IA32_SYSENTER_CS:
3076 msr_info->data = svm->vmcb->save.sysenter_cs;
3077 break;
3078 case MSR_IA32_SYSENTER_EIP:
3079 msr_info->data = svm->sysenter_eip;
3080 break;
3081 case MSR_IA32_SYSENTER_ESP:
3082 msr_info->data = svm->sysenter_esp;
3083 break;
3084 /*
3085 * Nobody will change the following 5 values in the VMCB so we can
3086 * safely return them on rdmsr. They will always be 0 until LBRV is
3087 * implemented.
3088 */
3089 case MSR_IA32_DEBUGCTLMSR:
3090 msr_info->data = svm->vmcb->save.dbgctl;
3091 break;
3092 case MSR_IA32_LASTBRANCHFROMIP:
3093 msr_info->data = svm->vmcb->save.br_from;
3094 break;
3095 case MSR_IA32_LASTBRANCHTOIP:
3096 msr_info->data = svm->vmcb->save.br_to;
3097 break;
3098 case MSR_IA32_LASTINTFROMIP:
3099 msr_info->data = svm->vmcb->save.last_excp_from;
3100 break;
3101 case MSR_IA32_LASTINTTOIP:
3102 msr_info->data = svm->vmcb->save.last_excp_to;
3103 break;
3104 case MSR_VM_HSAVE_PA:
3105 msr_info->data = svm->nested.hsave_msr;
3106 break;
3107 case MSR_VM_CR:
3108 msr_info->data = svm->nested.vm_cr_msr;
3109 break;
3110 case MSR_IA32_SPEC_CTRL:
3111 if (!msr_info->host_initiated &&
3112 !guest_cpuid_has_spec_ctrl(vcpu))
3113 return 1;
3114
3115 msr_info->data = svm->spec_ctrl;
3116 break;
3117 case MSR_AMD64_VIRT_SPEC_CTRL:
3118 if (!msr_info->host_initiated &&
3119 !guest_cpuid_has_virt_ssbd(vcpu))
3120 return 1;
3121
3122 msr_info->data = svm->virt_spec_ctrl;
3123 break;
3124 case MSR_IA32_UCODE_REV:
3125 msr_info->data = 0x01000065;
3126 break;
3127 default:
3128 return kvm_get_msr_common(vcpu, msr_info);
3129 }
3130 return 0;
3131 }
3132
3133 static int rdmsr_interception(struct vcpu_svm *svm)
3134 {
3135 u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
3136 struct msr_data msr_info;
3137
3138 msr_info.index = ecx;
3139 msr_info.host_initiated = false;
3140 if (svm_get_msr(&svm->vcpu, &msr_info)) {
3141 trace_kvm_msr_read_ex(ecx);
3142 kvm_inject_gp(&svm->vcpu, 0);
3143 } else {
3144 trace_kvm_msr_read(ecx, msr_info.data);
3145
3146 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX,
3147 msr_info.data & 0xffffffff);
3148 kvm_register_write(&svm->vcpu, VCPU_REGS_RDX,
3149 msr_info.data >> 32);
3150 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
3151 skip_emulated_instruction(&svm->vcpu);
3152 }
3153 return 1;
3154 }
3155
3156 static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
3157 {
3158 struct vcpu_svm *svm = to_svm(vcpu);
3159 int svm_dis, chg_mask;
3160
3161 if (data & ~SVM_VM_CR_VALID_MASK)
3162 return 1;
3163
3164 chg_mask = SVM_VM_CR_VALID_MASK;
3165
3166 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
3167 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
3168
3169 svm->nested.vm_cr_msr &= ~chg_mask;
3170 svm->nested.vm_cr_msr |= (data & chg_mask);
3171
3172 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
3173
3174 /* check for svm_disable while efer.svme is set */
3175 if (svm_dis && (vcpu->arch.efer & EFER_SVME))
3176 return 1;
3177
3178 return 0;
3179 }
3180
3181 static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
3182 {
3183 struct vcpu_svm *svm = to_svm(vcpu);
3184
3185 u32 ecx = msr->index;
3186 u64 data = msr->data;
3187 switch (ecx) {
3188 case MSR_IA32_CR_PAT:
3189 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
3190 return 1;
3191 vcpu->arch.pat = data;
3192 svm->vmcb->save.g_pat = data;
3193 mark_dirty(svm->vmcb, VMCB_NPT);
3194 break;
3195 case MSR_IA32_TSC:
3196 kvm_write_tsc(vcpu, msr);
3197 break;
3198 case MSR_IA32_SPEC_CTRL:
3199 if (!msr->host_initiated &&
3200 !guest_cpuid_has_spec_ctrl(vcpu))
3201 return 1;
3202
3203 /* The STIBP bit doesn't fault even if it's not advertised */
3204 if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
3205 return 1;
3206
3207 svm->spec_ctrl = data;
3208
3209 if (!data)
3210 break;
3211
3212 /*
3213 * For non-nested:
3214 * When it's written (to non-zero) for the first time, pass
3215 * it through.
3216 *
3217 * For nested:
3218 * The handling of the MSR bitmap for L2 guests is done in
3219 * nested_svm_vmrun_msrpm.
3220 * We update the L1 MSR bit as well since it will end up
3221 * touching the MSR anyway now.
3222 */
3223 set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
3224 break;
3225 case MSR_IA32_PRED_CMD:
3226 if (!msr->host_initiated &&
3227 !guest_cpuid_has_ibpb(vcpu))
3228 return 1;
3229
3230 if (data & ~PRED_CMD_IBPB)
3231 return 1;
3232
3233 if (!data)
3234 break;
3235
3236 wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
3237 if (is_guest_mode(vcpu))
3238 break;
3239 set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
3240 break;
3241 case MSR_AMD64_VIRT_SPEC_CTRL:
3242 if (!msr->host_initiated &&
3243 !guest_cpuid_has_virt_ssbd(vcpu))
3244 return 1;
3245
3246 if (data & ~SPEC_CTRL_SSBD)
3247 return 1;
3248
3249 svm->virt_spec_ctrl = data;
3250 break;
3251 case MSR_STAR:
3252 svm->vmcb->save.star = data;
3253 break;
3254 #ifdef CONFIG_X86_64
3255 case MSR_LSTAR:
3256 svm->vmcb->save.lstar = data;
3257 break;
3258 case MSR_CSTAR:
3259 svm->vmcb->save.cstar = data;
3260 break;
3261 case MSR_KERNEL_GS_BASE:
3262 svm->vmcb->save.kernel_gs_base = data;
3263 break;
3264 case MSR_SYSCALL_MASK:
3265 svm->vmcb->save.sfmask = data;
3266 break;
3267 #endif
3268 case MSR_IA32_SYSENTER_CS:
3269 svm->vmcb->save.sysenter_cs = data;
3270 break;
3271 case MSR_IA32_SYSENTER_EIP:
3272 svm->sysenter_eip = data;
3273 svm->vmcb->save.sysenter_eip = data;
3274 break;
3275 case MSR_IA32_SYSENTER_ESP:
3276 svm->sysenter_esp = data;
3277 svm->vmcb->save.sysenter_esp = data;
3278 break;
3279 case MSR_IA32_DEBUGCTLMSR:
3280 if (!boot_cpu_has(X86_FEATURE_LBRV)) {
3281 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
3282 __func__, data);
3283 break;
3284 }
3285 if (data & DEBUGCTL_RESERVED_BITS)
3286 return 1;
3287
3288 svm->vmcb->save.dbgctl = data;
3289 mark_dirty(svm->vmcb, VMCB_LBR);
3290 if (data & (1ULL<<0))
3291 svm_enable_lbrv(svm);
3292 else
3293 svm_disable_lbrv(svm);
3294 break;
3295 case MSR_VM_HSAVE_PA:
3296 svm->nested.hsave_msr = data;
3297 break;
3298 case MSR_VM_CR:
3299 return svm_set_vm_cr(vcpu, data);
3300 case MSR_VM_IGNNE:
3301 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
3302 break;
3303 default:
3304 return kvm_set_msr_common(vcpu, msr);
3305 }
3306 return 0;
3307 }
3308
3309 static int wrmsr_interception(struct vcpu_svm *svm)
3310 {
3311 struct msr_data msr;
3312 u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
3313 u64 data = kvm_read_edx_eax(&svm->vcpu);
3314
3315 msr.data = data;
3316 msr.index = ecx;
3317 msr.host_initiated = false;
3318
3319 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
3320 if (kvm_set_msr(&svm->vcpu, &msr)) {
3321 trace_kvm_msr_write_ex(ecx, data);
3322 kvm_inject_gp(&svm->vcpu, 0);
3323 } else {
3324 trace_kvm_msr_write(ecx, data);
3325 skip_emulated_instruction(&svm->vcpu);
3326 }
3327 return 1;
3328 }
3329
3330 static int msr_interception(struct vcpu_svm *svm)
3331 {
3332 if (svm->vmcb->control.exit_info_1)
3333 return wrmsr_interception(svm);
3334 else
3335 return rdmsr_interception(svm);
3336 }
3337
3338 static int interrupt_window_interception(struct vcpu_svm *svm)
3339 {
3340 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3341 svm_clear_vintr(svm);
3342 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
3343 mark_dirty(svm->vmcb, VMCB_INTR);
3344 ++svm->vcpu.stat.irq_window_exits;
3345 return 1;
3346 }
3347
3348 static int pause_interception(struct vcpu_svm *svm)
3349 {
3350 kvm_vcpu_on_spin(&(svm->vcpu));
3351 return 1;
3352 }
3353
3354 static int nop_interception(struct vcpu_svm *svm)
3355 {
3356 skip_emulated_instruction(&(svm->vcpu));
3357 return 1;
3358 }
3359
3360 static int monitor_interception(struct vcpu_svm *svm)
3361 {
3362 printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
3363 return nop_interception(svm);
3364 }
3365
3366 static int mwait_interception(struct vcpu_svm *svm)
3367 {
3368 printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
3369 return nop_interception(svm);
3370 }
3371
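/*
 * Exit handlers indexed by SVM exit code.  A handler returns 1 to keep
 * running the guest and 0 to exit to userspace with vcpu->run filled in.
 */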
3372 static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
3373 [SVM_EXIT_READ_CR0] = cr_interception,
3374 [SVM_EXIT_READ_CR3] = cr_interception,
3375 [SVM_EXIT_READ_CR4] = cr_interception,
3376 [SVM_EXIT_READ_CR8] = cr_interception,
3377 [SVM_EXIT_CR0_SEL_WRITE] = cr_interception,
3378 [SVM_EXIT_WRITE_CR0] = cr_interception,
3379 [SVM_EXIT_WRITE_CR3] = cr_interception,
3380 [SVM_EXIT_WRITE_CR4] = cr_interception,
3381 [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
3382 [SVM_EXIT_READ_DR0] = dr_interception,
3383 [SVM_EXIT_READ_DR1] = dr_interception,
3384 [SVM_EXIT_READ_DR2] = dr_interception,
3385 [SVM_EXIT_READ_DR3] = dr_interception,
3386 [SVM_EXIT_READ_DR4] = dr_interception,
3387 [SVM_EXIT_READ_DR5] = dr_interception,
3388 [SVM_EXIT_READ_DR6] = dr_interception,
3389 [SVM_EXIT_READ_DR7] = dr_interception,
3390 [SVM_EXIT_WRITE_DR0] = dr_interception,
3391 [SVM_EXIT_WRITE_DR1] = dr_interception,
3392 [SVM_EXIT_WRITE_DR2] = dr_interception,
3393 [SVM_EXIT_WRITE_DR3] = dr_interception,
3394 [SVM_EXIT_WRITE_DR4] = dr_interception,
3395 [SVM_EXIT_WRITE_DR5] = dr_interception,
3396 [SVM_EXIT_WRITE_DR6] = dr_interception,
3397 [SVM_EXIT_WRITE_DR7] = dr_interception,
3398 [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
3399 [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
3400 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
3401 [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
3402 [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
3403 [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
3404 [SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception,
3405 [SVM_EXIT_INTR] = intr_interception,
3406 [SVM_EXIT_NMI] = nmi_interception,
3407 [SVM_EXIT_SMI] = nop_on_interception,
3408 [SVM_EXIT_INIT] = nop_on_interception,
3409 [SVM_EXIT_VINTR] = interrupt_window_interception,
3410 [SVM_EXIT_RDPMC] = rdpmc_interception,
3411 [SVM_EXIT_CPUID] = cpuid_interception,
3412 [SVM_EXIT_IRET] = iret_interception,
3413 [SVM_EXIT_INVD] = emulate_on_interception,
3414 [SVM_EXIT_PAUSE] = pause_interception,
3415 [SVM_EXIT_HLT] = halt_interception,
3416 [SVM_EXIT_INVLPG] = invlpg_interception,
3417 [SVM_EXIT_INVLPGA] = invlpga_interception,
3418 [SVM_EXIT_IOIO] = io_interception,
3419 [SVM_EXIT_MSR] = msr_interception,
3420 [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
3421 [SVM_EXIT_SHUTDOWN] = shutdown_interception,
3422 [SVM_EXIT_VMRUN] = vmrun_interception,
3423 [SVM_EXIT_VMMCALL] = vmmcall_interception,
3424 [SVM_EXIT_VMLOAD] = vmload_interception,
3425 [SVM_EXIT_VMSAVE] = vmsave_interception,
3426 [SVM_EXIT_STGI] = stgi_interception,
3427 [SVM_EXIT_CLGI] = clgi_interception,
3428 [SVM_EXIT_SKINIT] = skinit_interception,
3429 [SVM_EXIT_WBINVD] = wbinvd_interception,
3430 [SVM_EXIT_MONITOR] = monitor_interception,
3431 [SVM_EXIT_MWAIT] = mwait_interception,
3432 [SVM_EXIT_XSETBV] = xsetbv_interception,
3433 [SVM_EXIT_NPF] = pf_interception,
3434 [SVM_EXIT_RSM] = emulate_on_interception,
3435 };
3436
3437 static void dump_vmcb(struct kvm_vcpu *vcpu)
3438 {
3439 struct vcpu_svm *svm = to_svm(vcpu);
3440 struct vmcb_control_area *control = &svm->vmcb->control;
3441 struct vmcb_save_area *save = &svm->vmcb->save;
3442
3443 pr_err("VMCB Control Area:\n");
3444 pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
3445 pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
3446 pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
3447 pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
3448 pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
3449 pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
3450 pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
3451 pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
3452 pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
3453 pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
3454 pr_err("%-20s%d\n", "asid:", control->asid);
3455 pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
3456 pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
3457 pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
3458 pr_err("%-20s%08x\n", "int_state:", control->int_state);
3459 pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
3460 pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
3461 pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
3462 pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
3463 pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
3464 pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
3465 pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
3466 pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
3467 pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
3468 pr_err("%-20s%lld\n", "lbr_ctl:", control->lbr_ctl);
3469 pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
3470 pr_err("VMCB State Save Area:\n");
3471 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3472 "es:",
3473 save->es.selector, save->es.attrib,
3474 save->es.limit, save->es.base);
3475 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3476 "cs:",
3477 save->cs.selector, save->cs.attrib,
3478 save->cs.limit, save->cs.base);
3479 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3480 "ss:",
3481 save->ss.selector, save->ss.attrib,
3482 save->ss.limit, save->ss.base);
3483 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3484 "ds:",
3485 save->ds.selector, save->ds.attrib,
3486 save->ds.limit, save->ds.base);
3487 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3488 "fs:",
3489 save->fs.selector, save->fs.attrib,
3490 save->fs.limit, save->fs.base);
3491 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3492 "gs:",
3493 save->gs.selector, save->gs.attrib,
3494 save->gs.limit, save->gs.base);
3495 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3496 "gdtr:",
3497 save->gdtr.selector, save->gdtr.attrib,
3498 save->gdtr.limit, save->gdtr.base);
3499 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3500 "ldtr:",
3501 save->ldtr.selector, save->ldtr.attrib,
3502 save->ldtr.limit, save->ldtr.base);
3503 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3504 "idtr:",
3505 save->idtr.selector, save->idtr.attrib,
3506 save->idtr.limit, save->idtr.base);
3507 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3508 "tr:",
3509 save->tr.selector, save->tr.attrib,
3510 save->tr.limit, save->tr.base);
3511 pr_err("cpl: %d efer: %016llx\n",
3512 save->cpl, save->efer);
3513 pr_err("%-15s %016llx %-13s %016llx\n",
3514 "cr0:", save->cr0, "cr2:", save->cr2);
3515 pr_err("%-15s %016llx %-13s %016llx\n",
3516 "cr3:", save->cr3, "cr4:", save->cr4);
3517 pr_err("%-15s %016llx %-13s %016llx\n",
3518 "dr6:", save->dr6, "dr7:", save->dr7);
3519 pr_err("%-15s %016llx %-13s %016llx\n",
3520 "rip:", save->rip, "rflags:", save->rflags);
3521 pr_err("%-15s %016llx %-13s %016llx\n",
3522 "rsp:", save->rsp, "rax:", save->rax);
3523 pr_err("%-15s %016llx %-13s %016llx\n",
3524 "star:", save->star, "lstar:", save->lstar);
3525 pr_err("%-15s %016llx %-13s %016llx\n",
3526 "cstar:", save->cstar, "sfmask:", save->sfmask);
3527 pr_err("%-15s %016llx %-13s %016llx\n",
3528 "kernel_gs_base:", save->kernel_gs_base,
3529 "sysenter_cs:", save->sysenter_cs);
3530 pr_err("%-15s %016llx %-13s %016llx\n",
3531 "sysenter_esp:", save->sysenter_esp,
3532 "sysenter_eip:", save->sysenter_eip);
3533 pr_err("%-15s %016llx %-13s %016llx\n",
3534 "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
3535 pr_err("%-15s %016llx %-13s %016llx\n",
3536 "br_from:", save->br_from, "br_to:", save->br_to);
3537 pr_err("%-15s %016llx %-13s %016llx\n",
3538 "excp_from:", save->last_excp_from,
3539 "excp_to:", save->last_excp_to);
3540 }
3541
3542 static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
3543 {
3544 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
3545
3546 *info1 = control->exit_info_1;
3547 *info2 = control->exit_info_2;
3548 }
3549
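/*
 * Top-level #VMEXIT dispatcher: while in guest mode, first decide whether
 * the exit must be reflected to L1 as a nested #VMEXIT; otherwise complete
 * event bookkeeping and invoke the handler for this exit code.
 */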
3550 static int handle_exit(struct kvm_vcpu *vcpu)
3551 {
3552 struct vcpu_svm *svm = to_svm(vcpu);
3553 struct kvm_run *kvm_run = vcpu->run;
3554 u32 exit_code = svm->vmcb->control.exit_code;
3555
3556 trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
3557
3558 if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
3559 vcpu->arch.cr0 = svm->vmcb->save.cr0;
3560 if (npt_enabled)
3561 vcpu->arch.cr3 = svm->vmcb->save.cr3;
3562
3563 if (unlikely(svm->nested.exit_required)) {
3564 nested_svm_vmexit(svm);
3565 svm->nested.exit_required = false;
3566
3567 return 1;
3568 }
3569
3570 if (is_guest_mode(vcpu)) {
3571 int vmexit;
3572
3573 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
3574 svm->vmcb->control.exit_info_1,
3575 svm->vmcb->control.exit_info_2,
3576 svm->vmcb->control.exit_int_info,
3577 svm->vmcb->control.exit_int_info_err,
3578 KVM_ISA_SVM);
3579
3580 vmexit = nested_svm_exit_special(svm);
3581
3582 if (vmexit == NESTED_EXIT_CONTINUE)
3583 vmexit = nested_svm_exit_handled(svm);
3584
3585 if (vmexit == NESTED_EXIT_DONE)
3586 return 1;
3587 }
3588
3589 svm_complete_interrupts(svm);
3590
3591 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
3592 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3593 kvm_run->fail_entry.hardware_entry_failure_reason
3594 = svm->vmcb->control.exit_code;
3595 pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
3596 dump_vmcb(vcpu);
3597 return 0;
3598 }
3599
3600 if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
3601 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
3602 exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
3603 exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
3604 printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
3605 "exit_code 0x%x\n",
3606 __func__, svm->vmcb->control.exit_int_info,
3607 exit_code);
3608
3609 if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
3610 || !svm_exit_handlers[exit_code]) {
3611 WARN_ONCE(1, "svm: unexpected exit reason 0x%x\n", exit_code);
3612 kvm_queue_exception(vcpu, UD_VECTOR);
3613 return 1;
3614 }
3615
3616 return svm_exit_handlers[exit_code](svm);
3617 }
3618
3619 static void reload_tss(struct kvm_vcpu *vcpu)
3620 {
3621 int cpu = raw_smp_processor_id();
3622
3623 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
3624 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
3625 load_TR_desc();
3626 }
3627
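/*
 * Allocate a fresh ASID if this vcpu's ASID generation no longer matches
 * the current CPU's, e.g. after the vcpu has been migrated to another
 * physical CPU.
 */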
3628 static void pre_svm_run(struct vcpu_svm *svm)
3629 {
3630 int cpu = raw_smp_processor_id();
3631
3632 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
3633
3634 /* FIXME: handle wraparound of asid_generation */
3635 if (svm->asid_generation != sd->asid_generation)
3636 new_asid(svm, sd);
3637 }
3638
3639 static void svm_inject_nmi(struct kvm_vcpu *vcpu)
3640 {
3641 struct vcpu_svm *svm = to_svm(vcpu);
3642
3643 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
3644 vcpu->arch.hflags |= HF_NMI_MASK;
3645 set_intercept(svm, INTERCEPT_IRET);
3646 ++vcpu->stat.nmi_injections;
3647 }
3648
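/*
 * Program a virtual interrupt (V_IRQ) into the VMCB, with the priority
 * field hard-coded to the maximum instead of being derived from the vector.
 * Used together with the VINTR intercept to learn when the guest is able to
 * accept an interrupt.
 */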
3649 static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
3650 {
3651 struct vmcb_control_area *control;
3652
3653 control = &svm->vmcb->control;
3654 control->int_vector = irq;
3655 control->int_ctl &= ~V_INTR_PRIO_MASK;
3656 control->int_ctl |= V_IRQ_MASK |
3657 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
3658 mark_dirty(svm->vmcb, VMCB_INTR);
3659 }
3660
3661 static void svm_set_irq(struct kvm_vcpu *vcpu)
3662 {
3663 struct vcpu_svm *svm = to_svm(vcpu);
3664
3665 BUG_ON(!(gif_set(svm)));
3666
3667 trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
3668 ++vcpu->stat.irq_injections;
3669
3670 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
3671 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
3672 }
3673
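/*
 * Decide whether CR8 (TPR) writes need to be intercepted: if an interrupt
 * is pending but blocked by the current TPR, trap TPR writes so KVM notices
 * when the guest lowers it; otherwise let the guest write CR8 directly.
 */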
3674 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
3675 {
3676 struct vcpu_svm *svm = to_svm(vcpu);
3677
3678 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
3679 return;
3680
3681 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
3682
3683 if (irr == -1)
3684 return;
3685
3686 if (tpr >= irr)
3687 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
3688 }
3689
3690 static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
3691 {
3692 return;
3693 }
3694
3695 static int svm_cpu_uses_apicv(struct kvm_vcpu *vcpu)
3696 {
3697 return 0;
3698 }
3699
3700 static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu)
3701 {
3702 return;
3703 }
3704
3705 static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
3706 {
3707 return;
3708 }
3709
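/*
 * An NMI can be injected only if there is no interrupt shadow, NMIs are not
 * masked from a previous injection, GIF is set, and (for a nested guest)
 * the NMI does not have to be forwarded to L1 as a #VMEXIT first.
 */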
3710 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
3711 {
3712 struct vcpu_svm *svm = to_svm(vcpu);
3713 struct vmcb *vmcb = svm->vmcb;
3714 int ret;
3715 ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
3716 !(svm->vcpu.arch.hflags & HF_NMI_MASK);
3717 ret = ret && gif_set(svm) && nested_svm_nmi(svm);
3718
3719 return ret;
3720 }
3721
3722 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
3723 {
3724 struct vcpu_svm *svm = to_svm(vcpu);
3725
3726 return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
3727 }
3728
3729 static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
3730 {
3731 struct vcpu_svm *svm = to_svm(vcpu);
3732
3733 if (masked) {
3734 svm->vcpu.arch.hflags |= HF_NMI_MASK;
3735 set_intercept(svm, INTERCEPT_IRET);
3736 } else {
3737 svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
3738 clr_intercept(svm, INTERCEPT_IRET);
3739 }
3740 }
3741
3742 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
3743 {
3744 struct vcpu_svm *svm = to_svm(vcpu);
3745 struct vmcb *vmcb = svm->vmcb;
3746 int ret;
3747
3748 if (!gif_set(svm) ||
3749 (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
3750 return 0;
3751
3752 ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
3753
3754 if (is_guest_mode(vcpu))
3755 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
3756
3757 return ret;
3758 }
3759
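/*
 * SVM has no dedicated "interrupt window" exit. To learn when the guest can
 * take an interrupt, program a dummy virtual interrupt and enable the VINTR
 * intercept; the resulting VINTR exit is our window.
 */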
3760 static void enable_irq_window(struct kvm_vcpu *vcpu)
3761 {
3762 struct vcpu_svm *svm = to_svm(vcpu);
3763
3764 /*
3765 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
3766 * 1, because that's a separate STGI/VMRUN intercept. The next time we
3767 * get that intercept, this function will be called again and we will
3768 * get the vintr intercept then.
3769 */
3770 if (gif_set(svm) && nested_svm_intr(svm)) {
3771 svm_set_vintr(svm);
3772 svm_inject_irq(svm, 0x0);
3773 }
3774 }
3775
3776 static void enable_nmi_window(struct kvm_vcpu *vcpu)
3777 {
3778 struct vcpu_svm *svm = to_svm(vcpu);
3779
3780 if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
3781 == HF_NMI_MASK)
3782 return; /* IRET will cause a vm exit */
3783
3784 /*
3785 * Something prevents the NMI from being injected. Single-step over the
3786 * possible problem (IRET, exception injection or interrupt shadow).
3787 */
3788 svm->nmi_singlestep = true;
3789 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
3790 }
3791
3792 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
3793 {
3794 return 0;
3795 }
3796
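/*
 * Flush the guest's TLB entries. With FLUSHBYASID the next VMRUN flushes
 * only this guest's ASID; otherwise force a new ASID to be allocated, which
 * makes the old translations unreachable.
 */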
3797 static void svm_flush_tlb(struct kvm_vcpu *vcpu)
3798 {
3799 struct vcpu_svm *svm = to_svm(vcpu);
3800
3801 if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
3802 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
3803 else
3804 svm->asid_generation--;
3805 }
3806
3807 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
3808 {
3809 }
3810
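/*
 * After a #VMEXIT, copy the V_TPR value the guest may have written back
 * into the virtual APIC's CR8/TPR, unless CR8 writes were intercepted (in
 * which case the APIC state is already up to date) or the nested hypervisor
 * virtualizes interrupts itself.
 */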
3811 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
3812 {
3813 struct vcpu_svm *svm = to_svm(vcpu);
3814
3815 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
3816 return;
3817
3818 if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
3819 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
3820 kvm_set_cr8(vcpu, cr8);
3821 }
3822 }
3823
3824 static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
3825 {
3826 struct vcpu_svm *svm = to_svm(vcpu);
3827 u64 cr8;
3828
3829 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
3830 return;
3831
3832 cr8 = kvm_get_cr8(vcpu);
3833 svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
3834 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
3835 }
3836
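/*
 * EXITINTINFO describes an event whose delivery was interrupted by this
 * #VMEXIT. Requeue it so it is reinjected on the next VMRUN: NMIs and
 * hardware interrupts are queued again, hardware exceptions are requeued
 * with their error code, and soft exceptions (e.g. INT3) are dropped so the
 * instruction is simply re-executed.
 */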
3837 static void svm_complete_interrupts(struct vcpu_svm *svm)
3838 {
3839 u8 vector;
3840 int type;
3841 u32 exitintinfo = svm->vmcb->control.exit_int_info;
3842 unsigned int3_injected = svm->int3_injected;
3843
3844 svm->int3_injected = 0;
3845
3846 /*
3847 * If we've made progress since setting HF_IRET_MASK, we've
3848 * executed an IRET and can allow NMI injection.
3849 */
3850 if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
3851 && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
3852 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
3853 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3854 }
3855
3856 svm->vcpu.arch.nmi_injected = false;
3857 kvm_clear_exception_queue(&svm->vcpu);
3858 kvm_clear_interrupt_queue(&svm->vcpu);
3859
3860 if (!(exitintinfo & SVM_EXITINTINFO_VALID))
3861 return;
3862
3863 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3864
3865 vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
3866 type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
3867
3868 switch (type) {
3869 case SVM_EXITINTINFO_TYPE_NMI:
3870 svm->vcpu.arch.nmi_injected = true;
3871 break;
3872 case SVM_EXITINTINFO_TYPE_EXEPT:
3873 /*
3874 * In case of software exceptions, do not reinject the vector,
3875 * but re-execute the instruction instead. Rewind RIP first
3876 * if we emulated INT3 before.
3877 */
3878 if (kvm_exception_is_soft(vector)) {
3879 if (vector == BP_VECTOR && int3_injected &&
3880 kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
3881 kvm_rip_write(&svm->vcpu,
3882 kvm_rip_read(&svm->vcpu) -
3883 int3_injected);
3884 break;
3885 }
3886 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
3887 u32 err = svm->vmcb->control.exit_int_info_err;
3888 kvm_requeue_exception_e(&svm->vcpu, vector, err);
3889
3890 } else
3891 kvm_requeue_exception(&svm->vcpu, vector);
3892 break;
3893 case SVM_EXITINTINFO_TYPE_INTR:
3894 kvm_queue_interrupt(&svm->vcpu, vector, false);
3895 break;
3896 default:
3897 break;
3898 }
3899 }
3900
3901 static void svm_cancel_injection(struct kvm_vcpu *vcpu)
3902 {
3903 struct vcpu_svm *svm = to_svm(vcpu);
3904 struct vmcb_control_area *control = &svm->vmcb->control;
3905
3906 control->exit_int_info = control->event_inj;
3907 control->exit_int_info_err = control->event_inj_err;
3908 control->event_inj = 0;
3909 svm_complete_interrupts(svm);
3910 }
3911
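/*
 * The world switch itself: copy RAX/RSP/RIP into the VMCB, block interrupts
 * and NMIs with CLGI, run the guest with VMLOAD/VMRUN/VMSAVE, then restore
 * host state, re-enable GIF with STGI and pull the exit state back out of
 * the VMCB. GPRs other than RAX/RSP/RIP are saved and restored by the
 * inline assembly around VMRUN.
 */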
3912 static void svm_vcpu_run(struct kvm_vcpu *vcpu)
3913 {
3914 struct vcpu_svm *svm = to_svm(vcpu);
3915
3916 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
3917 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
3918 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
3919
3920 /*
3921 * A vmexit emulation is required before the vcpu can be executed
3922 * again.
3923 */
3924 if (unlikely(svm->nested.exit_required))
3925 return;
3926
3927 pre_svm_run(svm);
3928
3929 sync_lapic_to_cr8(vcpu);
3930
3931 svm->vmcb->save.cr2 = vcpu->arch.cr2;
3932
3933 clgi();
3934
3935 /*
3936 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
3937 * it's non-zero. Since vmentry is serialising on affected CPUs, there
3938 * is no need to worry about the conditional branch over the wrmsr
3939 * being speculatively taken.
3940 */
3941 x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
3942
3943 local_irq_enable();
3944
3945 asm volatile (
3946 "push %%" _ASM_BP "; \n\t"
3947 "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
3948 "mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t"
3949 "mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t"
3950 "mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t"
3951 "mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t"
3952 "mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t"
3953 #ifdef CONFIG_X86_64
3954 "mov %c[r8](%[svm]), %%r8 \n\t"
3955 "mov %c[r9](%[svm]), %%r9 \n\t"
3956 "mov %c[r10](%[svm]), %%r10 \n\t"
3957 "mov %c[r11](%[svm]), %%r11 \n\t"
3958 "mov %c[r12](%[svm]), %%r12 \n\t"
3959 "mov %c[r13](%[svm]), %%r13 \n\t"
3960 "mov %c[r14](%[svm]), %%r14 \n\t"
3961 "mov %c[r15](%[svm]), %%r15 \n\t"
3962 #endif
3963
3964 /* Enter guest mode */
3965 "push %%" _ASM_AX " \n\t"
3966 "mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t"
3967 __ex(SVM_VMLOAD) "\n\t"
3968 __ex(SVM_VMRUN) "\n\t"
3969 __ex(SVM_VMSAVE) "\n\t"
3970 "pop %%" _ASM_AX " \n\t"
3971
3972 /* Save guest registers, load host registers */
3973 "mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t"
3974 "mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t"
3975 "mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t"
3976 "mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t"
3977 "mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t"
3978 "mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t"
3979 #ifdef CONFIG_X86_64
3980 "mov %%r8, %c[r8](%[svm]) \n\t"
3981 "mov %%r9, %c[r9](%[svm]) \n\t"
3982 "mov %%r10, %c[r10](%[svm]) \n\t"
3983 "mov %%r11, %c[r11](%[svm]) \n\t"
3984 "mov %%r12, %c[r12](%[svm]) \n\t"
3985 "mov %%r13, %c[r13](%[svm]) \n\t"
3986 "mov %%r14, %c[r14](%[svm]) \n\t"
3987 "mov %%r15, %c[r15](%[svm]) \n\t"
3988 #endif
3989 /*
3990 * Clear host registers marked as clobbered to prevent
3991 * speculative use.
3992 */
3993 "xor %%" _ASM_BX ", %%" _ASM_BX " \n\t"
3994 "xor %%" _ASM_CX ", %%" _ASM_CX " \n\t"
3995 "xor %%" _ASM_DX ", %%" _ASM_DX " \n\t"
3996 "xor %%" _ASM_SI ", %%" _ASM_SI " \n\t"
3997 "xor %%" _ASM_DI ", %%" _ASM_DI " \n\t"
3998 #ifdef CONFIG_X86_64
3999 "xor %%r8, %%r8 \n\t"
4000 "xor %%r9, %%r9 \n\t"
4001 "xor %%r10, %%r10 \n\t"
4002 "xor %%r11, %%r11 \n\t"
4003 "xor %%r12, %%r12 \n\t"
4004 "xor %%r13, %%r13 \n\t"
4005 "xor %%r14, %%r14 \n\t"
4006 "xor %%r15, %%r15 \n\t"
4007 #endif
4008 "pop %%" _ASM_BP
4009 :
4010 : [svm]"a"(svm),
4011 [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
4012 [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
4013 [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
4014 [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
4015 [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
4016 [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
4017 [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
4018 #ifdef CONFIG_X86_64
4019 , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
4020 [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
4021 [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
4022 [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
4023 [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
4024 [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
4025 [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
4026 [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
4027 #endif
4028 : "cc", "memory"
4029 #ifdef CONFIG_X86_64
4030 , "rbx", "rcx", "rdx", "rsi", "rdi"
4031 , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
4032 #else
4033 , "ebx", "ecx", "edx", "esi", "edi"
4034 #endif
4035 );
4036
4037 /* Eliminate branch target predictions from guest mode */
4038 vmexit_fill_RSB();
4039
4040 #ifdef CONFIG_X86_64
4041 wrmsrl(MSR_GS_BASE, svm->host.gs_base);
4042 #else
4043 loadsegment(fs, svm->host.fs);
4044 #ifndef CONFIG_X86_32_LAZY_GS
4045 loadsegment(gs, svm->host.gs);
4046 #endif
4047 #endif
4048
4049 /*
4050 * We do not use IBRS in the kernel. If this vCPU has used the
4051 * SPEC_CTRL MSR it may have left it on; save the value and
4052 * turn it off. This is much more efficient than blindly adding
4053 * it to the atomic save/restore list. Especially as the former
4054 * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
4055 *
4056 * For non-nested case:
4057 * If the L01 MSR bitmap does not intercept the MSR, then we need to
4058 * save it.
4059 *
4060 * For nested case:
4061 * If the L02 MSR bitmap does not intercept the MSR, then we need to
4062 * save it.
4063 */
4064 if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
4065 svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
4066
4067 reload_tss(vcpu);
4068
4069 local_irq_disable();
4070
4071 x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
4072
4073 vcpu->arch.cr2 = svm->vmcb->save.cr2;
4074 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
4075 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
4076 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
4077
4078 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
4079 kvm_before_handle_nmi(&svm->vcpu);
4080
4081 stgi();
4082
4083 /* Any pending NMI will happen here */
4084
4085 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
4086 kvm_after_handle_nmi(&svm->vcpu);
4087
4088 sync_cr8_to_lapic(vcpu);
4089
4090 svm->next_rip = 0;
4091
4092 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
4093
4094 /* if exit due to PF check for async PF */
4095 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
4096 svm->apf_reason = kvm_read_and_reset_pf_reason();
4097
4098 if (npt_enabled) {
4099 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
4100 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
4101 }
4102
4103 /*
4104 * We need to handle MC intercepts here before the vcpu has a chance to
4105 * change the physical cpu
4106 */
4107 if (unlikely(svm->vmcb->control.exit_code ==
4108 SVM_EXIT_EXCP_BASE + MC_VECTOR))
4109 svm_handle_mce(svm);
4110
4111 mark_all_clean(svm->vmcb);
4112 }
4113
4114 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
4115 {
4116 struct vcpu_svm *svm = to_svm(vcpu);
4117
4118 svm->vmcb->save.cr3 = root;
4119 mark_dirty(svm->vmcb, VMCB_CR);
4120 svm_flush_tlb(vcpu);
4121 }
4122
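/*
 * With nested paging the NPT root goes into nested_cr3, while the guest's
 * own CR3 is written to the save area unchanged so it is preserved across
 * save/restore and live migration.
 */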
4123 static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
4124 {
4125 struct vcpu_svm *svm = to_svm(vcpu);
4126
4127 svm->vmcb->control.nested_cr3 = root;
4128 mark_dirty(svm->vmcb, VMCB_NPT);
4129
4130 /* Also sync guest cr3 here in case we live migrate */
4131 svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
4132 mark_dirty(svm->vmcb, VMCB_CR);
4133
4134 svm_flush_tlb(vcpu);
4135 }
4136
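/*
 * The BIOS can disable SVM through the VM_CR MSR; refuse to load in that
 * case.
 */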
4137 static int is_disabled(void)
4138 {
4139 u64 vm_cr;
4140
4141 rdmsrl(MSR_VM_CR, vm_cr);
4142 if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
4143 return 1;
4144
4145 return 0;
4146 }
4147
4148 static void
4149 svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
4150 {
4151 /*
4152 * Patch in the VMMCALL instruction:
4153 */
4154 hypercall[0] = 0x0f;
4155 hypercall[1] = 0x01;
4156 hypercall[2] = 0xd9;
4157 }
4158
4159 static void svm_check_processor_compat(void *rtn)
4160 {
4161 *(int *)rtn = 0;
4162 }
4163
4164 static bool svm_cpu_has_accelerated_tpr(void)
4165 {
4166 return false;
4167 }
4168
4169 static bool svm_has_emulated_msr(int index)
4170 {
4171 switch (index) {
4172 case MSR_IA32_MCG_EXT_CTL:
4173 return false;
4174 default:
4175 break;
4176 }
4177
4178 return true;
4179 }
4180
4181 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
4182 {
4183 return 0;
4184 }
4185
4186 static void svm_cpuid_update(struct kvm_vcpu *vcpu)
4187 {
4188 struct vcpu_svm *svm = to_svm(vcpu);
4189
4190 /* Update nrips enabled cache */
4191 svm->nrips_enabled = !!guest_cpuid_has_nrips(&svm->vcpu);
4192 }
4193
4194 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
4195 {
4196 switch (func) {
4197 case 0x80000001:
4198 if (nested)
4199 entry->ecx |= (1 << 2); /* Set SVM bit */
4200 break;
4201 case 0x8000000A:
4202 entry->eax = 1; /* SVM revision 1 */
4203 entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
4204 ASID emulation to nested SVM */
4205 entry->ecx = 0; /* Reserved */
4206 entry->edx = 0; /* By default do not support any
4207 additional features */
4208
4209 /* Support next_rip if host supports it */
4210 if (boot_cpu_has(X86_FEATURE_NRIPS))
4211 entry->edx |= SVM_FEATURE_NRIP;
4212
4213 /* Support NPT for the guest if enabled */
4214 if (npt_enabled)
4215 entry->edx |= SVM_FEATURE_NPT;
4216
4217 break;
4218 }
4219 }
4220
4221 static int svm_get_lpage_level(void)
4222 {
4223 return PT_PDPE_LEVEL;
4224 }
4225
4226 static bool svm_rdtscp_supported(void)
4227 {
4228 return false;
4229 }
4230
4231 static bool svm_invpcid_supported(void)
4232 {
4233 return false;
4234 }
4235
4236 static bool svm_mpx_supported(void)
4237 {
4238 return false;
4239 }
4240
4241 static bool svm_xsaves_supported(void)
4242 {
4243 return false;
4244 }
4245
4246 static bool svm_has_wbinvd_exit(void)
4247 {
4248 return true;
4249 }
4250
4251 static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
4252 {
4253 struct vcpu_svm *svm = to_svm(vcpu);
4254
4255 set_exception_intercept(svm, NM_VECTOR);
4256 update_cr0_intercept(svm);
4257 }
4258
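/*
 * Map x86 emulator intercept ids to the SVM exit code a nested hypervisor
 * would see for that instruction, together with the emulation stage at
 * which the intercept check applies.
 */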
4259 #define PRE_EX(exit) { .exit_code = (exit), \
4260 .stage = X86_ICPT_PRE_EXCEPT, }
4261 #define POST_EX(exit) { .exit_code = (exit), \
4262 .stage = X86_ICPT_POST_EXCEPT, }
4263 #define POST_MEM(exit) { .exit_code = (exit), \
4264 .stage = X86_ICPT_POST_MEMACCESS, }
4265
4266 static const struct __x86_intercept {
4267 u32 exit_code;
4268 enum x86_intercept_stage stage;
4269 } x86_intercept_map[] = {
4270 [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0),
4271 [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0),
4272 [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0),
4273 [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0),
4274 [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0),
4275 [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0),
4276 [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0),
4277 [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ),
4278 [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ),
4279 [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE),
4280 [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE),
4281 [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ),
4282 [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ),
4283 [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE),
4284 [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE),
4285 [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN),
4286 [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL),
4287 [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD),
4288 [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE),
4289 [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI),
4290 [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI),
4291 [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT),
4292 [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA),
4293 [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP),
4294 [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR),
4295 [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT),
4296 [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG),
4297 [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD),
4298 [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD),
4299 [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR),
4300 [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC),
4301 [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR),
4302 [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC),
4303 [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID),
4304 [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM),
4305 [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE),
4306 [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF),
4307 [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF),
4308 [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT),
4309 [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET),
4310 [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP),
4311 [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT),
4312 [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO),
4313 [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO),
4314 [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO),
4315 [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO),
4316 };
4317
4318 #undef PRE_EX
4319 #undef POST_EX
4320 #undef POST_MEM
4321
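/*
 * Called by the x86 emulator while emulating an instruction on behalf of an
 * L2 guest: reconstruct the exit information L1 would have seen and ask
 * nested_svm_exit_handled() whether the instruction must be turned into a
 * nested #VMEXIT instead of being emulated.
 */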
4322 static int svm_check_intercept(struct kvm_vcpu *vcpu,
4323 struct x86_instruction_info *info,
4324 enum x86_intercept_stage stage)
4325 {
4326 struct vcpu_svm *svm = to_svm(vcpu);
4327 int vmexit, ret = X86EMUL_CONTINUE;
4328 struct __x86_intercept icpt_info;
4329 struct vmcb *vmcb = svm->vmcb;
4330
4331 if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
4332 goto out;
4333
4334 icpt_info = x86_intercept_map[info->intercept];
4335
4336 if (stage != icpt_info.stage)
4337 goto out;
4338
4339 switch (icpt_info.exit_code) {
4340 case SVM_EXIT_READ_CR0:
4341 if (info->intercept == x86_intercept_cr_read)
4342 icpt_info.exit_code += info->modrm_reg;
4343 break;
4344 case SVM_EXIT_WRITE_CR0: {
4345 unsigned long cr0, val;
4346 u64 intercept;
4347
4348 if (info->intercept == x86_intercept_cr_write)
4349 icpt_info.exit_code += info->modrm_reg;
4350
4351 if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
4352 info->intercept == x86_intercept_clts)
4353 break;
4354
4355 intercept = svm->nested.intercept;
4356
4357 if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
4358 break;
4359
4360 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
4361 val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;
4362
4363 if (info->intercept == x86_intercept_lmsw) {
4364 cr0 &= 0xfUL;
4365 val &= 0xfUL;
4366 /* lmsw can't clear PE - catch this here */
4367 if (cr0 & X86_CR0_PE)
4368 val |= X86_CR0_PE;
4369 }
4370
4371 if (cr0 ^ val)
4372 icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
4373
4374 break;
4375 }
4376 case SVM_EXIT_READ_DR0:
4377 case SVM_EXIT_WRITE_DR0:
4378 icpt_info.exit_code += info->modrm_reg;
4379 break;
4380 case SVM_EXIT_MSR:
4381 if (info->intercept == x86_intercept_wrmsr)
4382 vmcb->control.exit_info_1 = 1;
4383 else
4384 vmcb->control.exit_info_1 = 0;
4385 break;
4386 case SVM_EXIT_PAUSE:
4387 /*
4388 * The emulator raises this intercept for the NOP opcode, but PAUSE
4389 * is REP NOP; only treat it as PAUSE if the REP prefix is present.
4390 */
4391 if (info->rep_prefix != REPE_PREFIX)
4392 goto out;
		/* Do not fall through and clobber exit_info with IOIO data. */
		break;
4393 case SVM_EXIT_IOIO: {
4394 u64 exit_info;
4395 u32 bytes;
4396
4397 if (info->intercept == x86_intercept_in ||
4398 info->intercept == x86_intercept_ins) {
4399 exit_info = ((info->src_val & 0xffff) << 16) |
4400 SVM_IOIO_TYPE_MASK;
4401 bytes = info->dst_bytes;
4402 } else {
4403 exit_info = (info->dst_val & 0xffff) << 16;
4404 bytes = info->src_bytes;
4405 }
4406
4407 if (info->intercept == x86_intercept_outs ||
4408 info->intercept == x86_intercept_ins)
4409 exit_info |= SVM_IOIO_STR_MASK;
4410
4411 if (info->rep_prefix)
4412 exit_info |= SVM_IOIO_REP_MASK;
4413
4414 bytes = min(bytes, 4u);
4415
4416 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
4417
4418 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
4419
4420 vmcb->control.exit_info_1 = exit_info;
4421 vmcb->control.exit_info_2 = info->next_rip;
4422
4423 break;
4424 }
4425 default:
4426 break;
4427 }
4428
4429 /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
4430 if (static_cpu_has(X86_FEATURE_NRIPS))
4431 vmcb->control.next_rip = info->next_rip;
4432 vmcb->control.exit_code = icpt_info.exit_code;
4433 vmexit = nested_svm_exit_handled(svm);
4434
4435 ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
4436 : X86EMUL_CONTINUE;
4437
4438 out:
4439 return ret;
4440 }
4441
4442 static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
4443 {
4444 local_irq_enable();
4445 }
4446
4447 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
4448 {
4449 }
4450
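/* Wire the SVM implementations into the generic KVM x86 backend. */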
4451 static struct kvm_x86_ops svm_x86_ops = {
4452 .cpu_has_kvm_support = has_svm,
4453 .disabled_by_bios = is_disabled,
4454 .hardware_setup = svm_hardware_setup,
4455 .hardware_unsetup = svm_hardware_unsetup,
4456 .check_processor_compatibility = svm_check_processor_compat,
4457 .hardware_enable = svm_hardware_enable,
4458 .hardware_disable = svm_hardware_disable,
4459 .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
4460 .has_emulated_msr = svm_has_emulated_msr,
4461
4462 .vcpu_create = svm_create_vcpu,
4463 .vcpu_free = svm_free_vcpu,
4464 .vcpu_reset = svm_vcpu_reset,
4465
4466 .prepare_guest_switch = svm_prepare_guest_switch,
4467 .vcpu_load = svm_vcpu_load,
4468 .vcpu_put = svm_vcpu_put,
4469
4470 .update_bp_intercept = update_bp_intercept,
4471 .get_msr = svm_get_msr,
4472 .set_msr = svm_set_msr,
4473 .get_segment_base = svm_get_segment_base,
4474 .get_segment = svm_get_segment,
4475 .set_segment = svm_set_segment,
4476 .get_cpl = svm_get_cpl,
4477 .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
4478 .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
4479 .decache_cr3 = svm_decache_cr3,
4480 .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
4481 .set_cr0 = svm_set_cr0,
4482 .set_cr3 = svm_set_cr3,
4483 .set_cr4 = svm_set_cr4,
4484 .set_efer = svm_set_efer,
4485 .get_idt = svm_get_idt,
4486 .set_idt = svm_set_idt,
4487 .get_gdt = svm_get_gdt,
4488 .set_gdt = svm_set_gdt,
4489 .get_dr6 = svm_get_dr6,
4490 .set_dr6 = svm_set_dr6,
4491 .set_dr7 = svm_set_dr7,
4492 .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
4493 .cache_reg = svm_cache_reg,
4494 .get_rflags = svm_get_rflags,
4495 .set_rflags = svm_set_rflags,
4496 .fpu_activate = svm_fpu_activate,
4497 .fpu_deactivate = svm_fpu_deactivate,
4498
4499 .tlb_flush = svm_flush_tlb,
4500
4501 .run = svm_vcpu_run,
4502 .handle_exit = handle_exit,
4503 .skip_emulated_instruction = skip_emulated_instruction,
4504 .set_interrupt_shadow = svm_set_interrupt_shadow,
4505 .get_interrupt_shadow = svm_get_interrupt_shadow,
4506 .patch_hypercall = svm_patch_hypercall,
4507 .set_irq = svm_set_irq,
4508 .set_nmi = svm_inject_nmi,
4509 .queue_exception = svm_queue_exception,
4510 .cancel_injection = svm_cancel_injection,
4511 .interrupt_allowed = svm_interrupt_allowed,
4512 .nmi_allowed = svm_nmi_allowed,
4513 .get_nmi_mask = svm_get_nmi_mask,
4514 .set_nmi_mask = svm_set_nmi_mask,
4515 .enable_nmi_window = enable_nmi_window,
4516 .enable_irq_window = enable_irq_window,
4517 .update_cr8_intercept = update_cr8_intercept,
4518 .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
4519 .cpu_uses_apicv = svm_cpu_uses_apicv,
4520 .load_eoi_exitmap = svm_load_eoi_exitmap,
4521 .sync_pir_to_irr = svm_sync_pir_to_irr,
4522
4523 .set_tss_addr = svm_set_tss_addr,
4524 .get_tdp_level = get_npt_level,
4525 .get_mt_mask = svm_get_mt_mask,
4526
4527 .get_exit_info = svm_get_exit_info,
4528
4529 .get_lpage_level = svm_get_lpage_level,
4530
4531 .cpuid_update = svm_cpuid_update,
4532
4533 .rdtscp_supported = svm_rdtscp_supported,
4534 .invpcid_supported = svm_invpcid_supported,
4535 .mpx_supported = svm_mpx_supported,
4536 .xsaves_supported = svm_xsaves_supported,
4537
4538 .set_supported_cpuid = svm_set_supported_cpuid,
4539
4540 .has_wbinvd_exit = svm_has_wbinvd_exit,
4541
4542 .read_tsc_offset = svm_read_tsc_offset,
4543 .write_tsc_offset = svm_write_tsc_offset,
4544 .adjust_tsc_offset_guest = svm_adjust_tsc_offset_guest,
4545 .read_l1_tsc = svm_read_l1_tsc,
4546
4547 .set_tdp_cr3 = set_tdp_cr3,
4548
4549 .check_intercept = svm_check_intercept,
4550 .handle_external_intr = svm_handle_external_intr,
4551
4552 .sched_in = svm_sched_in,
4553
4554 .pmu_ops = &amd_pmu_ops,
4555 };
4556
4557 static int __init svm_init(void)
4558 {
4559 return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
4560 __alignof__(struct vcpu_svm), THIS_MODULE);
4561 }
4562
4563 static void __exit svm_exit(void)
4564 {
4565 kvm_exit();
4566 }
4567
4568 module_init(svm_init)
4569 module_exit(svm_exit)
4570