#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

/* Drop any exception that is queued for injection into the guest. */
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
}

/*
 * Queue an interrupt for injection; @soft marks software interrupts
 * (INTn), which are delivered differently from hardware interrupts.
 */
static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
				       bool soft)
{
	vcpu->arch.interrupt.pending = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}
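
/*
 * Usage sketch (illustrative, not part of this header): the injection
 * path queues a hardware interrupt taken from the interrupt controller:
 *
 *	kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false);
 */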

/* Drop any interrupt that is queued for injection into the guest. */
static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.pending = false;
}

/*
 * True if an exception, an interrupt or an NMI is queued and still has
 * to be injected (or re-injected) into the guest.
 */
static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending ||
		vcpu->arch.nmi_injected;
}
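
/*
 * Illustrative use (a sketch of run-loop logic; not the literal
 * injection code): a saved event is re-delivered before any new
 * interrupt is considered:
 *
 *	if (kvm_event_needs_reinjection(vcpu))
 *		... re-inject the saved event ...
 *	else if (kvm_cpu_has_interrupt(vcpu))
 *		... queue a fresh interrupt ...
 */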

/* #BP (INT3) and #OF (INTO) are the software exceptions. */
static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

/* CR0.PE set: the vcpu runs in protected mode. */
static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

/* EFER.LMA set: long mode is active (only possible on 64-bit hosts). */
static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

/* True if guest page tables are walked through the nested MMU. */
static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}
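
/*
 * Illustrative combination (a sketch of how a paging mode can be
 * derived from these predicates; not the literal MMU setup code):
 *
 *	if (!is_paging(vcpu))
 *		... non-paged mode ...
 *	else if (is_long_mode(vcpu))
 *		... 4-level paging ...
 *	else if (is_pae(vcpu))
 *		... PAE paging ...
 *	else
 *		... 32-bit paging ...
 */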

/* Return a 32-bit mask with bit (bitno % 32) set. */
static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}
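
/*
 * Usage sketch (illustrative; mirrors how the CPUID code builds
 * register masks from feature-bit numbers, which wrap modulo 32):
 *
 *	#define F(x) bit(X86_FEATURE_##x)
 *
 *	const u32 supported = F(XSAVE) | F(FSGSBASE);
 */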

/*
 * Cache the gva->gfn translation of the most recent MMIO access so
 * that a subsequent access to the same page can skip the walk.
 */
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	vcpu->arch.mmio_gva = gva & PAGE_MASK;
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
}

/*
 * Clear the mmio cache info for the given gva. If gva is ~0ul,
 * all mmio cache info is cleared.
 */
static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != (~0ul) && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}
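
/*
 * Illustrative calls (not part of this header): page-granular
 * invalidation, e.g. for INVLPG emulation, drops the cached info for
 * one gva, while a full flush drops it entirely:
 *
 *	vcpu_clear_mmio_info(vcpu, gva);	// single page
 *	vcpu_clear_mmio_info(vcpu, ~0ul);	// everything
 */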

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu->arch.mmio_gva && vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu->arch.mmio_gfn && vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}
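
/*
 * Sketch of a caller (illustrative; the surrounding code is
 * hypothetical): the emulator can short-circuit the gva->gpa
 * translation when an access hits the cached MMIO page:
 *
 *	if (vcpu_match_mmio_gva(vcpu, gva)) {
 *		*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
 *		       (gva & ~PAGE_MASK);
 *		return 1;	// known MMIO, skip the walk
 *	}
 */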

void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);

int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);
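
/*
 * Illustrative call (a sketch, not part of this header): reading guest
 * memory through the emulator context, propagating a fault on failure:
 *
 *	struct x86_exception e;
 *
 *	if (kvm_read_guest_virt(ctxt, addr, &val, sizeof(val), &e)
 *	    != X86EMUL_CONTINUE)
 *		... inject the fault described by e ...
 */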

extern u64 host_xcr0;

extern struct static_key kvm_no_apic_vcpu;
#endif