/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

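/*
 * CR0/CR4 bits that hardware may allow the guest to own, i.e. to modify
 * without a VM-exit.  Reads of these bits must funnel through
 * kvm_read_cr0_bits()/kvm_read_cr4_bits() below so that a stale cached
 * value is refreshed from hardware first.
 */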
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE | X86_CR4_TSD)

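/*
 * Generate kvm_<reg>_read()/kvm_<reg>_write() accessors for the GPRs,
 * e.g. kvm_rax_read() and kvm_rax_write().  These access the register
 * cache directly, without the regs_avail/regs_dirty bookkeeping done by
 * kvm_register_read()/kvm_register_write() below.
 */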
#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				      \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{									      \
	return vcpu->arch.regs[VCPU_REGS_##uname];			      \
}									      \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	      \
						unsigned long val)	      \
{									      \
	vcpu->arch.regs[VCPU_REGS_##uname] = val;			      \
}
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
BUILD_KVM_GPR_ACCESSORS(r8,  R8)
BUILD_KVM_GPR_ACCESSORS(r9,  R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif

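/*
 * Read a register through the cache, lazily loading it via ->cache_reg()
 * (e.g. from the VMCS on VMX) when the cached value is not yet valid.
 */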
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
					      enum kvm_reg reg)
{
	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}

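/*
 * Writes mark the register both available and dirty; dirty values must
 * be flushed back to hardware before the next VM-entry.
 */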
static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      enum kvm_reg reg,
				      unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}

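/*
 * An illustrative (hypothetical) use of the cached accessors is advancing
 * RIP when skipping an emulated instruction; the write marks RIP dirty so
 * the new value reaches the VMCS/VMCB on entry:
 *
 *	unsigned long rip = kvm_rip_read(vcpu);
 *	kvm_rip_write(vcpu, rip + insn_len);
 *
 * where insn_len is a hypothetical instruction length computed elsewhere.
 */
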
static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RSP, val);
}

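/*
 * PDPTRs are only meaningful with PAE paging.  The might_sleep() below
 * documents that caching them on SVM can require reading them from guest
 * memory, which may sleep.
 */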
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, (enum kvm_reg)VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

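/*
 * If the read touches a guest-owned CR0/CR4 bit, the cached value may be
 * stale; decache from hardware before applying the mask.
 */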
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if (tmask & vcpu->arch.cr0_guest_owned_bits)
		kvm_x86_ops->decache_cr0_guest_bits(vcpu);
	return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if (tmask & vcpu->arch.cr4_guest_owned_bits)
		kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	return vcpu->arch.cr4 & mask;
}

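/* CR3 is tracked via regs_avail rather than guest-owned bit masks. */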
static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
		kvm_x86_ops->decache_cr3(vcpu);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

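/*
 * Assemble the 64-bit EDX:EAX pair used by instructions such as RDMSR
 * and RDTSC; "& -1u" truncates each register to its low 32 bits.
 */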
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_rax_read(vcpu) & -1u)
		| ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}

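/*
 * "Guest mode" refers to nested virtualization: HF_GUEST_MASK is set
 * while the vCPU is running an L2 guest.
 */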
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

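/* True if the vCPU is in System Management Mode. */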
static inline bool is_smm(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_SMM_MASK;
}

#endif