/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef __ARM_KVM_EMULATE_H__
#define __ARM_KVM_EMULATE_H__

#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_arm.h>
#include <asm/cputype.h>

unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);

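/*
 * vcpu_reg() returns a pointer to the vCPU's saved copy of general-purpose
 * register @reg_num for the guest's current mode; the inline helpers below
 * read or write a register through that pointer.
 */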
static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
                                         u8 reg_num)
{
        return *vcpu_reg(vcpu, reg_num);
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
                                unsigned long val)
{
        *vcpu_reg(vcpu, reg_num) = val;
}

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
        return kvm_condition_valid32(vcpu);
}

static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
        kvm_skip_instr32(vcpu, is_wide_instr);
}

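/*
 * HCR (Hyp Configuration Register) accessors: vcpu_reset_hcr() installs
 * HCR_GUEST_MASK, the default set of traps and features enabled for a guest.
 */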
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr = HCR_GUEST_MASK;
}

static inline unsigned long vcpu_get_hcr(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.hcr;
}

static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
{
        vcpu->arch.hcr = hcr;
}

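/* A 32-bit host can only run 32-bit guests, so this is always true. */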
static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
        return 1;
}

static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
{
        return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
        *vcpu_cpsr(vcpu) |= PSR_T_BIT;
}

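/* Every mode except USR and SYS has a banked SPSR of its own. */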
static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
{
        unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
        return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
}

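/* Any mode other than USR counts as privileged. */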
static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
{
        unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
        return cpsr_mode > USR_MODE;
}

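/*
 * The HSR (Hyp Syndrome Register) is saved on each trap to Hyp mode and
 * describes why the guest exited; the helpers below decode its fields.
 */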
static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.hsr;
}

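/*
 * Returns the trapped instruction's condition code if the syndrome carries
 * one (HSR.CV set), or -1 when no valid condition code is available.
 */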
static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
        u32 hsr = kvm_vcpu_get_hsr(vcpu);

        if (hsr & HSR_CV)
                return (hsr & HSR_COND) >> HSR_COND_SHIFT;

        return -1;
}

static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.hxfar;
}

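/*
 * HPFAR holds bits [39:12] of the faulting IPA in its bits [31:4]; shifting
 * the masked value left by 8 reconstructs the page-aligned IPA.
 */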
static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
{
        return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
}

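/*
 * True when the HSR carries a valid instruction syndrome (ISV), i.e. the
 * data abort can be emulated as MMIO without decoding the faulting
 * instruction.
 */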
static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
}

static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
}

static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
        return false;
}

static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
{
        return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
}

static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM);
}

/* Get Access Size from a data abort */
static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
{
        switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
        case 0:
                return 1;
        case 1:
                return 2;
        case 2:
                return 4;
        default:
                kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
                return -EFAULT;
        }
}

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
}

static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
}

static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
}

static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
}

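/*
 * True for synchronous external aborts, including those taken on a stage 1
 * translation table walk, and for synchronous parity/ECC errors.
 */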
static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
{
        switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
        case FSC_SEA:
        case FSC_SEA_TTW0:
        case FSC_SEA_TTW1:
        case FSC_SEA_TTW2:
        case FSC_SEA_TTW3:
        case FSC_SECC:
        case FSC_SECC_TTW0:
        case FSC_SECC_TTW1:
        case FSC_SECC_TTW2:
        case FSC_SECC_TTW3:
                return true;
        default:
                return false;
        }
}

static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
}

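/* The affinity fields of the guest's MPIDR, as held in its cp15 state. */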
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
        return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK;
}

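/* CPSR.E selects the guest's data endianness: set for big-endian accesses. */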
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
        *vcpu_cpsr(vcpu) |= PSR_E_BIT;
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
        return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
}

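/*
 * Convert MMIO data written by the guest into host byte order, honouring
 * the guest's current endianness and the access size. The MMIO fault
 * handling path typically passes the register value obtained with
 * vcpu_get_reg() through here before handing the data to userspace or an
 * in-kernel device.
 */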
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
                                                    unsigned long data,
                                                    unsigned int len)
{
        if (kvm_vcpu_is_be(vcpu)) {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return be16_to_cpu(data & 0xffff);
                default:
                        return be32_to_cpu(data);
                }
        } else {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return le16_to_cpu(data & 0xffff);
                default:
                        return le32_to_cpu(data);
                }
        }
}

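/*
 * The inverse of vcpu_data_guest_to_host(): convert data read from a host
 * device into the representation the guest expects before it is written
 * back to the destination register on an MMIO load.
 */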
static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
                                                    unsigned long data,
                                                    unsigned int len)
{
        if (kvm_vcpu_is_be(vcpu)) {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return cpu_to_be16(data & 0xffff);
                default:
                        return cpu_to_be32(data);
                }
        } else {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return cpu_to_le16(data & 0xffff);
                default:
                        return cpu_to_le32(data);
                }
        }
}

#endif /* __ARM_KVM_EMULATE_H__ */