/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/esr.h>

/* Exception-entry PSTATE: EL1 using SP_EL1 (EL1h), with D, A, I and F masked */
#define PSTATE_FAULT_BITS_64	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
				 PSR_I_BIT | PSR_D_BIT)

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

/*
 * Per-vector LR adjustments on AArch32 exception entry, taken from
 * ARMv8 ARM DDI0487B-B, table G1-10. First index is the vector offset
 * divided by 4, second index is the instruction set (0 = ARM, 1 = Thumb).
 */
static const u8 return_offsets[8][2] = {
	[0] = { 0, 0 },		/* Reset, unused */
	[1] = { 4, 2 },		/* Undefined */
	[2] = { 0, 0 },		/* SVC, unused */
	[3] = { 4, 4 },		/* Prefetch abort */
	[4] = { 8, 8 },		/* Data abort */
	[5] = { 0, 0 },		/* HVC, unused */
	[6] = { 4, 4 },		/* IRQ, unused */
	[7] = { 4, 4 },		/* FIQ, unused */
};

static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
	unsigned long cpsr;
	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
	bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
	u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);

	cpsr = mode | COMPAT_PSR_I_BIT;

	if (sctlr & (1 << 30))		/* SCTLR.TE: take exceptions in Thumb */
		cpsr |= COMPAT_PSR_T_BIT;
	if (sctlr & (1 << 25))		/* SCTLR.EE: big-endian exception entry */
		cpsr |= COMPAT_PSR_E_BIT;

	*vcpu_cpsr(vcpu) = cpsr;

	/* Note: These now point to the banked copies */
	*vcpu_spsr(vcpu) = new_spsr_value;
	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;

	/* Branch to exception vector */
	if (sctlr & (1 << 13))		/* SCTLR.V: high vectors (Hivecs) */
		vect_offset += 0xffff0000;
	else /* always have security exceptions */
		vect_offset += vcpu_cp15(vcpu, c12_VBAR);

	*vcpu_pc(vcpu) = vect_offset;
}

static void inject_undef32(struct kvm_vcpu *vcpu)
{
	/* The AArch32 Undefined Instruction vector lives at offset 4 */
	prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
}

/*
 * Modelled after TakeDataAbortException() and
 * TakePrefetchAbortException() pseudocode.
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
			 unsigned long addr)
{
	u32 vect_offset;
	u32 *far, *fsr;
	bool is_lpae;

	if (is_pabt) {
		/* Prefetch abort vector, report via IFAR/IFSR */
		vect_offset = 12;
		far = &vcpu_cp15(vcpu, c6_IFAR);
		fsr = &vcpu_cp15(vcpu, c5_IFSR);
	} else {
		/* Data abort vector, report via DFAR/DFSR */
		vect_offset = 16;
		far = &vcpu_cp15(vcpu, c6_DFAR);
		fsr = &vcpu_cp15(vcpu, c5_DFSR);
	}

	prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);

	*far = addr;

	/* Give the guest an IMPLEMENTATION DEFINED exception */
	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);	/* TTBCR.EAE */
	if (is_lpae)
		*fsr = (1 << 9) | 0x34;	/* bit 9 flags the LPAE FSR format */
	else
		*fsr = 0x14;		/* short-descriptor format */
}

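/*
 * Worked example for inject_abt32() above (assuming a non-LPAE guest
 * with Hivecs disabled): a data abort at address A leaves DFAR = A,
 * DFSR = 0x14, LR_abt = faulting PC + 8, and resumes the guest in
 * Abort mode at VBAR + 16.
 */
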
enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};

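/*
 * For reference, the AArch64 EL1 vector table layout (ARMv8 ARM): each
 * row starts at VBAR_EL1 plus one of the offsets defined at the top of
 * this file, and the exception_type values select the column:
 *
 *			sync	irq	fiq	serror
 *	SP_EL0		0x000	0x080	0x100	0x180
 *	SP_ELx		0x200	0x280	0x300	0x380
 *	lower AArch64	0x400	0x480	0x500	0x580
 *	lower AArch32	0x600	0x680	0x700	0x780
 */
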
static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
{
	u64 exc_offset;

	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
	case PSR_MODE_EL1t:
		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
		break;
	case PSR_MODE_EL1h:
		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
		break;
	case PSR_MODE_EL0t:
		exc_offset = LOWER_EL_AArch64_VECTOR;
		break;
	default:
		exc_offset = LOWER_EL_AArch32_VECTOR;
	}

	return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
}

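/*
 * Example: a guest interrupted at EL1 with SP_EL1 selected (PSR_MODE_EL1h)
 * takes a synchronous exception at VBAR_EL1 + 0x200 + 0.
 */
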
static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
	u32 esr = 0;

	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
	*vcpu_spsr(vcpu) = cpsr;

	vcpu_sys_reg(vcpu, FAR_EL1) = addr;

	/*
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
	else
		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);

	/*
	 * ORing in EC_DABT_LOW (0x24) on top of EC_IABT_LOW (0x20) or
	 * EC_IABT_CUR (0x21) yields EC_DABT_LOW (0x24) or EC_DABT_CUR
	 * (0x25), so a single OR turns either iabt class into the
	 * corresponding dabt class.
	 */
	if (!is_iabt)
		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;

	vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT;
}

static void inject_undef64(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
	*vcpu_spsr(vcpu) = cpsr;

	/*
	 * Build an unknown exception, depending on the instruction
	 * set.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	vcpu_sys_reg(vcpu, ESR_EL1) = esr;
}

/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR (FAR_EL1 for a 64-bit guest)
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!(vcpu->arch.hcr_el2 & HCR_RW))
		inject_abt32(vcpu, false, addr);
	else
		inject_abt64(vcpu, false, addr);
}

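/*
 * Illustrative use of kvm_inject_dabt() (a sketch, not taken from this
 * file): a stage-2 abort handler can reflect an unresolvable guest
 * access back as a guest-visible data abort, e.g.:
 *
 *	kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 */
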
/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR (FAR_EL1 for a 64-bit guest)
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!(vcpu->arch.hcr_el2 & HCR_RW))
		inject_abt32(vcpu, true, addr);
	else
		inject_abt64(vcpu, true, addr);
}

/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The VCPU to receive the undefined exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.hcr_el2 & HCR_RW))
		inject_undef32(vcpu);
	else
		inject_undef64(vcpu);
}

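/*
 * Illustrative use of kvm_inject_undefined() (a sketch, not taken from
 * this file): an exit handler that traps an instruction the hypervisor
 * chooses not to emulate can make the guest see it as UNDEFINED, e.g.:
 *
 *	kvm_inject_undefined(vcpu);
 *	return 1;	(resume the guest, which then takes the exception)
 */
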
/**
 * kvm_inject_vabt - inject an async abort / SError into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
	/*
	 * Setting HCR_EL2.VSE pends a virtual SError, which the CPU
	 * delivers once the guest runs with SErrors unmasked; no guest
	 * registers need to be touched here.
	 */
	vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VSE);
}