// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/esr.h>

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};

/*
 * This performs the exception entry at a given EL (@target_mode), stashing PC
 * and PSTATE into ELR and SPSR respectively, and computing the new PC/PSTATE.
 * The mode passed to this function *must* be a non-secure, privileged mode
 * with bit 0 set (PSTATE.SP == 1).
 *
 * When an exception is taken, most PSTATE fields are left unchanged in the
 * handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all
 * of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx
 * layouts, so we don't need to shuffle these for exceptions from AArch32 EL0.
 *
 * For the SPSR_ELx layout for AArch64, see ARM DDI 0487E.a page C5-429.
 * For the SPSR_ELx layout for AArch32, see ARM DDI 0487E.a page C5-426.
 *
 * Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
 * MSB to LSB.
 */
static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
			      enum exception_type type)
{
	unsigned long sctlr, vbar, old, new, mode;
	u64 exc_offset;

	mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);

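	/*
	 * Pick the vector offset: exception taken from the current EL using
	 * SP_ELx, from the current EL using SP_EL0, or from a lower EL in
	 * either AArch64 or AArch32 state.
	 */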
	if (mode == target_mode)
		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
	else if ((mode | PSR_MODE_THREAD_BIT) == target_mode)
		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
	else if (!(mode & PSR_MODE32_BIT))
		exc_offset = LOWER_EL_AArch64_VECTOR;
	else
		exc_offset = LOWER_EL_AArch32_VECTOR;

	switch (target_mode) {
	case PSR_MODE_EL1h:
		vbar = vcpu_read_sys_reg(vcpu, VBAR_EL1);
		sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL1);
		break;
	default:
		/* Don't do that */
		BUG();
	}

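	/*
	 * The new PC is the guest's vector base, plus the offset for the
	 * source context, plus the offset for the exception type.
	 */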
	*vcpu_pc(vcpu) = vbar + exc_offset + type;

	old = *vcpu_cpsr(vcpu);
	new = 0;

	new |= (old & PSR_N_BIT);
	new |= (old & PSR_Z_BIT);
	new |= (old & PSR_C_BIT);
	new |= (old & PSR_V_BIT);

	// TODO: TCO (if/when ARMv8.5-MemTag is exposed to guests)

	new |= (old & PSR_DIT_BIT);

	// PSTATE.UAO is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, page D5-2579.

	// PSTATE.PAN is unchanged unless SCTLR_ELx.SPAN == 0b0
	// SCTLR_ELx.SPAN is RES1 when ARMv8.1-PAN is not implemented
	// See ARM DDI 0487E.a, page D5-2578.
	new |= (old & PSR_PAN_BIT);
	if (!(sctlr & SCTLR_EL1_SPAN))
		new |= PSR_PAN_BIT;

	// PSTATE.SS is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, page D2-2452.

	// PSTATE.IL is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, page D1-2306.

	// PSTATE.SSBS is set to SCTLR_ELx.DSSBS upon any exception to AArch64
	// See ARM DDI 0487E.a, page D13-3258
	if (sctlr & SCTLR_ELx_DSSBS)
		new |= PSR_SSBS_BIT;

	// PSTATE.BTYPE is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, pages D1-2293 to D1-2294.

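	// PSTATE.{D,A,I,F} are all masked upon any exception to AArch64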
	new |= PSR_D_BIT;
	new |= PSR_A_BIT;
	new |= PSR_I_BIT;
	new |= PSR_F_BIT;

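	// PSTATE.M is set to the target mode/SP selection (EL1h here)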
	new |= target_mode;

	*vcpu_cpsr(vcpu) = new;
	vcpu_write_spsr(vcpu, old);
}

static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
	u32 esr = 0;

	enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);

	vcpu_write_sys_reg(vcpu, addr, FAR_EL1);

	/*
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
	else
		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);

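	/*
	 * For a data abort, convert the IABT EC into the matching DABT EC.
	 * ORing in ESR_ELx_EC_DABT_LOW works because the DABT encodings only
	 * add bits on top of the IABT ones (0x20 -> 0x24, 0x21 -> 0x25).
	 */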
	if (!is_iabt)
		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;

	vcpu_write_sys_reg(vcpu, esr | ESR_ELx_FSC_EXTABT, ESR_EL1);
}

static void inject_undef64(struct kvm_vcpu *vcpu)
{
	u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

	enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);

	/*
	 * Build an unknown exception, depending on the instruction
	 * set.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
}

/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR (AArch32) or FAR_EL1 (AArch64)
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (vcpu_el1_is_32bit(vcpu))
		kvm_inject_dabt32(vcpu, addr);
	else
		inject_abt64(vcpu, false, addr);
}

/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR (AArch32) or FAR_EL1 (AArch64)
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (vcpu_el1_is_32bit(vcpu))
		kvm_inject_pabt32(vcpu, addr);
	else
		inject_abt64(vcpu, true, addr);
}

/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The VCPU in which to inject the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	if (vcpu_el1_is_32bit(vcpu))
		kvm_inject_undef32(vcpu);
	else
		inject_undef64(vcpu);
}

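/*
 * Pend a virtual SError for the guest: the syndrome is latched in VSESR_EL2
 * (reported to the guest via ESR_EL1 when the SError is taken) and the
 * injection itself is requested by setting HCR_EL2.VSE.
 */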
void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
{
	vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
	*vcpu_hcr(vcpu) |= HCR_VSE;
}

/**
 * kvm_inject_vabt - inject an async abort / SError into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 *
 * Systems with the RAS Extensions specify an imp-def ESR (ISV/IDS = 1) with
 * the remaining ISS all-zeros so that this error is not interpreted as an
 * uncategorized RAS error. Without the RAS Extensions we can't specify an ESR
 * value, so the CPU generates an imp-def value.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
	kvm_set_sei_esr(vcpu, ESR_ELx_ISV);
}