/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/esr.h>

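/*
 * Offsets into the AArch64 exception vector table, which hangs off
 * VBAR_EL1 and consists of four groups of four 0x80-byte vectors,
 * selected by the EL, stack pointer and register width the exception
 * was taken from.
 */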
#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

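/*
 * Offset of each exception type within a vector group: synchronous
 * exceptions at +0x0, then IRQ, FIQ and SError at 0x80-byte strides.
 */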
enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};

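/*
 * Pick the vector the exception would use on real hardware: the mode
 * the vCPU was in selects the vector group, and the exception type
 * selects the entry within that group.
 */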
static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
{
	u64 exc_offset;

	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
	case PSR_MODE_EL1t:
		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
		break;
	case PSR_MODE_EL1h:
		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
		break;
	case PSR_MODE_EL0t:
		exc_offset = LOWER_EL_AArch64_VECTOR;
		break;
	default:
		exc_offset = LOWER_EL_AArch32_VECTOR;
	}

	return vcpu_read_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
}

/*
 * When an exception is taken, most PSTATE fields are left unchanged in the
 * handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all
 * of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx
 * layouts, so we don't need to shuffle these for exceptions from AArch32 EL0.
 *
 * For the SPSR_ELx layout for AArch64, see ARM DDI 0487E.a page C5-429.
 * For the SPSR_ELx layout for AArch32, see ARM DDI 0487E.a page C5-426.
 *
 * Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
 * MSB to LSB.
 */
static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
{
	unsigned long sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
	unsigned long old, new;

	old = *vcpu_cpsr(vcpu);
	new = 0;

	new |= (old & PSR_N_BIT);
	new |= (old & PSR_Z_BIT);
	new |= (old & PSR_C_BIT);
	new |= (old & PSR_V_BIT);

	// TODO: TCO (if/when ARMv8.5-MemTag is exposed to guests)

	new |= (old & PSR_DIT_BIT);

	// PSTATE.UAO is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, page D5-2579.

	// PSTATE.PAN is unchanged unless SCTLR_ELx.SPAN == 0b0
	// SCTLR_ELx.SPAN is RES1 when ARMv8.1-PAN is not implemented
	// See ARM DDI 0487E.a, page D5-2578.
	new |= (old & PSR_PAN_BIT);
	if (!(sctlr & SCTLR_EL1_SPAN))
		new |= PSR_PAN_BIT;

	// PSTATE.SS is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, page D2-2452.

	// PSTATE.IL is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, page D1-2306.

	// PSTATE.SSBS is set to SCTLR_ELx.DSSBS upon any exception to AArch64
	// See ARM DDI 0487E.a, page D13-3258
	if (sctlr & SCTLR_ELx_DSSBS)
		new |= PSR_SSBS_BIT;

	// PSTATE.BTYPE is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, pages D1-2293 to D1-2294.

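	// PSTATE.{D,A,I,F} are all set to one (masked) upon any exception
	// taken to AArch64, matching what the CPU itself would do.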
	new |= PSR_D_BIT;
	new |= PSR_A_BIT;
	new |= PSR_I_BIT;
	new |= PSR_F_BIT;

	new |= PSR_MODE_EL1h;

	return new;
}

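/*
 * Emulate taking a synchronous external abort to EL1: save the return
 * state, branch to the EL1 vector, and report the faulting address and
 * syndrome the way the hardware would.
 */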
static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
	u32 esr = 0;

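	/*
	 * Preserve PC in ELR_EL1 and the old PSTATE in SPSR_EL1, then
	 * point PC at the EL1 synchronous exception vector.
	 */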
	vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

	*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
	vcpu_write_spsr(vcpu, cpsr);

	vcpu_write_sys_reg(vcpu, addr, FAR_EL1);

	/*
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
	else
		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);

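	/*
	 * The data abort ECs are the instruction abort ECs with bit 2
	 * set (0x24/0x25 vs 0x20/0x21), so OR-ing in DABT_LOW turns
	 * either IABT EC into the matching DABT one.
	 */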
	if (!is_iabt)
		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;

	vcpu_write_sys_reg(vcpu, esr | ESR_ELx_FSC_EXTABT, ESR_EL1);
}

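/*
 * Emulate taking an "unknown reason" (UNDEF) exception to EL1: the same
 * entry sequence as for aborts, but with an UNKNOWN syndrome and no
 * faulting address.
 */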
static void inject_undef64(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

	vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

	*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
	vcpu_write_spsr(vcpu, cpsr);

	/*
	 * Build an unknown exception, depending on the instruction
	 * set.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
}

/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR/FAR_EL1
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (vcpu_el1_is_32bit(vcpu))
		kvm_inject_dabt32(vcpu, addr);
	else
		inject_abt64(vcpu, false, addr);
}

/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR/FAR_EL1
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (vcpu_el1_is_32bit(vcpu))
		kvm_inject_pabt32(vcpu, addr);
	else
		inject_abt64(vcpu, true, addr);
}

/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The VCPU to receive the undefined exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	if (vcpu_el1_is_32bit(vcpu))
		kvm_inject_undef32(vcpu);
	else
		inject_undef64(vcpu);
}

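/*
 * Pend a virtual SError for the guest: HCR_EL2.VSE makes the SError
 * pending, and (with the RAS Extensions) VSESR_EL2 supplies the ISS
 * the guest will see in its ESR_EL1 when it takes the exception.
 */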
void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
{
	vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
	*vcpu_hcr(vcpu) |= HCR_VSE;
}

/**
 * kvm_inject_vabt - inject an async abort / SError into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 *
 * Systems with the RAS Extensions specify an imp-def ESR (ISV/IDS = 1) with
 * the remaining ISS all-zeros so that this error is not interpreted as an
 * uncategorized RAS error. Without the RAS Extensions we can't specify an ESR
 * value, so the CPU generates an imp-def value.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
	kvm_set_sei_esr(vcpu, ESR_ELx_ISV);
}