/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

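/*
 * hyp_alternate_select() builds a tiny helper that is patched at boot
 * (via the arm64 alternatives framework) to return a pointer to one of
 * two implementations, depending on whether the CPU capability given as
 * the last argument is present. Here it selects between the non-VHE and
 * VHE flavours based on ARM64_HAS_VIRT_HOST_EXTN.
 *
 * FP/SIMD trapping is controlled by CPTR_EL2.TFP on non-VHE systems and
 * by CPACR_EL1.FPEN when running with VHE, hence the two variants below.
 */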
static bool __hyp_text __fpsimd_enabled_nvhe(void)
{
	return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
}

static bool __hyp_text __fpsimd_enabled_vhe(void)
{
	return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN);
}

static hyp_alternate_select(__fpsimd_is_enabled,
			    __fpsimd_enabled_nvhe, __fpsimd_enabled_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

bool __hyp_text __fpsimd_enabled(void)
{
	return __fpsimd_is_enabled()();
}

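/*
 * With VHE (HCR_EL2.E2H set), the _EL1 sysreg accessors used at EL2 are
 * redirected to their EL2 counterparts, so the VHE variant programs
 * CPTR_EL2 and VBAR_EL2 through cpacr_el1/vbar_el1. The non-VHE variant
 * writes CPTR_EL2 directly; its EL2 vectors are installed separately.
 */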
static void __hyp_text __activate_traps_vhe(void)
{
	u64 val;

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
	val &= ~CPACR_EL1_FPEN;
	write_sysreg(val, cpacr_el1);

	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}

static void __hyp_text __activate_traps_nvhe(void)
{
	u64 val;

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
	write_sysreg(val, cptr_el2);
}

static hyp_alternate_select(__activate_traps_arch,
			    __activate_traps_nvhe, __activate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1.  Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 */
	val = vcpu->arch.hcr_el2;
	if (!(val & HCR_RW)) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
	write_sysreg(val, hcr_el2);
	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);
	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	write_sysreg(0, pmselr_el0);
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
	__activate_traps_arch()();
}

static void __hyp_text __deactivate_traps_vhe(void)
{
	extern char vectors[];	/* kernel exception vectors */

	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	write_sysreg(CPACR_EL1_FPEN, cpacr_el1);
	write_sysreg(vectors, vbar_el1);
}

static void __hyp_text __deactivate_traps_nvhe(void)
{
	write_sysreg(HCR_RW, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

static hyp_alternate_select(__deactivate_traps_arch,
			    __deactivate_traps_nvhe, __deactivate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE)
		vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);

	__deactivate_traps_arch()();
	write_sysreg(0, hstr_el2);
	write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
	write_sysreg(0, pmuserenr_el0);
}

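/*
 * VTTBR_EL2 holds the VMID and the base address of the stage-2
 * translation tables. Loading the guest's value exposes its stage-2
 * mapping to the hardware; it is zeroed again on exit, and a non-zero
 * VTTBR_EL2 is what __hyp_panic() uses to detect that a guest was
 * running.
 */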
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);

	write_sysreg(kvm->arch.vttbr, vttbr_el2);
}

static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, vttbr_el2);
}

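/*
 * HCR_INT_OVERRIDE (the IMO/FMO bits) routes physical IRQs and FIQs to
 * EL2 while the guest runs, and vcpu->arch.irq_lines carries the
 * virtual IRQ/FIQ lines (HCR_EL2.VI/VF) requested by the host. Both are
 * set before entering the guest; IMO/FMO are dropped again once the GIC
 * state has been saved on exit.
 */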
static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_save_state(vcpu);
	else
		__vgic_v2_save_state(vcpu);

	write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2);
}

static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
{
	u64 val;

	val = read_sysreg(hcr_el2);
	val |= HCR_INT_OVERRIDE;
	val |= vcpu->arch.irq_lines;
	write_sysreg(val, hcr_el2);

	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_restore_state(vcpu);
	else
		__vgic_v2_restore_state(vcpu);
}

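/*
 * Trivial true/false helpers so that the Cortex-A57 erratum 834220
 * check in __populate_fault_info() can be resolved at boot time through
 * the alternatives mechanism: __check_arm_834220()() returns true only
 * on CPUs that carry the erratum.
 */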
static bool __hyp_text __true_value(void)
{
	return true;
}

static bool __hyp_text __false_value(void)
{
	return false;
}

static hyp_alternate_select(__check_arm_834220,
			    __false_value, __true_value,
			    ARM64_WORKAROUND_834220);

static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
	asm volatile("at s1e1r, %0" : : "r" (far));
	isb();

	tmp = read_sysreg(par_el1);
	write_sysreg(par, par_el1);

	if (unlikely(tmp & 1))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format: PA[47:12] ends up in HPFAR_EL2[39:4] */
	*hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
	return true;
}

static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u64 esr = read_sysreg_el2(esr);
	u8 ec = ESR_ELx_EC(esr);
	u64 hpfar, far;

	vcpu->arch.fault.esr_el2 = esr;

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(far);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases is true:
	 *   1. The fault was due to a permission fault
	 *   2. The processor carries erratum 834220
	 *
	 * Therefore, for all non-S1PTW faults where we either have a
	 * permission fault or the erratum workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}

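/*
 * Advance the guest PC past the instruction that trapped. AArch64
 * instructions are always four bytes, so a simple increment is enough;
 * for AArch32 guests, kvm_skip_instr32() also deals with 16-bit Thumb
 * encodings and the IT state bits held in SPSR.
 */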
static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(elr);

	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
		kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
	} else {
		*vcpu_pc(vcpu) += 4;
	}

	write_sysreg_el2(*vcpu_pc(vcpu), elr);
}

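/*
 * The world switch proper: save the host context, install the guest's
 * traps, stage-2 tables, GIC, timer, debug and system register state,
 * enter the guest via __guest_enter(), and undo all of that in reverse
 * order on exit. FP/SIMD registers are only switched if the guest
 * actually used them, i.e. if the lazy FP/SIMD trap fired while it ran.
 */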
int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool fp_enabled;
	u64 exit_code;

	vcpu = kern_hyp_va(vcpu);
	write_sysreg(vcpu, tpidr_el2);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	guest_ctxt = &vcpu->arch.ctxt;

	__sysreg_save_host_state(host_ctxt);
	__debug_cond_save_host_state(vcpu);

	__activate_traps(vcpu);
	__activate_vm(vcpu);

	__vgic_restore_state(vcpu);
	__timer_restore_state(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_guest_state(guest_ctxt);
	__debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);

	/* Jump in the fire! */
again:
	exit_code = __guest_enter(vcpu, host_ctxt);
	/* And we're baaack! */

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
		goto again;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
	    exit_code == ARM_EXCEPTION_TRAP) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_dabt_isextabt(vcpu) &&
			!kvm_vcpu_dabt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1) {
				__skip_instr(vcpu);
				goto again;
			}

			if (ret == -1) {
				/* Promote an illegal access to an SError */
				__skip_instr(vcpu);
				exit_code = ARM_EXCEPTION_EL1_SERROR;
			}

			/* 0 falls through to be handled out of EL2 */
		}
	}

	fp_enabled = __fpsimd_enabled();

	__sysreg_save_guest_state(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_save_state(vcpu);
	__vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_host_state(host_ctxt);

	if (fp_enabled) {
		__fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
		__fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
	}

	__debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
	__debug_cond_restore_host_state(vcpu);

	return exit_code;
}

static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
{
	unsigned long str_va;

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * reference.
	 */
	asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));

	__hyp_do_panic(str_va,
		       spsr,  elr,
		       read_sysreg(esr_el2),   read_sysreg_el2(far),
		       read_sysreg(hpfar_el2), par,
		       (void *)read_sysreg(tpidr_el2));
}

static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par)
{
	panic(__hyp_panic_string,
	      spsr,  elr,
	      read_sysreg_el2(esr),   read_sysreg_el2(far),
	      read_sysreg(hpfar_el2), par,
	      (void *)read_sysreg(tpidr_el2));
}

static hyp_alternate_select(__hyp_call_panic,
			    __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

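/*
 * Called on an unexpected exception taken to EL2. If VTTBR_EL2 is
 * non-zero we were interrupted while running a guest, so restore enough
 * host state for __hyp_do_panic()/panic() to work before reporting.
 */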
void __hyp_text __noreturn __hyp_panic(void)
{
	u64 spsr = read_sysreg_el2(spsr);
	u64 elr = read_sysreg_el2(elr);
	u64 par = read_sysreg(par_el1);

	if (read_sysreg(vttbr_el2)) {
		struct kvm_vcpu *vcpu;
		struct kvm_cpu_context *host_ctxt;

		vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
		__timer_save_state(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_host_state(host_ctxt);
	}

	/* Call panic for real */
	__hyp_call_panic()(spsr, elr, par);

	unreachable();
}