// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/switch.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/vectors.h>

/* VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);

/*
 * HCR_EL2 bits that the NV guest can freely change (no RES0/RES1
 * semantics, irrespective of the configuration), but that cannot be
 * applied to the actual HW as things would otherwise break badly.
 *
 * - TGE: we want the guest to use EL1, which is incompatible with
 *   this bit being set
 *
 * - API/APK: they are already accounted for by vcpu_load(), and can
 *   only take effect across a load/put cycle (such as ERET)
 */
#define NV_HCR_GUEST_EXCLUDE	(HCR_TGE | HCR_API | HCR_APK)

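/*
 * Compute the HCR_EL2 value the guest will actually run with. For an
 * NV guest in hyp context, force the NV/NV2/AT/TTLB traps (plus NV1
 * when vEL2 runs with E2H clear) and point VNCR_EL2 at the vcpu's
 * VNCR page, then merge in the guest hypervisor's own HCR_EL2 view,
 * minus the bits that must never reach the HW (NV_HCR_GUEST_EXCLUDE).
 */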
static u64 __compute_hcr(struct kvm_vcpu *vcpu)
{
	u64 hcr = vcpu->arch.hcr_el2;

	if (!vcpu_has_nv(vcpu))
		return hcr;

	if (is_hyp_ctxt(vcpu)) {
		hcr |= HCR_NV | HCR_NV2 | HCR_AT | HCR_TTLB;

		if (!vcpu_el2_e2h_is_set(vcpu))
			hcr |= HCR_NV1;

		write_sysreg_s(vcpu->arch.ctxt.vncr_array, SYS_VNCR_EL2);
	}

	return hcr | (__vcpu_sys_reg(vcpu, HCR_EL2) & ~NV_HCR_GUEST_EXCLUDE);
}

static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
{
	u64 cptr;

	/*
	 * With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
	 * CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2,
	 * except for some missing controls, such as TAM.
	 * In this case, CPTR_EL2.TAM has the same position with or without
	 * VHE (HCR.E2H == 1), which allows us to use the CPTR_EL2.TAM
	 * shift value here for trapping AMU accesses.
	 */
	u64 val = CPACR_ELx_TTA | CPTR_EL2_TAM;

	if (guest_owns_fp_regs()) {
		val |= CPACR_ELx_FPEN;
		if (vcpu_has_sve(vcpu))
			val |= CPACR_ELx_ZEN;
	} else {
		__activate_traps_fpsimd32(vcpu);
	}

	if (!vcpu_has_nv(vcpu))
		goto write;

	/*
	 * The architecture is a bit crap (what a surprise): an EL2 guest
	 * writing to CPTR_EL2 via CPACR_EL1 can't set any of TCPAC or TTA,
	 * as they are RES0 in the guest's view. To work around it, trap the
	 * sucker using the very same bit it can't set...
	 */
	if (vcpu_el2_e2h_is_set(vcpu) && is_hyp_ctxt(vcpu))
		val |= CPTR_EL2_TCPAC;

	/*
	 * Layer the guest hypervisor's trap configuration on top of our own if
	 * we're in a nested context.
	 */
	if (is_hyp_ctxt(vcpu))
		goto write;

	cptr = vcpu_sanitised_cptr_el2(vcpu);

	/*
	 * Pay attention, there's some interesting detail here.
	 *
	 * The CPTR_EL2.xEN fields are 2 bits wide, although there are only two
	 * meaningful trap states when HCR_EL2.TGE = 0 (running a nested guest):
	 *
	 *  - CPTR_EL2.xEN = x0, traps are enabled
	 *  - CPTR_EL2.xEN = x1, traps are disabled
	 *
	 * In other words, bit[0] determines if guest accesses trap or not. In
	 * the interest of simplicity, clear the entire field if the guest
	 * hypervisor has traps enabled to dispel any illusion of something more
	 * complicated taking place.
	 */
	if (!(SYS_FIELD_GET(CPACR_ELx, FPEN, cptr) & BIT(0)))
		val &= ~CPACR_ELx_FPEN;
	if (!(SYS_FIELD_GET(CPACR_ELx, ZEN, cptr) & BIT(0)))
		val &= ~CPACR_ELx_ZEN;

	if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, S2POE, IMP))
		val |= cptr & CPACR_ELx_E0POE;

	val |= cptr & CPTR_EL2_TCPAC;

write:
	write_sysreg(val, cpacr_el1);
}

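/*
 * Switch the run-time trap state over to the guest: HCR_EL2 as
 * computed above, the directly-accessed physical timer CVAL when
 * CNTPOFF is in use, the CPTR/CPACR traps, and the hyp vectors.
 */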
static void __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	___activate_traps(vcpu, __compute_hcr(vcpu));

	if (has_cntpoff()) {
		struct timer_map map;

		get_timer_map(vcpu, &map);

		/*
		 * We're entering the guest. Reload the correct
		 * values from memory now that TGE is clear.
		 */
		if (map.direct_ptimer == vcpu_ptimer(vcpu))
			val = __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
		if (map.direct_ptimer == vcpu_hptimer(vcpu))
			val = __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2);

		if (map.direct_ptimer) {
			write_sysreg_el0(val, SYS_CNTP_CVAL);
			isb();
		}
	}

	__activate_cptr_traps(vcpu);

	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el1);
}
NOKPROBE_SYMBOL(__activate_traps);

static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
	const char *host_vectors = vectors;

	___deactivate_traps(vcpu);

	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);

	if (has_cntpoff()) {
		struct timer_map map;
		u64 val, offset;

		get_timer_map(vcpu, &map);

		/*
		 * We're exiting the guest. Save the latest CVAL value
		 * to memory and apply the offset now that TGE is set.
		 */
		val = read_sysreg_el0(SYS_CNTP_CVAL);
		if (map.direct_ptimer == vcpu_ptimer(vcpu))
			__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = val;
		if (map.direct_ptimer == vcpu_hptimer(vcpu))
			__vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = val;

		offset = read_sysreg_s(SYS_CNTPOFF_EL2);

		if (map.direct_ptimer && offset) {
			write_sysreg_el0(val + offset, SYS_CNTP_CVAL);
			isb();
		}
	}

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL2/EL0 translation regime used by
	 * the host.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));

	kvm_reset_cptr_el2(vcpu);

	if (!arm64_kernel_unmapped_at_el0())
		host_vectors = __this_cpu_read(this_cpu_vector);
	write_sysreg(host_vectors, vbar_el1);
}
NOKPROBE_SYMBOL(__deactivate_traps);

/*
 * Disable IRQs in __vcpu_{load,put}_{activate,deactivate}_traps() to
 * prevent a race condition between context switching of PMUSERENR_EL0
 * in __{activate,deactivate}_traps_common() and IPIs that attempt to
 * update PMUSERENR_EL0. See also kvm_set_pmuserenr().
 */
static void __vcpu_load_activate_traps(struct kvm_vcpu *vcpu)
{
	unsigned long flags;

	local_irq_save(flags);
	__activate_traps_common(vcpu);
	__activate_traps_hcrx(vcpu);
	__activate_traps_hfgxtr(vcpu);
	local_irq_restore(flags);
}

static void __vcpu_put_deactivate_traps(struct kvm_vcpu *vcpu)
{
	unsigned long flags;

	local_irq_save(flags);
	__deactivate_traps_common(vcpu);
	__deactivate_traps_hfgxtr(vcpu);
	local_irq_restore(flags);
}

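/*
 * vcpu_load()-time setup: make the vcpu visible to the hyp panic
 * path, switch the EL1 sysregs over to the guest, activate the
 * load-time traps and load the guest's stage-2 MMU configuration.
 */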
void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu)
{
	host_data_ptr(host_ctxt)->__hyp_running_vcpu = vcpu;

	__vcpu_load_switch_sysregs(vcpu);
	__vcpu_load_activate_traps(vcpu);
	__load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
}

void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu)
{
	__vcpu_put_deactivate_traps(vcpu);
	__vcpu_put_switch_sysregs(vcpu);

	host_data_ptr(host_ctxt)->__hyp_running_vcpu = NULL;
}

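/*
 * Fast path for ERET (including ERETAA/ERETAB) trapped from a vEL2
 * guest hypervisor: perform the exception return in place rather
 * than going through a full exit/entry cycle, unless the trap has
 * to be forwarded.
 */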
static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);
	u64 spsr, elr, mode;

	/*
	 * Going through the whole put/load motions is a waste of time
	 * if this is a VHE guest hypervisor returning to its own
	 * userspace, or the hypervisor performing a local exception
	 * return. No need to save/restore registers, no need to
	 * switch S2 MMU. Just do the canonical ERET.
	 *
	 * Unless the trap has to be forwarded further down the line,
	 * of course...
	 */
	if ((__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_NV) ||
	    (__vcpu_sys_reg(vcpu, HFGITR_EL2) & HFGITR_EL2_ERET))
		return false;

	spsr = read_sysreg_el1(SYS_SPSR);
	mode = spsr & (PSR_MODE_MASK | PSR_MODE32_BIT);

	switch (mode) {
	case PSR_MODE_EL0t:
		if (!(vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)))
			return false;
		break;
	case PSR_MODE_EL2t:
		mode = PSR_MODE_EL1t;
		break;
	case PSR_MODE_EL2h:
		mode = PSR_MODE_EL1h;
		break;
	default:
		return false;
	}

	/* If ERETAx fails, take the slow path */
	if (esr_iss_is_eretax(esr)) {
		if (!(vcpu_has_ptrauth(vcpu) && kvm_auth_eretax(vcpu, &elr)))
			return false;
	} else {
		elr = read_sysreg_el1(SYS_ELR);
	}

	spsr = (spsr & ~(PSR_MODE_MASK | PSR_MODE32_BIT)) | mode;

	write_sysreg_el2(spsr, SYS_SPSR);
	write_sysreg_el2(elr, SYS_ELR);

	return true;
}

static bool kvm_hyp_handle_tlbi_el2(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	int ret = -EINVAL;
	u32 instr;
	u64 val;

	/*
	 * Ideally, we would never trap on EL2 S1 TLB invalidations using
	 * the EL1 instructions when the guest's HCR_EL2.{E2H,TGE}=={1,1}.
	 * But "thanks" to FEAT_NV2, we don't trap writes to HCR_EL2,
	 * meaning that we can't track changes to the virtual TGE bit. So we
	 * have to leave HCR_EL2.TTLB set on the host. Oopsie...
	 *
	 * Try and handle these invalidations as quickly as possible, without
	 * fully exiting. Note that we don't need to consider any forwarding
	 * here, as having E2H+TGE set is the very definition of being
	 * InHost.
	 *
	 * For the lesser hypervisors out there that have failed to get on
	 * with the VHE program, we can also handle the nVHE style of EL2
	 * invalidation.
	 */
	if (!is_hyp_ctxt(vcpu))
		return false;

	instr = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
	val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));

	if ((kvm_supported_tlbi_s1e1_op(vcpu, instr) &&
	     vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)) ||
	    kvm_supported_tlbi_s1e2_op(vcpu, instr))
		ret = __kvm_tlbi_s1e2(NULL, val, instr);

	if (ret)
		return false;

	__kvm_skip_instr(vcpu);

	return true;
}

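/*
 * Fast path for a guest hypervisor's CPACR_EL1 accesses, which KVM
 * maps onto the virtual CPTR_EL2: emulate the access against the
 * in-memory view of the register, and re-evaluate the trap state
 * on writes.
 */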
static bool kvm_hyp_handle_cpacr_el1(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);
	int rt;

	if (!is_hyp_ctxt(vcpu) || esr_sys64_to_sysreg(esr) != SYS_CPACR_EL1)
		return false;

	rt = kvm_vcpu_sys_get_rt(vcpu);

	if ((esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ) {
		vcpu_set_reg(vcpu, rt, __vcpu_sys_reg(vcpu, CPTR_EL2));
	} else {
		vcpu_write_sys_reg(vcpu, vcpu_get_reg(vcpu, rt), CPTR_EL2);
		__activate_cptr_traps(vcpu);
	}

	__kvm_skip_instr(vcpu);

	return true;
}

static bool kvm_hyp_handle_zcr_el2(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));

	if (!vcpu_has_nv(vcpu))
		return false;

	if (sysreg != SYS_ZCR_EL2)
		return false;

	if (guest_owns_fp_regs())
		return false;

	/*
	 * ZCR_EL2 traps are handled in the slow path, with the expectation
	 * that the guest's FP context has already been loaded onto the CPU.
	 *
	 * Load the guest's FP context and unconditionally forward to the
	 * slow path for handling (i.e. return false).
	 */
	kvm_hyp_handle_fpsimd(vcpu, exit_code);
	return false;
}

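/*
 * Try the VHE-specific fast paths (EL2 TLBI, CPACR_EL1, ZCR_EL2)
 * before falling back to the common sysreg exit handling.
 */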
static bool kvm_hyp_handle_sysreg_vhe(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (kvm_hyp_handle_tlbi_el2(vcpu, exit_code))
		return true;

	if (kvm_hyp_handle_cpacr_el1(vcpu, exit_code))
		return true;

	if (kvm_hyp_handle_zcr_el2(vcpu, exit_code))
		return true;

	return kvm_hyp_handle_sysreg(vcpu, exit_code);
}

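/*
 * Exit handlers, indexed by exception class (ESR_ELx.EC). A handler
 * returning true means the exit has been fixed up and the guest can
 * be re-entered; a NULL entry (or a false return) leaves the exit
 * to be handled by the host.
 */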
static const exit_handler_fn hyp_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]		= NULL,
	[ESR_ELx_EC_CP15_32]		= kvm_hyp_handle_cp15_32,
	[ESR_ELx_EC_SYS64]		= kvm_hyp_handle_sysreg_vhe,
	[ESR_ELx_EC_SVE]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
	[ESR_ELx_EC_ERET]		= kvm_hyp_handle_eret,
	[ESR_ELx_EC_MOPS]		= kvm_hyp_handle_mops,
};

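/*
 * Called on each guest exit to decide whether to re-enter: sanitize
 * the PSTATE view (promoting the EL1 mode back to vEL2 when the exit
 * came from hyp context) before handing over to the EC-indexed
 * handlers above.
 */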
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	synchronize_vcpu_pstate(vcpu, exit_code);

	/*
	 * If we were in HYP context on entry, adjust the PSTATE view
	 * so that the usual helpers work correctly.
	 */
	if (vcpu_has_nv(vcpu) && (read_sysreg(hcr_el2) & HCR_NV)) {
		u64 mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);

		switch (mode) {
		case PSR_MODE_EL1t:
			mode = PSR_MODE_EL2t;
			break;
		case PSR_MODE_EL1h:
			mode = PSR_MODE_EL2h;
			break;
		}

		*vcpu_cpsr(vcpu) &= ~(PSR_MODE_MASK | PSR_MODE32_BIT);
		*vcpu_cpsr(vcpu) |= mode;
	}

	return __fixup_guest_exit(vcpu, exit_code, hyp_exit_handlers);
}

/* Switch to the guest for VHE systems running in EL2 */
static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	u64 exit_code;

	host_ctxt = host_data_ptr(host_ctxt);
	guest_ctxt = &vcpu->arch.ctxt;

	sysreg_save_host_state_vhe(host_ctxt);

	fpsimd_lazy_switch_to_guest(vcpu);

	/*
	 * Note that ARM erratum 1165522 requires us to configure both stage 1
	 * and stage 2 translation for the guest context before we clear
	 * HCR_EL2.TGE. The stage 1 and stage 2 guest context has already been
	 * loaded on the CPU in kvm_vcpu_load_vhe().
	 */
	__activate_traps(vcpu);

	__kvm_adjust_pc(vcpu);

	sysreg_restore_guest_state_vhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	sysreg_save_guest_state_vhe(guest_ctxt);

	__deactivate_traps(vcpu);

	fpsimd_lazy_switch_to_host(vcpu);

	sysreg_restore_host_state_vhe(host_ctxt);

	if (guest_owns_fp_regs())
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);

	return exit_code;
}
NOKPROBE_SYMBOL(__kvm_vcpu_run_vhe);

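/*
 * Outer run-loop entry point: mask all DAIF exceptions (and keep the
 * GIC PMR in sync, see below) around the actual world switch, and
 * restore the host's interrupt state afterwards.
 */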
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	int ret;

	local_daif_mask();

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal interrupts of lower priority to the CPU, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 *
	 * local_daif_mask() already sets GIC_PRIO_PSR_I_SET; we just need a
	 * dsb to ensure the redistributor forwards EL2 IRQs to the CPU.
	 */
	pmr_sync();

	ret = __kvm_vcpu_run_vhe(vcpu);

	/*
	 * local_daif_restore() takes care to properly restore PSTATE.DAIF
	 * and the GIC PMR if the host is using IRQ priorities.
	 */
	local_daif_restore(DAIF_PROCCTX_NOIRQ);

	/*
	 * When we exit from the guest we change a number of CPU configuration
	 * parameters, such as traps.  We rely on the isb() in kvm_call_hyp*()
	 * to make sure these changes take effect before running the host or
	 * additional guests.
	 */
	return ret;
}

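/*
 * Drop the guest trap configuration and restore enough host sysreg
 * state for panic() to run in a sane context before reporting the
 * hyp panic.
 */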
static void __noreturn __hyp_call_panic(u64 spsr, u64 elr, u64 par)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = host_data_ptr(host_ctxt);
	vcpu = host_ctxt->__hyp_running_vcpu;

	__deactivate_traps(vcpu);
	sysreg_restore_host_state_vhe(host_ctxt);

	panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n",
	      spsr, elr,
	      read_sysreg_el2(SYS_ESR), read_sysreg_el2(SYS_FAR),
	      read_sysreg(hpfar_el2), par, vcpu);
}
NOKPROBE_SYMBOL(__hyp_call_panic);

void __noreturn hyp_panic(void)
{
	u64 spsr = read_sysreg_el2(SYS_SPSR);
	u64 elr = read_sysreg_el2(SYS_ELR);
	u64 par = read_sysreg_par();

	__hyp_call_panic(spsr, elr, par);
}

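/*
 * Entry point for exceptions taken at EL2 itself (hence asmlinkage:
 * called from the hyp exception-entry assembly); the actual fixup is
 * done by __kvm_unexpected_el2_exception().
 */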
asmlinkage void kvm_unexpected_el2_exception(void)
{
	__kvm_unexpected_el2_exception();
}