// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/handle_exit.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/kvm_pkvm.h>
#include <asm/debug-monitors.h>
#include <asm/stacktrace/nvhe.h>
#include <asm/traps.h>

#include <kvm/arm_hypercalls.h>

#define CREATE_TRACE_POINTS
#include "trace_handle_exit.h"

typedef int (*exit_handle_fn)(struct kvm_vcpu *);

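/*
 * Inject a virtual SError into the guest unless the ESR describes a RAS
 * SError that is known to be non-fatal, in which case nothing is injected.
 */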
static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
{
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
		kvm_inject_vabt(vcpu);
}

static int handle_hvc(struct kvm_vcpu *vcpu)
{
	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
			    kvm_vcpu_hvc_get_imm(vcpu));
	vcpu->stat.hvc_exit_stat++;

	/* Forward hvc instructions to the virtual EL2 if the guest has EL2. */
	if (vcpu_has_nv(vcpu)) {
		if (vcpu_read_sys_reg(vcpu, HCR_EL2) & HCR_HCD)
			kvm_inject_undefined(vcpu);
		else
			kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));

		return 1;
	}

	return kvm_smccc_call_handler(vcpu);
}

static int handle_smc(struct kvm_vcpu *vcpu)
{
	/*
	 * Forward this trapped smc instruction to the virtual EL2 if
	 * the guest has asked for it.
	 */
	if (forward_smc_trap(vcpu))
		return 1;

	/*
	 * "If an SMC instruction executed at Non-secure EL1 is
	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
	 * Trap exception, not a Secure Monitor Call exception [...]"
	 *
	 * We need to advance the PC after the trap, as it would
	 * otherwise return to the same address. Furthermore, pre-incrementing
	 * the PC before potentially exiting to userspace maintains the same
	 * abstraction for both SMCs and HVCs.
	 */
	kvm_incr_pc(vcpu);

	/*
	 * SMCs with a nonzero immediate are reserved according to DEN0028E 2.9
	 * "SMC and HVC immediate value".
	 */
	if (kvm_vcpu_hvc_get_imm(vcpu)) {
		vcpu_set_reg(vcpu, 0, ~0UL);
		return 1;
	}

	/*
	 * If imm is zero then it is likely an SMCCC call.
	 *
	 * Note that on ARMv8.3, even if EL3 is not implemented, SMC executed
	 * at Non-secure EL1 is trapped to EL2 if HCR_EL2.TSC==1, rather than
	 * being treated as UNDEFINED.
	 */
	return kvm_smccc_call_handler(vcpu);
}

/*
 * This handles the cases where the system does not support FP/ASIMD or when
 * we are running nested virtualization and the guest hypervisor is trapping
 * FP/ASIMD accesses by its guest.
 *
 * All other handling of guest vs. host FP/ASIMD register state is handled in
 * fixup_guest_exit().
 */
static int kvm_handle_fpasimd(struct kvm_vcpu *vcpu)
{
	if (guest_hyp_fpsimd_traps_enabled(vcpu))
		return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));

	/* This is the case when the system doesn't support FP/ASIMD. */
	kvm_inject_undefined(vcpu);
	return 1;
}

/**
 * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
 *		    instruction executed by a guest
 *
 * @vcpu:	the vcpu pointer
 *
 * WFE[T]: Yield the CPU and come back to this vcpu when the scheduler
 * decides to.
 * WFI: Simply call kvm_vcpu_halt(), which will halt execution of
 * world-switches and schedule other host processes until there is an
 * incoming IRQ or FIQ to the VM.
 * WFIT: Same as WFI, with a timed wakeup implemented as a background timer
 *
 * WF{I,E}T can immediately return if the deadline has already expired.
 */
static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_WFx_ISS_WFE) {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
		vcpu->stat.wfe_exit_stat++;
	} else {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
		vcpu->stat.wfi_exit_stat++;
	}

	if (esr & ESR_ELx_WFx_ISS_WFxT) {
		if (esr & ESR_ELx_WFx_ISS_RV) {
			u64 val, now;

			now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT);
			val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));

			if (now >= val)
				goto out;
		} else {
			/* Treat WFxT as WFx if RN is invalid */
			esr &= ~ESR_ELx_WFx_ISS_WFxT;
		}
	}

	if (esr & ESR_ELx_WFx_ISS_WFE) {
		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
	} else {
		if (esr & ESR_ELx_WFx_ISS_WFxT)
			vcpu_set_flag(vcpu, IN_WFIT);

		kvm_vcpu_wfi(vcpu);
	}
out:
	kvm_incr_pc(vcpu);

	return 1;
}

/**
 * kvm_handle_guest_debug - handle a debug exception instruction
 *
 * @vcpu:	the vcpu pointer
 *
 * We route all debug exceptions through the same handler. If both the
 * guest and host are using the same debug facilities it will be up to
 * userspace to re-inject the correct exception for guest delivery.
 *
 * Return: 0 (while setting vcpu->run->exit_reason)
 */
static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u64 esr = kvm_vcpu_get_esr(vcpu);

	run->exit_reason = KVM_EXIT_DEBUG;
	run->debug.arch.hsr = lower_32_bits(esr);
	run->debug.arch.hsr_high = upper_32_bits(esr);
	run->flags = KVM_DEBUG_ARCH_HSR_HIGH_VALID;

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_WATCHPT_LOW:
		run->debug.arch.far = vcpu->arch.fault.far_el2;
		break;
	case ESR_ELx_EC_SOFTSTP_LOW:
		vcpu_clear_flag(vcpu, DBG_SS_ACTIVE_PENDING);
		break;
	}

	return 0;
}

static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	kvm_pr_unimpl("Unknown exception class: esr: %#016llx -- %s\n",
		      esr, esr_get_class_string(esr));

	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * Guest access to SVE registers should be routed to this handler only
 * when the system doesn't support SVE.
 */
static int handle_sve(struct kvm_vcpu *vcpu)
{
	if (guest_hyp_sve_traps_enabled(vcpu))
		return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));

	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * Two possibilities to handle a trapping ptrauth instruction:
 *
 * - Guest usage of a ptrauth instruction (which the guest EL1 did not
 *   turn into a NOP). If we get here, it is because we didn't enable
 *   ptrauth for the guest. This results in an UNDEF, as it isn't
 *   supposed to use ptrauth without being told it could.
 *
 * - Running an L2 NV guest while L1 has left HCR_EL2.API==0, and for
 *   which we reinject the exception into L1.
 *
 * Anything else is an emulation bug (hence the WARN_ON + UNDEF).
 */
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
{
	if (!vcpu_has_ptrauth(vcpu)) {
		kvm_inject_undefined(vcpu);
		return 1;
	}

	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
		kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
		return 1;
	}

	/* Really shouldn't be here! */
	WARN_ON_ONCE(1);
	kvm_inject_undefined(vcpu);
	return 1;
}

static int kvm_handle_eret(struct kvm_vcpu *vcpu)
{
	if (esr_iss_is_eretax(kvm_vcpu_get_esr(vcpu)) &&
	    !vcpu_has_ptrauth(vcpu))
		return kvm_handle_ptrauth(vcpu);

	/*
	 * If we got here, two possibilities:
	 *
	 * - the guest is in EL2, and we need to fully emulate ERET
	 *
	 * - the guest is in EL1, and we need to reinject the
	 *   exception into the L1 hypervisor.
	 *
	 * If KVM ever traps ERET for its own use, we'll have to
	 * revisit this.
	 */
	if (is_hyp_ctxt(vcpu))
		kvm_emulate_nested_eret(vcpu);
	else
		kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));

	return 1;
}

static int handle_svc(struct kvm_vcpu *vcpu)
{
	/*
	 * So far, SVC traps only for NV via HFGITR_EL2. An SVC from a
	 * 32bit guest would be caught by vcpu_mode_is_bad_32bit(), so
	 * we should only have to deal with a 64 bit exception.
	 */
	kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
	return 1;
}

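/*
 * Exit dispatch table, indexed by ESR_ELx exception class (EC). Any EC
 * without a dedicated handler falls back to kvm_handle_unknown_ec().
 */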
static exit_handle_fn arm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
	[ESR_ELx_EC_CP15_64]	= kvm_handle_cp15_64,
	[ESR_ELx_EC_CP14_MR]	= kvm_handle_cp14_32,
	[ESR_ELx_EC_CP14_LS]	= kvm_handle_cp14_load_store,
	[ESR_ELx_EC_CP10_ID]	= kvm_handle_cp10_id,
	[ESR_ELx_EC_CP14_64]	= kvm_handle_cp14_64,
	[ESR_ELx_EC_HVC32]	= handle_hvc,
	[ESR_ELx_EC_SMC32]	= handle_smc,
	[ESR_ELx_EC_HVC64]	= handle_hvc,
	[ESR_ELx_EC_SMC64]	= handle_smc,
	[ESR_ELx_EC_SVC64]	= handle_svc,
	[ESR_ELx_EC_SYS64]	= kvm_handle_sys_reg,
	[ESR_ELx_EC_SVE]	= handle_sve,
	[ESR_ELx_EC_ERET]	= kvm_handle_eret,
	[ESR_ELx_EC_IABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_DABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BKPT32]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_BRK64]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_FP_ASIMD]	= kvm_handle_fpasimd,
	[ESR_ELx_EC_PAC]	= kvm_handle_ptrauth,
};

static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);
	u8 esr_ec = ESR_ELx_EC(esr);

	return arm_exit_handlers[esr_ec];
}

/*
 * We may be single-stepping an emulated instruction. If the emulation
 * has been completed in the kernel, we can return to userspace with a
 * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
 * emulation first.
 */
static int handle_trap_exceptions(struct kvm_vcpu *vcpu)
{
	int handled;

	/*
	 * If we run a non-protected VM when protection is enabled
	 * system-wide, resync the state from the hypervisor and mark
	 * it as dirty on the host side if it wasn't dirty already
	 * (which could happen if preemption has taken place).
	 */
	if (is_protected_kvm_enabled() && !kvm_vm_is_protected(vcpu->kvm)) {
		preempt_disable();
		if (!(vcpu_get_flag(vcpu, PKVM_HOST_STATE_DIRTY))) {
			kvm_call_hyp_nvhe(__pkvm_vcpu_sync_state);
			vcpu_set_flag(vcpu, PKVM_HOST_STATE_DIRTY);
		}
		preempt_enable();
	}

	/*
	 * See ARM ARM B1.14.1: "Hyp traps on instructions
	 * that fail their condition code check"
	 */
	if (!kvm_condition_valid(vcpu)) {
		kvm_incr_pc(vcpu);
		handled = 1;
	} else {
		exit_handle_fn exit_handler;

		exit_handler = kvm_get_exit_handler(vcpu);
		handled = exit_handler(vcpu);
	}

	return handled;
}

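/*
 * Satisfy a hypervisor request for memory by topping up the destination
 * the hypervisor asked for: the EL2 allocator, the vCPU stage-2 memcache,
 * or the IOMMU memcache.
 */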
static int handle_hyp_req_mem(struct kvm_vcpu *vcpu,
			      struct kvm_hyp_req *req)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long nr_pages;
	int ret;

	switch (req->mem.dest) {
	case REQ_MEM_DEST_HYP_ALLOC:
		return __pkvm_topup_hyp_alloc(req->mem.nr_pages);
	case REQ_MEM_DEST_VCPU_MEMCACHE:
		nr_pages = vcpu->arch.stage2_mc.nr_pages;
		ret = topup_hyp_memcache(&vcpu->arch.stage2_mc,
					 req->mem.nr_pages, 0);
		nr_pages = vcpu->arch.stage2_mc.nr_pages - nr_pages;
		atomic64_add(nr_pages << PAGE_SHIFT, &kvm->stat.protected_hyp_mem);

		return ret;
	case REQ_MEM_DEST_HYP_IOMMU:
		return kvm_iommu_guest_alloc_mc(&vcpu->arch.iommu_mc,
						req->mem.sz_alloc, req->mem.nr_pages);
	}

	pr_warn("Unknown kvm_hyp_req mem dest: %d\n", req->mem.dest);

	return -EINVAL;
}

static int handle_hyp_req_map(struct kvm_vcpu *vcpu,
			      struct kvm_hyp_req *req)
{
	return pkvm_mem_abort_range(vcpu, req->map.guest_ipa, req->map.size);
}

static int handle_hyp_req_split(struct kvm_vcpu *vcpu, struct kvm_hyp_req *req)
{
	return __pkvm_pgtable_stage2_split(vcpu, req->split.guest_ipa, req->split.size);
}

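/*
 * Process the array of requests the hypervisor has posted for this vCPU,
 * stopping at the first KVM_HYP_LAST_REQ entry. Returns 1 once every
 * request has been handled, otherwise propagates the first non-zero
 * return value.
 */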
static int handle_hyp_req(struct kvm_vcpu *vcpu)
{
	struct kvm_hyp_req *hyp_req = vcpu->arch.hyp_reqs;
	int i, ret;

	for (i = 0; i < KVM_HYP_REQ_MAX; i++, hyp_req++) {
		if (hyp_req->type == KVM_HYP_LAST_REQ)
			break;

		switch (hyp_req->type) {
		case KVM_HYP_REQ_TYPE_MEM:
			ret = handle_hyp_req_mem(vcpu, hyp_req);
			break;
		case KVM_HYP_REQ_TYPE_MAP:
			ret = handle_hyp_req_map(vcpu, hyp_req);
			break;
		case KVM_HYP_REQ_TYPE_SPLIT:
			ret = handle_hyp_req_split(vcpu, hyp_req);
			break;
		default:
			pr_warn("Unknown kvm_hyp_req type: %d\n", hyp_req->type);
			ret = -EINVAL;
		}

		if (ret)
			return ret;
	}

	/* handled */
	return 1;
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
{
	struct kvm_run *run = vcpu->run;

	if (ARM_SERROR_PENDING(exception_index)) {
		/*
		 * The SError is handled by handle_exit_early(). If the guest
		 * survives it will re-execute the original instruction.
		 */
		return 1;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	switch (exception_index) {
	case ARM_EXCEPTION_IRQ:
		return 1;
	case ARM_EXCEPTION_EL1_SERROR:
		return 1;
	case ARM_EXCEPTION_TRAP:
		return handle_trap_exceptions(vcpu);
	case ARM_EXCEPTION_HYP_GONE:
		/*
		 * EL2 has been reset to the hyp-stub. This happens when a guest
		 * is pre-empted by kvm_reboot()'s shutdown call.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return 0;
	case ARM_EXCEPTION_IL:
		/*
		 * We attempted an illegal exception return.  Guest state must
		 * have been corrupted somehow.  Give up.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return -EINVAL;
	case ARM_EXCEPTION_HYP_REQ:
		return handle_hyp_req(vcpu);
	default:
		kvm_pr_unimpl("Unsupported exception type: %d",
			      exception_index);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return 0;
	}
}

/* For exit types that need handling before we can be preempted */
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
{
	/*
	 * We just exited, so the state is clean from a hypervisor
	 * perspective.
	 */
	if (is_protected_kvm_enabled())
		vcpu_clear_flag(vcpu, PKVM_HOST_STATE_DIRTY);

	if (ARM_SERROR_PENDING(exception_index)) {
		if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
			u64 disr = kvm_vcpu_get_disr(vcpu);

			kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
		} else {
			kvm_inject_vabt(vcpu);
		}

		return;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	if (exception_index == ARM_EXCEPTION_EL1_SERROR)
		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
}

static void print_nvhe_hyp_panic(const char *name, u64 panic_addr, u64 kaslr_off)
{
	kvm_err("nVHE hyp %s at: [<%016llx>] %pB!\n", name, panic_addr,
		(void *)(panic_addr + kaslr_off));
}

static void kvm_nvhe_report_cfi_failure(u64 panic_addr, u64 kaslr_off)
{
	print_nvhe_hyp_panic("CFI failure", panic_addr, kaslr_off);

	if (IS_ENABLED(CONFIG_CFI_PERMISSIVE))
		kvm_err(" (CONFIG_CFI_PERMISSIVE ignored for hyp failures)\n");
}

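/*
 * Called when the nVHE hypervisor has panicked: translate the EL2 ELR back
 * to a kernel image (or pKVM module) address, print a diagnostic for the
 * BUG/CFI/panic case at hand, dump the hypervisor backtrace and finally
 * panic the host kernel.
 */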
void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
					      u64 elr_virt, u64 elr_phys,
					      u64 par, uintptr_t vcpu,
					      u64 far, u64 hpfar)
{
	u64 elr_in_kimg = __phys_to_kimg(elr_phys);
	u64 kaslr_off = kaslr_offset();
	u64 hyp_offset = elr_in_kimg - kaslr_off - elr_virt;
	u64 mode = spsr & PSR_MODE_MASK;
	u64 panic_addr = elr_virt + hyp_offset;
	u64 mod_addr = pkvm_el2_mod_kern_va(elr_virt);

	if (mod_addr) {
		panic_addr = mod_addr;
		kaslr_off = 0;
	}

	if (mode != PSR_MODE_EL2t && mode != PSR_MODE_EL2h) {
		kvm_err("Invalid host exception to nVHE hyp!\n");
	} else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
		   esr_brk_comment(esr) == BUG_BRK_IMM) {
		const char *file = NULL;
		unsigned int line = 0;

		/* All hyp bugs, including warnings, are treated as fatal. */
		if (!is_protected_kvm_enabled() ||
		    IS_ENABLED(CONFIG_PKVM_DISABLE_STAGE2_ON_PANIC)) {
			struct bug_entry *bug = find_bug(elr_in_kimg);

			if (bug)
				bug_get_file_line(bug, &file, &line);
		}

		if (file)
			kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
		else
			print_nvhe_hyp_panic("BUG", panic_addr, kaslr_off);
	} else if (IS_ENABLED(CONFIG_CFI_CLANG) && esr_is_cfi_brk(esr)) {
		kvm_nvhe_report_cfi_failure(panic_addr, kaslr_off);
	} else {
		print_nvhe_hyp_panic("panic", panic_addr, kaslr_off);
	}

	/* Dump the nVHE hypervisor backtrace */
	kvm_nvhe_dump_backtrace(hyp_offset);

	/*
	 * Hyp has panicked and we're going to handle that by panicking the
	 * kernel. The kernel offset will be revealed in the panic so we're
	 * also safe to reveal the hyp offset as a debugging aid for translating
	 * hyp VAs to vmlinux addresses.
	 */
	kvm_err("Hyp Offset: 0x%llx\n", hyp_offset);

	panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%016llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n",
	      spsr, elr_virt, esr, far, hpfar, par, vcpu);
}