// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/handle_exit.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/debug-monitors.h>
#include <asm/stacktrace/nvhe.h>
#include <asm/traps.h>

#include <kvm/arm_hypercalls.h>

#define CREATE_TRACE_POINTS
#include "trace_handle_exit.h"

typedef int (*exit_handle_fn)(struct kvm_vcpu *);

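/*
 * Inject a virtual SError into the guest unless the RAS extension
 * classifies this SError as non-fatal, in which case no further
 * action is needed here.
 */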
static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
{
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
		kvm_inject_vabt(vcpu);
}

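/*
 * Guest HVCs land here and are passed on to the hypercall handler
 * (PSCI and other SMCCC-based services). A negative return means the
 * call isn't recognised: report SMCCC_RET_NOT_SUPPORTED (~0UL) in x0
 * and resume the guest.
 */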
static int handle_hvc(struct kvm_vcpu *vcpu)
{
	int ret;

	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
			    kvm_vcpu_hvc_get_imm(vcpu));
	vcpu->stat.hvc_exit_stat++;

	ret = kvm_hvc_call_handler(vcpu);
	if (ret < 0) {
		vcpu_set_reg(vcpu, 0, ~0UL);
		return 1;
	}

	return ret;
}

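/*
 * SMCs issued by the guest are not forwarded anywhere; the guest just
 * gets SMCCC_RET_NOT_SUPPORTED (~0UL) back in x0.
 */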
static int handle_smc(struct kvm_vcpu *vcpu)
{
	/*
	 * "If an SMC instruction executed at Non-secure EL1 is
	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
	 * Trap exception, not a Secure Monitor Call exception [...]"
	 *
	 * We need to advance the PC after the trap, as it would
	 * otherwise return to the same address...
	 */
	vcpu_set_reg(vcpu, 0, ~0UL);
	kvm_incr_pc(vcpu);
	return 1;
}

/*
 * Guest access to FP/ASIMD registers is routed to this handler only
 * when the system doesn't support FP/ASIMD.
 */
static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/**
 * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
 *		    instruction executed by a guest
 *
 * @vcpu:	the vcpu pointer
 *
 * WFE[T]: Yield the CPU and come back to this vcpu when the scheduler
 * decides to.
 * WFI: Simply call kvm_vcpu_block(), which will halt execution of
 * world-switches and schedule other host processes until there is an
 * incoming IRQ or FIQ to the VM.
 * WFIT: Same as WFI, with a timed wakeup implemented as a background timer
 *
 * WF{I,E}T can immediately return if the deadline has already expired.
 */
static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_WFx_ISS_WFE) {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
		vcpu->stat.wfe_exit_stat++;
	} else {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
		vcpu->stat.wfi_exit_stat++;
	}

	if (esr & ESR_ELx_WFx_ISS_WFxT) {
		if (esr & ESR_ELx_WFx_ISS_RV) {
			u64 val, now;

			now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT);
			val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));

			if (now >= val)
				goto out;
		} else {
			/* Treat WFxT as WFx if RN is invalid */
			esr &= ~ESR_ELx_WFx_ISS_WFxT;
		}
	}

	if (esr & ESR_ELx_WFx_ISS_WFE) {
		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
	} else {
		if (esr & ESR_ELx_WFx_ISS_WFxT)
			vcpu_set_flag(vcpu, IN_WFIT);

		kvm_vcpu_wfi(vcpu);
	}
out:
	kvm_incr_pc(vcpu);

	return 1;
}

/**
 * kvm_handle_guest_debug - handle a debug exception
 *
 * @vcpu:	the vcpu pointer
 *
 * We route all debug exceptions through the same handler. If both the
 * guest and host are using the same debug facilities it will be up to
 * userspace to re-inject the correct exception for guest delivery.
 *
 * Return: 0 (while setting vcpu->run->exit_reason)
 */
static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 esr = kvm_vcpu_get_esr(vcpu);

	run->exit_reason = KVM_EXIT_DEBUG;
	run->debug.arch.hsr = esr;

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_WATCHPT_LOW:
		run->debug.arch.far = vcpu->arch.fault.far_el2;
		break;
	case ESR_ELx_EC_SOFTSTP_LOW:
		vcpu_clear_flag(vcpu, DBG_SS_ACTIVE_PENDING);
		break;
	}

	return 0;
}

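/*
 * Fallback handler for exception classes we don't expect to trap: log
 * the unexpected ESR and inject an UNDEF into the guest rather than
 * killing the VM.
 */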
static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	kvm_pr_unimpl("Unknown exception class: esr: %#08x -- %s\n",
		      esr, esr_get_class_string(esr));

	kvm_inject_undefined(vcpu);
	return 1;
}

static int handle_sve(struct kvm_vcpu *vcpu)
{
	/* Until SVE is supported for guests: */
	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
 * a NOP). If we get here, it means we didn't fix up ptrauth on exit, and all
 * we can do is give the guest an UNDEF.
 */
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

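/*
 * Exit dispatch table, indexed by exception class (ESR_ELx.EC). The
 * range initialiser sets kvm_handle_unknown_ec as the default; the
 * designated entries below override it for the classes KVM handles.
 */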
static exit_handle_fn arm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
	[ESR_ELx_EC_CP15_64]	= kvm_handle_cp15_64,
	[ESR_ELx_EC_CP14_MR]	= kvm_handle_cp14_32,
	[ESR_ELx_EC_CP14_LS]	= kvm_handle_cp14_load_store,
	[ESR_ELx_EC_CP10_ID]	= kvm_handle_cp10_id,
	[ESR_ELx_EC_CP14_64]	= kvm_handle_cp14_64,
	[ESR_ELx_EC_HVC32]	= handle_hvc,
	[ESR_ELx_EC_SMC32]	= handle_smc,
	[ESR_ELx_EC_HVC64]	= handle_hvc,
	[ESR_ELx_EC_SMC64]	= handle_smc,
	[ESR_ELx_EC_SYS64]	= kvm_handle_sys_reg,
	[ESR_ELx_EC_SVE]	= handle_sve,
	[ESR_ELx_EC_IABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_DABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_SOFTSTP_LOW] = kvm_handle_guest_debug,
	[ESR_ELx_EC_WATCHPT_LOW] = kvm_handle_guest_debug,
	[ESR_ELx_EC_BREAKPT_LOW] = kvm_handle_guest_debug,
	[ESR_ELx_EC_BKPT32]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_BRK64]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_FP_ASIMD]	= handle_no_fpsimd,
	[ESR_ELx_EC_PAC]	= kvm_handle_ptrauth,
};

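/* Pick the handler matching the exception class of the current trap. */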
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);
	u8 esr_ec = ESR_ELx_EC(esr);

	return arm_exit_handlers[esr_ec];
}

/*
 * We may be single-stepping an emulated instruction. If the emulation
 * has been completed in the kernel, we can return to userspace with a
 * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
 * emulation first.
 */
static int handle_trap_exceptions(struct kvm_vcpu *vcpu)
{
	int handled;

	/*
	 * If we run a non-protected VM when protection is enabled
	 * system-wide, resync the state from the hypervisor and mark
	 * it as dirty on the host side if it wasn't dirty already
	 * (which could happen if preemption has taken place).
	 */
	if (is_protected_kvm_enabled() && !kvm_vm_is_protected(vcpu->kvm)) {
		preempt_disable();
		if (!(vcpu_get_flag(vcpu, PKVM_HOST_STATE_DIRTY))) {
			kvm_call_hyp_nvhe(__pkvm_vcpu_sync_state);
			vcpu_set_flag(vcpu, PKVM_HOST_STATE_DIRTY);
		}
		preempt_enable();
	}

	/*
	 * See ARM ARM B1.14.1: "Hyp traps on instructions
	 * that fail their condition code check"
	 */
	if (!kvm_condition_valid(vcpu)) {
		kvm_incr_pc(vcpu);
		handled = 1;
	} else {
		exit_handle_fn exit_handler;

		exit_handler = kvm_get_exit_handler(vcpu);
		handled = exit_handler(vcpu);
	}

	return handled;
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
{
	struct kvm_run *run = vcpu->run;

	if (ARM_SERROR_PENDING(exception_index)) {
		/*
		 * The SError is handled by handle_exit_early(). If the guest
		 * survives it will re-execute the original instruction.
		 */
		return 1;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	switch (exception_index) {
	case ARM_EXCEPTION_IRQ:
		return 1;
	case ARM_EXCEPTION_EL1_SERROR:
		return 1;
	case ARM_EXCEPTION_TRAP:
		return handle_trap_exceptions(vcpu);
	case ARM_EXCEPTION_HYP_GONE:
		/*
		 * EL2 has been reset to the hyp-stub. This happens when a guest
		 * is preempted by kvm_reboot()'s shutdown call.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return 0;
	case ARM_EXCEPTION_IL:
		/*
		 * We attempted an illegal exception return.  Guest state must
		 * have been corrupted somehow.  Give up.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return -EINVAL;
	default:
		kvm_pr_unimpl("Unsupported exception type: %d",
			      exception_index);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return 0;
	}
}

/* For exit types that need handling before we can be preempted */
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
{
	/*
	 * We just exited, so the state is clean from a hypervisor
	 * perspective.
	 */
	if (is_protected_kvm_enabled())
		vcpu_clear_flag(vcpu, PKVM_HOST_STATE_DIRTY);

	if (ARM_SERROR_PENDING(exception_index)) {
		if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
			u64 disr = kvm_vcpu_get_disr(vcpu);

			kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
		} else {
			kvm_inject_vabt(vcpu);
		}

		return;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	if (exception_index == ARM_EXCEPTION_EL1_SERROR)
		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
}

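/*
 * Report a panic taken in the nVHE hypervisor. The faulting EL2 address
 * is translated back into a kernel image address so that BUG() metadata
 * and symbols can be resolved, then the host kernel panics with the
 * saved hyp register state.
 */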
void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
					      u64 elr_virt, u64 elr_phys,
					      u64 par, uintptr_t vcpu,
					      u64 far, u64 hpfar)
{
	u64 elr_in_kimg = __phys_to_kimg(elr_phys);
	u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr_virt;
	u64 mode = spsr & PSR_MODE_MASK;
	u64 panic_addr = elr_virt + hyp_offset;

	if (mode != PSR_MODE_EL2t && mode != PSR_MODE_EL2h) {
		kvm_err("Invalid host exception to nVHE hyp!\n");
	} else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
		   (esr & ESR_ELx_BRK64_ISS_COMMENT_MASK) == BUG_BRK_IMM) {
		const char *file = NULL;
		unsigned int line = 0;

		/* All hyp bugs, including warnings, are treated as fatal. */
		if (!is_protected_kvm_enabled() ||
		    IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
			struct bug_entry *bug = find_bug(elr_in_kimg);

			if (bug)
				bug_get_file_line(bug, &file, &line);
		}

		if (file)
			kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
		else
			kvm_err("nVHE hyp BUG at: [<%016llx>] %pB!\n", panic_addr,
					(void *)(panic_addr + kaslr_offset()));
	} else {
		kvm_err("nVHE hyp panic at: [<%016llx>] %pB!\n", panic_addr,
				(void *)(panic_addr + kaslr_offset()));
	}

	/* Dump the nVHE hypervisor backtrace */
	kvm_nvhe_dump_backtrace(hyp_offset);

	/*
	 * Hyp has panicked and we're going to handle that by panicking the
	 * kernel. The kernel offset will be revealed in the panic so we're
	 * also safe to reveal the hyp offset as a debugging aid for translating
	 * hyp VAs to vmlinux addresses.
	 */
	kvm_err("Hyp Offset: 0x%llx\n", hyp_offset);

	panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n",
	      spsr, elr_virt, esr, far, hpfar, par, vcpu);
}