// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/switch.h>
#include <hyp/sysreg-sr.h>

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_hypevents.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>

#include <nvhe/mem_protect.h>
#include <nvhe/pkvm.h>

/* Non-VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);

extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
extern void __pkvm_unmask_serror(void);

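/*
 * Shorthand for update_fgt_traps_cs() using the protected-VM clear/set
 * masks (PVM_<reg>_CLR / PVM_<reg>_SET) for the given FGT register.
 */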
#define update_pvm_fgt_traps(hctxt, vcpu, kvm, reg) \
	update_fgt_traps_cs(hctxt, vcpu, kvm, reg, PVM_ ## reg ## _CLR, PVM_ ## reg ## _SET);

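/*
 * Configure HCRX_EL2 for a protected guest: the host value is saved in the
 * host context, any NV-provided clear/set bits are folded in, and the pVM
 * set/clear policy is applied on top of __HCRX_EL2_nMASK before the write.
 */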
static void __activate_pvm_traps_hcrx(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	u64 clear = 0;
	u64 set = 0;

	if (!cpus_have_final_cap(ARM64_HAS_HCX))
		return;

	ctxt_sys_reg(hctxt, HCRX_EL2) = read_sysreg_s(SYS_HCRX_EL2);
	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))
		compute_clr_set(vcpu, HCRX_EL2, clear, set);

	set |= PVM_HCRX_EL2_SET;
	clear |= PVM_HCRX_EL2_CLR;
	if (clear || set) {
		u64 val = __HCRX_EL2_nMASK;

		val |= set;
		val &= ~clear;
		write_sysreg_s(val, SYS_HCRX_EL2);
	}
}

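/*
 * Program the fine-grained trap registers (HFGxTR, HFGITR, HDFGxTR and,
 * with an AMU, HAFGRTR) with the protected-VM trap policy. The host values
 * are stashed in the host context so they can be restored on exit.
 */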
static void __activate_pvm_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);

	if (!cpus_have_final_cap(ARM64_HAS_FGT))
		return;

	update_pvm_fgt_traps(hctxt, vcpu, kvm, HFGRTR_EL2);

	/* Trap guest writes to TCR_EL1 to prevent it from enabling HA or HD. */
	if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38)) {
		update_fgt_traps_cs(hctxt, vcpu, kvm, HFGWTR_EL2, PVM_HFGWTR_EL2_CLR,
				    PVM_HFGWTR_EL2_SET | HFGxTR_EL2_TCR_EL1_MASK);
	} else {
		update_pvm_fgt_traps(hctxt, vcpu, kvm, HFGWTR_EL2);
	}

	update_pvm_fgt_traps(hctxt, vcpu, kvm, HFGITR_EL2);
	update_pvm_fgt_traps(hctxt, vcpu, kvm, HDFGRTR_EL2);
	update_pvm_fgt_traps(hctxt, vcpu, kvm, HDFGWTR_EL2);

	if (cpu_has_amu())
		update_pvm_fgt_traps(hctxt, vcpu, kvm, HAFGRTR_EL2);
}

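/*
 * Restore the host's fine-grained trap registers from the values saved by
 * __activate_pvm_traps_hfgxtr().
 */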
static void __deactivate_pvm_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;

	if (!cpus_have_final_cap(ARM64_HAS_FGT))
		return;

	write_sysreg_s(ctxt_sys_reg(hctxt, HFGRTR_EL2), SYS_HFGRTR_EL2);
	write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2);
	write_sysreg_s(ctxt_sys_reg(hctxt, HFGITR_EL2), SYS_HFGITR_EL2);
	write_sysreg_s(ctxt_sys_reg(hctxt, HDFGRTR_EL2), SYS_HDFGRTR_EL2);
	write_sysreg_s(ctxt_sys_reg(hctxt, HDFGWTR_EL2), SYS_HDFGWTR_EL2);

	if (cpu_has_amu())
		write_sysreg_s(ctxt_sys_reg(hctxt, HAFGRTR_EL2), SYS_HAFGRTR_EL2);
}

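/*
 * Build the guest CPTR_EL2 value (or CPACR_EL1 when hVHE is in use):
 * always trap AMU, trace and SME accesses, and trap FP/SIMD and SVE
 * unless the guest currently owns the FP register state.
 */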
static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
{
	u64 val = CPTR_EL2_TAM;	/* Same bit irrespective of E2H */

	if (!guest_owns_fp_regs())
		__activate_traps_fpsimd32(vcpu);

	if (has_hvhe()) {
		val |= CPACR_ELx_TTA;

		if (guest_owns_fp_regs()) {
			val |= CPACR_ELx_FPEN;
			if (vcpu_has_sve(vcpu))
				val |= CPACR_ELx_ZEN;
		}

		write_sysreg(val, cpacr_el1);
	} else {
		val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;

		/*
		 * Always trap SME since it's not supported in KVM.
		 * TSM is RES1 if SME isn't implemented.
		 */
		val |= CPTR_EL2_TSM;

		if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
			val |= CPTR_EL2_TZ;

		if (!guest_owns_fp_regs())
			val |= CPTR_EL2_TFP;

		write_sysreg(val, cptr_el2);
	}
}

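/*
 * Reset CPTR_EL2/CPACR_EL1 to the host's configuration, re-enabling
 * FP/SIMD, SVE and SME according to what the CPU implements.
 */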
static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
{
	if (has_hvhe()) {
		u64 val = CPACR_ELx_FPEN;

		if (cpus_have_final_cap(ARM64_SVE))
			val |= CPACR_ELx_ZEN;
		if (cpus_have_final_cap(ARM64_SME))
			val |= CPACR_ELx_SMEN;

		write_sysreg(val, cpacr_el1);
	} else {
		u64 val = CPTR_NVHE_EL2_RES1;

		if (!cpus_have_final_cap(ARM64_SVE))
			val |= CPTR_EL2_TZ;
		if (!cpus_have_final_cap(ARM64_SME))
			val |= CPTR_EL2_TSM;

		write_sysreg(val, cptr_el2);
	}
}

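/*
 * Activate all EL2 traps for this vcpu and install the hyp vectors.
 * Protected vcpus get the stricter pVM HCRX_EL2/FGT configuration.
 */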
static void __activate_traps(struct kvm_vcpu *vcpu)
{
	___activate_traps(vcpu, vcpu->arch.hcr_el2);
	__activate_traps_common(vcpu);
	__activate_cptr_traps(vcpu);

	if (unlikely(vcpu_is_protected(vcpu))) {
		__activate_pvm_traps_hcrx(vcpu);
		__activate_pvm_traps_hfgxtr(vcpu);
	} else {
		__activate_traps_hcrx(vcpu);
		__activate_traps_hfgxtr(vcpu);
	}

	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;

		isb();
		/*
		 * At this stage, and thanks to the above isb(), S2 is
		 * configured and enabled. We can now restore the guest's S1
		 * configuration: SCTLR, and only then TCR.
		 */
		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
		isb();
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
	}
}

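/*
 * Undo __activate_traps(): restore the host's trap configuration, HCR_EL2
 * and vectors.
 */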
static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
	extern char __kvm_hyp_host_vector[];

	___deactivate_traps(vcpu);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * Set the TCR and SCTLR registers in the exact opposite
		 * sequence as __activate_traps (first prevent walks,
		 * then force the MMU on). A generous sprinkling of isb()
		 * ensures that things happen in this exact order.
		 */
		val = read_sysreg_el1(SYS_TCR);
		write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR);
		isb();
		val = read_sysreg_el1(SYS_SCTLR);
		write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
		isb();
	}

	__deactivate_traps_common(vcpu);

	if (unlikely(vcpu_is_protected(vcpu)))
		__deactivate_pvm_traps_hfgxtr(vcpu);
	else
		__deactivate_traps_hfgxtr(vcpu);

	write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);

	__deactivate_cptr_traps(vcpu);
	write_sysreg(__kvm_hyp_host_vector, vbar_el2);
}

/* Save VGICv3 state on non-VHE systems */
static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/*
 * Disable host events, enable guest events
 */
#ifdef CONFIG_HW_PERF_EVENTS
static bool __pmu_switch_to_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenclr_el0);

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenset_el0);

	return (pmu->events_host || pmu->events_guest);
}

/*
 * Disable guest events, enable host events
 */
static void __pmu_switch_to_host(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenclr_el0);

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenset_el0);
}
#else
#define __pmu_switch_to_guest(v)	({ false; })
#define __pmu_switch_to_host(v)		do {} while (0)
#endif

/*
 * Handler for protected VM MSR, MRS or System instruction execution in AArch64.
 *
 * Returns true if the hypervisor has handled the exit, and control should go
 * back to the guest, or false if it hasn't.
 */
static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/*
	 * Make sure we handle the exit for workarounds before the pKVM
	 * handling, as the latter could decide to UNDEF.
	 */
	return (kvm_hyp_handle_sysreg(vcpu, exit_code) ||
		kvm_handle_pvm_sysreg(vcpu, exit_code));
}

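/* Exit handlers used for non-protected guests. */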
static const exit_handler_fn hyp_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX] = NULL,
	[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
	[ESR_ELx_EC_SYS64] = kvm_hyp_handle_sysreg,
	[ESR_ELx_EC_SVE] = kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
	[ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low,
	[ESR_ELx_EC_MOPS] = kvm_hyp_handle_mops,
};

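/*
 * Exit handlers used for protected guests. HVC/SMC, sysreg and SME exits
 * get pKVM-specific handling.
 */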
static const exit_handler_fn pvm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX] = NULL,
	[ESR_ELx_EC_HVC64] = kvm_handle_pvm_hvc64,
	[ESR_ELx_EC_SMC64] = kvm_handle_pvm_smc64,
	[ESR_ELx_EC_SYS64] = kvm_handle_pvm_sys64,
	[ESR_ELx_EC_SVE] = kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_SME] = kvm_handle_pvm_restricted,
	[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
	[ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low,
	[ESR_ELx_EC_MOPS] = kvm_hyp_handle_mops,
};

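/* Pick the exit handler table matching the vcpu's protected state. */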
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu_is_protected(vcpu)))
		return pvm_exit_handlers;

	return hyp_exit_handlers;
}

/*
 * As we have caught the guest red-handed, decide that it isn't fit for
 * purpose anymore by making the vcpu invalid. The VMM can try to fix it by
 * re-initializing the vcpu with KVM_ARM_VCPU_INIT; however, this is likely
 * not possible for protected VMs.
 */
void vcpu_illegal_trap(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	trace_vcpu_illegal_trap(kvm_vcpu_get_esr(vcpu));

	vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
	*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
	*exit_code |= ARM_EXCEPTION_IL;
}

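/*
 * Post-process a guest exit at EL2. Returns true if the exit was handled
 * here and the guest can be re-entered, false if control must go back to
 * the host.
 */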
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);

	synchronize_vcpu_pstate(vcpu, exit_code);

	/*
	 * Some guests (e.g., protected VMs) are not allowed to run in
	 * AArch32. The ARMv8 architecture does not give the hypervisor a
	 * mechanism to prevent a guest from dropping to AArch32 EL0 if
	 * implemented by the CPU. If the hypervisor spots a guest in such a
	 * state, ensure it is handled, and don't trust the host to spot or
	 * fix it. The check below is based on the one in
	 * kvm_arch_vcpu_ioctl_run().
	 */
	if (unlikely(vcpu_is_protected(vcpu) && vcpu_mode_is_32bit(vcpu)))
		vcpu_illegal_trap(vcpu, exit_code);

	return __fixup_guest_exit(vcpu, exit_code, handlers);
}

/* Switch to the guest for legacy non-VHE systems */
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	struct kvm_s2_mmu *mmu;
	bool pmu_switch_needed;
	u64 exit_code;

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal lower-priority interrupts to the CPU, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 */
	if (system_uses_irq_prio_masking()) {
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
		pmr_sync();
	}

	host_ctxt = host_data_ptr(host_ctxt);
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	pmu_switch_needed = __pmu_switch_to_guest(vcpu);

	__sysreg_save_state_nvhe(host_ctxt);
	/*
	 * We must flush and disable the SPE buffer for nVHE, as
	 * the translation regime (EL1&0) is going to be loaded with
	 * that of the guest. And we must do this before we change the
	 * translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
	 * before we load guest Stage1.
	 */
	__debug_save_host_buffers_nvhe(vcpu);

	/*
	 * We're about to restore some new MMU state. Make sure
	 * ongoing page-table walks that have started before we
	 * trapped to EL2 have completed. This also synchronises the
	 * above disabling of SPE and TRBE.
	 *
	 * See DDI0487I.a D8.1.5 "Out-of-context translation regimes",
	 * rule R_LFHQG and subsequent information statements.
	 */
	dsb(nsh);

	__kvm_adjust_pc(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 *
	 * Also, and in order to be able to deal with erratum #1319537 (A57)
	 * and #1319367 (A72), we must ensure that all VM-related sysregs are
	 * restored before we enable S2 translation.
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);

	mmu = kern_hyp_va(vcpu->arch.hw_mmu);
	__load_stage2(mmu, kern_hyp_va(mmu->arch));
	__activate_traps(vcpu);

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	__debug_switch_to_guest(vcpu);

	do {
		trace_hyp_exit();

		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu);

		/* And we're baaack! */
		trace_hyp_enter();
	} while (fixup_guest_exit(vcpu, &exit_code));

	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__hyp_vgic_save_state(vcpu);

	/*
	 * Same thing as before the guest run: we're about to switch
	 * the MMU context, so let's make sure we don't have any
	 * ongoing EL1&0 translations.
	 */
	dsb(nsh);

	__deactivate_traps(vcpu);
	__load_host_stage2();

	__sysreg_restore_state_nvhe(host_ctxt);

	if (guest_owns_fp_regs())
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);
	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_restore_host_buffers_nvhe(vcpu);

	if (pmu_switch_needed)
		__pmu_switch_to_host(vcpu);

	/* Returning to host will clear PSR.I, remask PMR if needed */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQOFF);

	host_ctxt->__hyp_running_vcpu = NULL;

	__pkvm_unmask_serror();

	return exit_code;
}

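/*
 * Optional callback invoked from hyp_panic() before the host context is
 * restored; registered (at most once) via
 * __pkvm_register_hyp_panic_notifier().
 */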
static void (*hyp_panic_notifier)(struct user_pt_regs *regs);
int __pkvm_register_hyp_panic_notifier(void (*cb)(struct user_pt_regs *regs))
{
	return cmpxchg(&hyp_panic_notifier, NULL, cb) ? -EBUSY : 0;
}

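/*
 * Terminal EL2 error path: notify any registered panic notifier, restore
 * enough host state to report the failure, prepare the hyp stacktrace and
 * hand over to __hyp_do_panic().
 */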
asmlinkage void __noreturn hyp_panic(void)
{
	u64 spsr = read_sysreg_el2(SYS_SPSR);
	u64 elr = read_sysreg_el2(SYS_ELR);
	u64 par = read_sysreg_par();
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = host_data_ptr(host_ctxt);
	vcpu = host_ctxt->__hyp_running_vcpu;

	if (READ_ONCE(hyp_panic_notifier))
		hyp_panic_notifier(&host_ctxt->regs);

	if (vcpu) {
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__load_host_stage2();
		__sysreg_restore_state_nvhe(host_ctxt);
	}

	/* Prepare to dump kvm nvhe hyp stacktrace */
	kvm_nvhe_prepare_backtrace((unsigned long)__builtin_frame_address(0),
				   _THIS_IP_);

	__pkvm_panic_tracing();

	__hyp_do_panic(host_ctxt, spsr, elr, par);
	unreachable();
}

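/* Entered when a hyp stack overflow has been detected. */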
asmlinkage void __noreturn hyp_panic_bad_stack(void)
{
	hyp_panic();
}

asmlinkage void kvm_unexpected_el2_exception(void)
{
	__kvm_unexpected_el2_exception();
}
