// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_SWITCH_H__
#define __ARM64_KVM_HYP_SWITCH_H__

#include <hyp/adjust_pc.h>
#include <hyp/fault.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/extable.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>

extern struct exception_table_entry __start___kvm_ex_table;
extern struct exception_table_entry __stop___kvm_ex_table;
/* Check whether the FP regs were dirtied while in the host-side run loop: */
static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
{
	/*
	 * When the system doesn't support FP/SIMD, we cannot rely on
	 * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
	 * abort on the very first access to FP and thus we should never
	 * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
	 * trap the accesses.
	 */
	if (!system_supports_fpsimd() ||
	    vcpu->arch.flags & KVM_ARM64_FP_FOREIGN_FPSTATE)
		vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
				      KVM_ARM64_FP_HOST);

	return !!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED);
}

/* Save the 32-bit only FPSIMD system register state */
static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	__vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
}

static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
}

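/*
 * Trap configuration shared between the VHE and nVHE switch code: trap
 * impdef AArch32 cp15 accesses and EL0 PMU accesses, then install the
 * guest's MDCR_EL2 after saving the host's value.
 */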
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);

	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	if (kvm_arm_support_pmu_v3()) {
		write_sysreg(0, pmselr_el0);
		write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	}

	vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}

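/* Undo the trap configuration installed by __activate_traps_common(). */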
static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
{
	write_sysreg(vcpu->arch.mdcr_el2_host, mdcr_el2);

	write_sysreg(0, hstr_el2);
	if (kvm_arm_support_pmu_v3())
		write_sysreg(0, pmuserenr_el0);
}

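/*
 * Install the guest's HCR_EL2 and, when the RAS extension is present and a
 * virtual SError is pending, the syndrome to report in VSESR_EL2.
 */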
static inline void ___activate_traps(struct kvm_vcpu *vcpu)
{
	u64 hcr = vcpu->arch.hcr_el2;

	write_sysreg(hcr, hcr_el2);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
}

static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE) {
		vcpu->arch.hcr_el2 &= ~HCR_VSE;
		vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
	}
}

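/* Capture the faulting address information (FAR/HPFAR) for this exit. */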
static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
{
	return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
}

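/*
 * Restore the guest's SVE state: set the vector length via ZCR_EL2, reload
 * the Z/P/FFR registers and FPSR, then restore the guest's ZCR_EL1.
 */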
static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
{
	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
	__sve_restore_state(vcpu_sve_pffr(vcpu),
			    &vcpu->arch.ctxt.fp_regs.fpsr);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
}

/*
 * We trap the first access to the FP/SIMD to save the host context and
 * restore the guest context lazily.
 * If FP/SIMD is not implemented, handle the trap and inject an undefined
 * instruction exception to the guest. Similarly for trapped SVE accesses.
 */
static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	bool sve_guest;
	u8 esr_ec;
	u64 reg;

	if (!system_supports_fpsimd())
		return false;

	sve_guest = vcpu_has_sve(vcpu);
	esr_ec = kvm_vcpu_trap_get_class(vcpu);

	/* Don't handle SVE traps for non-SVE vcpus here: */
	if (!sve_guest && esr_ec != ESR_ELx_EC_FP_ASIMD)
		return false;

	/* Valid trap. Switch the context: */
	if (has_vhe()) {
		reg = CPACR_EL1_FPEN;
		if (sve_guest)
			reg |= CPACR_EL1_ZEN;

		sysreg_clear_set(cpacr_el1, 0, reg);
	} else {
		reg = CPTR_EL2_TFP;
		if (sve_guest)
			reg |= CPTR_EL2_TZ;

		sysreg_clear_set(cptr_el2, reg, 0);
	}
	isb();

	if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
		__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
		vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
	}

	if (sve_guest)
		__hyp_sve_restore_guest(vcpu);
	else
		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
		write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);

	vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;

	return true;
}

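/*
 * Check whether a trapped system register access targets one of the
 * pointer authentication key registers.
 */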
static inline bool esr_is_ptrauth_trap(u32 esr)
{
	switch (esr_sys64_to_sysreg(esr)) {
	case SYS_APIAKEYLO_EL1:
	case SYS_APIAKEYHI_EL1:
	case SYS_APIBKEYLO_EL1:
	case SYS_APIBKEYHI_EL1:
	case SYS_APDAKEYLO_EL1:
	case SYS_APDAKEYHI_EL1:
	case SYS_APDBKEYLO_EL1:
	case SYS_APDBKEYHI_EL1:
	case SYS_APGAKEYLO_EL1:
	case SYS_APGAKEYHI_EL1:
		return true;
	}

	return false;
}

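/* Save one LO/HI pair of pointer authentication keys into @ctxt. */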
#define __ptrauth_save_key(ctxt, key)					\
	do {								\
		u64 __val;						\
		__val = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\
		ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val;		\
		__val = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\
		ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val;		\
	} while (0)

DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);

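/*
 * Lazily enable pointer authentication for the guest: on the first trapped
 * key access, save the host keys into the per-CPU hyp context, flag ptrauth
 * as enabled for the vcpu, and set HCR_EL2.{API,APK} so that further ptrauth
 * instructions and key accesses are no longer trapped.
 */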
static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	struct kvm_cpu_context *ctxt;
	u64 val;

	if (!vcpu_has_ptrauth(vcpu))
		return false;

	ctxt = this_cpu_ptr(&kvm_hyp_ctxt);
	__ptrauth_save_key(ctxt, APIA);
	__ptrauth_save_key(ctxt, APIB);
	__ptrauth_save_key(ctxt, APDA);
	__ptrauth_save_key(ctxt, APDB);
	__ptrauth_save_key(ctxt, APGA);

	vcpu_ptrauth_enable(vcpu);

	val = read_sysreg(hcr_el2);
	val |= (HCR_API | HCR_APK);
	write_sysreg(val, hcr_el2);

	return true;
}

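/*
 * Trapped AArch64 system register access: try the GICv3 CPU interface
 * emulation first, then the lazy ptrauth key switch; anything else is
 * left for the host to handle.
 */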
static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
		return true;

	if (esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
		return kvm_hyp_handle_ptrauth(vcpu, exit_code);

	return false;
}

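/* Trapped AArch32 cp15 access: only the GICv3 CPU interface is handled here. */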
static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
		return true;

	return false;
}

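/*
 * Snapshot the fault information for the exit; if it cannot be resolved,
 * return to the guest so that the faulting access is replayed.
 */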
static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (!__populate_fault_info(vcpu))
		return true;

	return false;
}
static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
	__alias(kvm_hyp_handle_memory_fault);
static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
	__alias(kvm_hyp_handle_memory_fault);

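/*
 * Data aborts: capture the fault information and, when the GICv2 CPU
 * interface is trapped, emulate valid MMIO accesses to it at hyp.
 */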
static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
		return true;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		bool valid;

		valid = kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_abt_issea(vcpu) &&
			!kvm_vcpu_abt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1)
				return true;

			/* Promote an illegal access to an SError. */
			if (ret == -1)
				*exit_code = ARM_EXCEPTION_EL1_SERROR;
		}
	}

	return false;
}

typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);

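/*
 * The exit handler array and the early exit filter are provided by the
 * mode-specific (VHE/nVHE) switch code that includes this header.
 */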
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);

static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);

/*
 * Allow the hypervisor to handle the exit with an exit handler if it has one.
 *
 * Returns true if the hypervisor handled the exit, and control should go back
 * to the guest, or false if it hasn't.
 */
static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
	exit_handler_fn fn;

	fn = handlers[kvm_vcpu_trap_get_class(vcpu)];

	if (fn)
		return fn(vcpu, exit_code);

	return false;
}

/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/*
	 * Save PSTATE early so that we can evaluate the vcpu mode
	 * early on.
	 */
	vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);

	/*
	 * Check whether we want to repaint the state one way or
	 * another.
	 */
	early_exit_filter(vcpu, exit_code);

	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);

	if (ARM_SERROR_PENDING(*exit_code) &&
	    ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) {
		u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);

		/*
		 * HVC already has an adjusted PC, which we need to
		 * correct in order to return to the right PC after
		 * having injected the SError.
		 *
		 * SMC, on the other hand, is *trapped*, meaning its
		 * preferred return address is the SMC itself.
		 */
		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64)
			write_sysreg_el2(read_sysreg_el2(SYS_ELR) - 4, SYS_ELR);
	}

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (*exit_code != ARM_EXCEPTION_TRAP)
		goto exit;

	/* Check if there's an exit handler and allow it to handle the exit. */
	if (kvm_hyp_handle_exit(vcpu, exit_code))
		goto guest;
exit:
	/* Return to the host kernel and handle the exit */
	return false;

guest:
	/* Re-enter the guest */
	asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412));
	return true;
}

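/*
 * An unexpected exception was taken at EL2. Walk the hyp exception table
 * for a fixup matching the faulting instruction; if none is found, arrange
 * to panic once the host context has been restored.
 */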
static inline void __kvm_unexpected_el2_exception(void)
{
	extern char __guest_exit_panic[];
	unsigned long addr, fixup;
	struct exception_table_entry *entry, *end;
	unsigned long elr_el2 = read_sysreg(elr_el2);

	entry = &__start___kvm_ex_table;
	end = &__stop___kvm_ex_table;

	while (entry < end) {
		addr = (unsigned long)&entry->insn + entry->insn;
		fixup = (unsigned long)&entry->fixup + entry->fixup;

		if (addr != elr_el2) {
			entry++;
			continue;
		}

		write_sysreg(fixup, elr_el2);
		return;
	}

	/* Trigger a panic after restoring the hyp context. */
	write_sysreg(__guest_exit_panic, elr_el2);
}

#endif /* __ARM64_KVM_HYP_SWITCH_H__ */