/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/bitfield.h>
#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_nested.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};

#define kvm_exception_type_names		\
	{ except_type_sync,	"SYNC"   },	\
	{ except_type_irq,	"IRQ"    },	\
	{ except_type_fiq,	"FIQ"    },	\
	{ except_type_serror,	"SERROR" }

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_size_fault(struct kvm_vcpu *vcpu);

unsigned long get_except64_offset(unsigned long psr, unsigned long target_mode,
				  enum exception_type type);
unsigned long get_except64_cpsr(unsigned long old, bool has_mte,
				unsigned long sctlr, unsigned long mode);

void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);

void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);

static inline void kvm_inject_nested_sve_trap(struct kvm_vcpu *vcpu)
{
	u64 esr = FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_SVE) |
		  ESR_ELx_IL;

	kvm_inject_nested_sync(vcpu, esr);
}

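/*
 * Two views of the same thing: in hypervisor context, hcr_el2 has already
 * been configured for the guest, so HCR_EL2.RW gives the EL1 register width
 * directly; elsewhere we rely on the vcpu feature flag instead.
 */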
#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}
#else
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
}
#endif

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	if (!vcpu_has_run_once(vcpu))
		vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;

	/*
	 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
	 * get set in SCTLR_EL1 such that we can detect when the guest
	 * MMU gets turned on and do the necessary cache maintenance
	 * then.
	 */
	if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		vcpu->arch.hcr_el2 |= HCR_TVM;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

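/*
 * WFE traps are always cleared here; WFI traps are only cleared when the
 * vcpu has directly-injected vLPIs or the vGIC uses vSGIs, since such
 * interrupts are invisible to a software "anything pending?" check.
 */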
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
						  u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
					 unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}

static inline bool vcpu_is_el2_ctxt(const struct kvm_cpu_context *ctxt)
{
	switch (ctxt->regs.pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) {
	case PSR_MODE_EL2h:
	case PSR_MODE_EL2t:
		return true;
	default:
		return false;
	}
}

static inline bool vcpu_is_el2(const struct kvm_vcpu *vcpu)
{
	return vcpu_is_el2_ctxt(&vcpu->arch.ctxt);
}

static inline bool __vcpu_el2_e2h_is_set(const struct kvm_cpu_context *ctxt)
{
	return (!cpus_have_final_cap(ARM64_HAS_HCR_NV1) ||
		(ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H));
}

static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu)
{
	return __vcpu_el2_e2h_is_set(&vcpu->arch.ctxt);
}

static inline bool __vcpu_el2_tge_is_set(const struct kvm_cpu_context *ctxt)
{
	return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_TGE;
}

static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
{
	return __vcpu_el2_tge_is_set(&vcpu->arch.ctxt);
}

static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt)
{
	/*
	 * We are in a hypervisor context if the vcpu mode is EL2, or if
	 * both the E2H and TGE bits are set. The latter means we are in
	 * the user space of the VHE kernel; the ARMv8.1 ARM describes
	 * this as 'InHost'.
	 *
	 * Note that HCR_EL2.{E2H,TGE}={0,1} isn't really handled in the
	 * rest of the KVM code, and will result in a misbehaving guest.
	 */
	return vcpu_is_el2_ctxt(ctxt) ||
		(__vcpu_el2_e2h_is_set(ctxt) && __vcpu_el2_tge_is_set(ctxt)) ||
		__vcpu_el2_tge_is_set(ctxt);
}

static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
{
	return vcpu_has_nv(vcpu) && __is_hyp_ctxt(&vcpu->arch.ctxt);
}

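/*
 * Helpers for get_el2_to_el1_mapping(): a "pure" EL2 register has no EL1
 * counterpart (the mapping returns the register itself), while a "mapped"
 * EL2 register shadows an EL1 register, optionally through a translation
 * function for the non-VHE layout.
 */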
#define PURE_EL2_SYSREG(el2)						\
	case el2: {							\
		*el1r = el2;						\
		return true;						\
	}

#define MAPPED_EL2_SYSREG(el2, el1, fn)					\
	case el2: {							\
		*xlate = fn;						\
		*el1r = el1;						\
		return true;						\
	}

static inline bool get_el2_to_el1_mapping(unsigned int reg,
					  unsigned int *el1r, u64 (**xlate)(u64))
{
	switch (reg) {
		PURE_EL2_SYSREG(  VPIDR_EL2	);
		PURE_EL2_SYSREG(  VMPIDR_EL2	);
		PURE_EL2_SYSREG(  ACTLR_EL2	);
		PURE_EL2_SYSREG(  HCR_EL2	);
		PURE_EL2_SYSREG(  MDCR_EL2	);
		PURE_EL2_SYSREG(  HSTR_EL2	);
		PURE_EL2_SYSREG(  HACR_EL2	);
		PURE_EL2_SYSREG(  VTTBR_EL2	);
		PURE_EL2_SYSREG(  VTCR_EL2	);
		PURE_EL2_SYSREG(  RVBAR_EL2	);
		PURE_EL2_SYSREG(  TPIDR_EL2	);
		PURE_EL2_SYSREG(  HPFAR_EL2	);
		PURE_EL2_SYSREG(  CNTHCTL_EL2	);
		MAPPED_EL2_SYSREG(SCTLR_EL2,   SCTLR_EL1,
				  translate_sctlr_el2_to_sctlr_el1	);
		MAPPED_EL2_SYSREG(CPTR_EL2,    CPACR_EL1,
				  translate_cptr_el2_to_cpacr_el1	);
		MAPPED_EL2_SYSREG(TTBR0_EL2,   TTBR0_EL1,
				  translate_ttbr0_el2_to_ttbr0_el1	);
		MAPPED_EL2_SYSREG(TTBR1_EL2,   TTBR1_EL1,   NULL	);
		MAPPED_EL2_SYSREG(TCR_EL2,     TCR_EL1,
				  translate_tcr_el2_to_tcr_el1		);
		MAPPED_EL2_SYSREG(VBAR_EL2,    VBAR_EL1,    NULL	);
		MAPPED_EL2_SYSREG(AFSR0_EL2,   AFSR0_EL1,   NULL	);
		MAPPED_EL2_SYSREG(AFSR1_EL2,   AFSR1_EL1,   NULL	);
		MAPPED_EL2_SYSREG(ESR_EL2,     ESR_EL1,     NULL	);
		MAPPED_EL2_SYSREG(FAR_EL2,     FAR_EL1,     NULL	);
		MAPPED_EL2_SYSREG(MAIR_EL2,    MAIR_EL1,    NULL	);
		MAPPED_EL2_SYSREG(AMAIR_EL2,   AMAIR_EL1,   NULL	);
		MAPPED_EL2_SYSREG(ELR_EL2,     ELR_EL1,     NULL	);
		MAPPED_EL2_SYSREG(SPSR_EL2,    SPSR_EL1,    NULL	);
		MAPPED_EL2_SYSREG(ZCR_EL2,     ZCR_EL1,     NULL	);
	default:
		return false;
	}
}

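/*
 * Read a guest system register: use the live CPU copy when the register is
 * currently loaded (taking the EL2->EL1 remapping into account for a vcpu
 * running in vEL2), and fall back to the in-memory copy otherwise.
 */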
static inline u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val = 0x8badf00d8badf00d;
	u64 (*xlate)(u64) = NULL;
	unsigned int el1r;

	if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
		goto memory_read;

	if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
		if (!is_hyp_ctxt(vcpu))
			goto memory_read;

		/*
		 * If this register does not have an EL1 counterpart,
		 * then read the stored EL2 version.
		 */
		if (reg == el1r)
			goto memory_read;

		/*
		 * If we have a non-VHE guest and the sysreg requires
		 * translation to be used at EL1, use the in-memory
		 * copy instead.
		 */
		if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
			goto memory_read;

		/* Get the current version of the EL1 counterpart. */
		WARN_ON(!__vcpu_read_sys_reg_from_cpu(el1r, &val));
		return val;
	}

	/* EL1 register can't be on the CPU if the guest is in vEL2. */
	if (unlikely(is_hyp_ctxt(vcpu)))
		goto memory_read;

	if (__vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

memory_read:
	return __vcpu_sys_reg(vcpu, reg);
}

static inline void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	u64 (*xlate)(u64) = NULL;
	unsigned int el1r;

	if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
		goto memory_write;

	if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
		if (!is_hyp_ctxt(vcpu))
			goto memory_write;

		/*
		 * Always store a copy of the write to memory to avoid having
		 * to reverse-translate virtual EL2 system registers for a
		 * non-VHE guest hypervisor.
		 */
		__vcpu_sys_reg(vcpu, reg) = val;

		/* No EL1 counterpart? We're done here. */
		if (reg == el1r)
			return;

		if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
			val = xlate(val);

		/* Redirect this to the EL1 version of the register. */
		WARN_ON(!__vcpu_write_sys_reg_to_cpu(val, el1r));
		return;
	}

	/* EL1 register can't be on the CPU if the guest is in vEL2. */
	if (unlikely(is_hyp_ctxt(vcpu)))
		goto memory_write;

	if (__vcpu_write_sys_reg_to_cpu(val, reg))
		return;

memory_write:
	__vcpu_sys_reg(vcpu, reg) = val;
}

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}

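/*
 * Return true when the vcpu is in a privileged mode, i.e. anything other
 * than AArch32 User or AArch64 EL0t.
 */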
static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

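/*
 * HPFAR_EL2.FIPA holds bits [51:12] of the faulting IPA starting at bit 4,
 * so masking and shifting left by 8 reconstructs the IPA of the faulting
 * page (the page offset, if needed, comes from FAR_EL2).
 */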
static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

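/* Access size in bytes, decoded from the ESR_ELx.SAS field (1 << SAS). */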
static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static inline
bool kvm_vcpu_trap_is_permission_fault(const struct kvm_vcpu *vcpu)
{
	return esr_fsc_is_permission_fault(kvm_vcpu_get_esr(vcpu));
}

static inline
bool kvm_vcpu_trap_is_translation_fault(const struct kvm_vcpu *vcpu)
{
	return esr_fsc_is_translation_fault(kvm_vcpu_get_esr(vcpu));
}

static inline
u64 kvm_vcpu_trap_get_perm_fault_granule(const struct kvm_vcpu *vcpu)
{
	unsigned long esr = kvm_vcpu_get_esr(vcpu);

	BUG_ON(!esr_fsc_is_permission_fault(esr));
	return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(esr & ESR_ELx_FSC_LEVEL));
}

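/*
 * True when the abort was caused by a synchronous external abort (including
 * parity/ECC errors), whether on the access itself or on a stage 1
 * translation table walk.
 */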
static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case ESR_ELx_FSC_EXTABT:
	case ESR_ELx_FSC_SEA_TTW(-1) ... ESR_ELx_FSC_SEA_TTW(3):
	case ESR_ELx_FSC_SECC:
	case ESR_ELx_FSC_SECC_TTW(-1) ... ESR_ELx_FSC_SECC_TTW(3):
		return true;
	default:
		return false;
	}
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);
	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu)) {
		/*
		 * Only a permission fault on a S1PTW should be
		 * considered as a write. Otherwise, page tables baked
		 * in a read-only memslot will result in an exception
		 * being delivered in the guest.
		 *
		 * The drawback is that we end up faulting twice if the
		 * guest is using any of HW AF/DB: a translation fault
		 * to map the page containing the PT (read only at
		 * first), then a permission fault to allow the flags
		 * to be set.
		 */
		return kvm_vcpu_trap_is_permission_fault(vcpu);
	}

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return __vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		sctlr |= SCTLR_ELx_EE;
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	if (vcpu_mode_priv(vcpu))
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
	else
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E);
}

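/*
 * Byte-swap MMIO data between the guest and the host when the vcpu runs
 * big-endian; the access length selects the swap width.
 */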
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;	/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;	/* Leave LE untouched */
}

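/*
 * Adjusting the guest PC and pending an exception are mutually exclusive
 * ways of resolving an exit; the WARN_ONs below catch any attempt to do
 * both for the same exit.
 */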
static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
	WARN_ON(vcpu_get_flag(vcpu, PENDING_EXCEPTION));
	vcpu_set_flag(vcpu, INCREMENT_PC);
}

#define kvm_pend_exception(v, e)					\
	do {								\
		WARN_ON(vcpu_get_flag((v), INCREMENT_PC));		\
		vcpu_set_flag((v), PENDING_EXCEPTION);			\
		vcpu_set_flag((v), e);					\
	} while (0)

#define __build_check_all_or_none(r, bits)				\
	BUILD_BUG_ON(((r) & (bits)) && ((r) & (bits)) != (bits))

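/*
 * Translate a {clear, set} pair expressed in CPACR_ELx terms into the
 * equivalent CPTR_EL2 (nVHE) bits. The FPEN/ZEN/SMEN enable fields have the
 * opposite polarity to the TFP/TZ/TSM trap bits, hence the clr/set swap;
 * TTA/TAM/TCPAC keep the same polarity.
 */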
#define __cpacr_to_cptr_clr(clr, set)					\
	({								\
		u64 cptr = 0;						\
									\
		if ((set) & CPACR_ELx_FPEN)				\
			cptr |= CPTR_EL2_TFP;				\
		if ((set) & CPACR_ELx_ZEN)				\
			cptr |= CPTR_EL2_TZ;				\
		if ((set) & CPACR_ELx_SMEN)				\
			cptr |= CPTR_EL2_TSM;				\
		if ((clr) & CPACR_ELx_TTA)				\
			cptr |= CPTR_EL2_TTA;				\
		if ((clr) & CPTR_EL2_TAM)				\
			cptr |= CPTR_EL2_TAM;				\
		if ((clr) & CPTR_EL2_TCPAC)				\
			cptr |= CPTR_EL2_TCPAC;				\
									\
		cptr;							\
	})

#define __cpacr_to_cptr_set(clr, set)					\
	({								\
		u64 cptr = 0;						\
									\
		if ((clr) & CPACR_ELx_FPEN)				\
			cptr |= CPTR_EL2_TFP;				\
		if ((clr) & CPACR_ELx_ZEN)				\
			cptr |= CPTR_EL2_TZ;				\
		if ((clr) & CPACR_ELx_SMEN)				\
			cptr |= CPTR_EL2_TSM;				\
		if ((set) & CPACR_ELx_TTA)				\
			cptr |= CPTR_EL2_TTA;				\
		if ((set) & CPTR_EL2_TAM)				\
			cptr |= CPTR_EL2_TAM;				\
		if ((set) & CPTR_EL2_TCPAC)				\
			cptr |= CPTR_EL2_TCPAC;				\
									\
		cptr;							\
	})

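/*
 * Update the FP/SVE/SME trap controls using CPACR_ELx encodings, regardless
 * of the host configuration: VHE/hVHE write cpacr_el1 directly, nVHE
 * converts the bits to the CPTR_EL2 layout first.
 */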
#define cpacr_clear_set(clr, set)					\
	do {								\
		BUILD_BUG_ON((set) & CPTR_VHE_EL2_RES0);		\
		BUILD_BUG_ON((clr) & CPACR_ELx_E0POE);			\
		__build_check_all_or_none((clr), CPACR_ELx_FPEN);	\
		__build_check_all_or_none((set), CPACR_ELx_FPEN);	\
		__build_check_all_or_none((clr), CPACR_ELx_ZEN);	\
		__build_check_all_or_none((set), CPACR_ELx_ZEN);	\
		__build_check_all_or_none((clr), CPACR_ELx_SMEN);	\
		__build_check_all_or_none((set), CPACR_ELx_SMEN);	\
									\
		if (has_vhe() || has_hvhe())				\
			sysreg_clear_set(cpacr_el1, clr, set);		\
		else							\
			sysreg_clear_set(cptr_el2,			\
					 __cpacr_to_cptr_clr(clr, set),	\
					 __cpacr_to_cptr_set(clr, set));\
	} while (0)

static __always_inline void kvm_write_cptr_el2(u64 val)
{
	if (has_vhe() || has_hvhe())
		write_sysreg(val, cpacr_el1);
	else
		write_sysreg(val, cptr_el2);
}

/* Resets the value of cptr_el2 when returning to the host. */
static __always_inline void __kvm_reset_cptr_el2(struct kvm *kvm)
{
	u64 val;

	if (has_vhe()) {
		val = (CPACR_ELx_FPEN | CPACR_EL1_ZEN_EL1EN);
		if (cpus_have_final_cap(ARM64_SME))
			val |= CPACR_EL1_SMEN_EL1EN;
	} else if (has_hvhe()) {
		val = CPACR_ELx_FPEN;

		if (!kvm_has_sve(kvm) || !guest_owns_fp_regs())
			val |= CPACR_ELx_ZEN;
		if (cpus_have_final_cap(ARM64_SME))
			val |= CPACR_ELx_SMEN;
	} else {
		val = CPTR_NVHE_EL2_RES1;

		if (kvm_has_sve(kvm) && guest_owns_fp_regs())
			val |= CPTR_EL2_TZ;
		if (!cpus_have_final_cap(ARM64_SME))
			val |= CPTR_EL2_TSM;
	}

	kvm_write_cptr_el2(val);
}

#ifdef __KVM_NVHE_HYPERVISOR__
#define kvm_reset_cptr_el2(v)	__kvm_reset_cptr_el2(kern_hyp_va((v)->kvm))
#else
#define kvm_reset_cptr_el2(v)	__kvm_reset_cptr_el2((v)->kvm)
#endif

/*
 * Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE
 * format if E2H isn't set.
 */
static inline u64 vcpu_sanitised_cptr_el2(const struct kvm_vcpu *vcpu)
{
	u64 cptr = __vcpu_sys_reg(vcpu, CPTR_EL2);

	if (!vcpu_el2_e2h_is_set(vcpu))
		cptr = translate_cptr_el2_to_cpacr_el1(cptr);

	return cptr;
}

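/*
 * Decode a CPACR_ELx.{FPEN,ZEN} field for the guest hypervisor: 0b00 and
 * 0b10 trap at every EL, 0b01 only traps "EL0" accesses (here: the vcpu
 * running with TGE set outside vEL2), and 0b11 disables the trap.
 */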
static inline bool ____cptr_xen_trap_enabled(const struct kvm_vcpu *vcpu,
					     unsigned int xen)
{
	switch (xen) {
	case 0b00:
	case 0b10:
		return true;
	case 0b01:
		return vcpu_el2_tge_is_set(vcpu) && !vcpu_is_el2(vcpu);
	case 0b11:
	default:
		return false;
	}
}

#define __guest_hyp_cptr_xen_trap_enabled(vcpu, xen)			\
	(!vcpu_has_nv(vcpu) ? false :					\
	 ____cptr_xen_trap_enabled(vcpu,				\
				   SYS_FIELD_GET(CPACR_ELx, xen,	\
						 vcpu_sanitised_cptr_el2(vcpu))))

static inline bool guest_hyp_fpsimd_traps_enabled(const struct kvm_vcpu *vcpu)
{
	return __guest_hyp_cptr_xen_trap_enabled(vcpu, FPEN);
}

static inline bool guest_hyp_sve_traps_enabled(const struct kvm_vcpu *vcpu)
{
	return __guest_hyp_cptr_xen_trap_enabled(vcpu, ZEN);
}

/* Reset a vcpu's core registers. */
static inline void kvm_reset_vcpu_core(struct kvm_vcpu *vcpu)
{
	u32 pstate;

	if (vcpu_el1_is_32bit(vcpu)) {
		pstate = VCPU_RESET_PSTATE_SVC;
	} else if (vcpu_has_nv(vcpu)) {
		pstate = VCPU_RESET_PSTATE_EL2;
	} else {
		pstate = VCPU_RESET_PSTATE_EL1;
	}

	/* Reset core registers */
	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
	memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
	vcpu->arch.ctxt.spsr_abt = 0;
	vcpu->arch.ctxt.spsr_und = 0;
	vcpu->arch.ctxt.spsr_irq = 0;
	vcpu->arch.ctxt.spsr_fiq = 0;
	vcpu_gp_regs(vcpu)->pstate = pstate;
}

/* PSCI reset handling for a vcpu. */
static inline void kvm_reset_vcpu_psci(struct kvm_vcpu *vcpu,
				       struct vcpu_reset_state *reset_state)
{
	unsigned long target_pc = reset_state->pc;

	/* Gracefully handle Thumb2 entry point */
	if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
		target_pc &= ~1UL;
		vcpu_set_thumb(vcpu);
	}

	/* Propagate caller endianness */
	if (reset_state->be)
		kvm_vcpu_set_be(vcpu);

	*vcpu_pc(vcpu) = target_pc;
	vcpu_set_reg(vcpu, 0, reset_state->r0);
}

#endif /* __ARM64_KVM_EMULATE_H__ */