/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_nested.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};
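
/*
 * For illustration: the offset into the vector table is the base selected
 * by the source EL / register width plus the exception type above, e.g. an
 * IRQ taken from a lower EL running AArch64 uses
 * LOWER_EL_AArch64_VECTOR + except_type_irq = 0x400 + 0x80 = 0x480.
 */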

#define kvm_exception_type_names		\
	{ except_type_sync,	"SYNC"   },	\
	{ except_type_irq,	"IRQ"    },	\
	{ except_type_fiq,	"FIQ"    },	\
	{ except_type_serror,	"SERROR" }

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_size_fault(struct kvm_vcpu *vcpu);

unsigned long get_except64_offset(unsigned long psr, unsigned long target_mode,
				  enum exception_type type);
unsigned long get_except64_cpsr(unsigned long old, bool has_mte,
				unsigned long sctlr, unsigned long mode);

void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);

void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);

#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}
#else
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features);
}
#endif

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (has_vhe() || has_hvhe())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		vcpu->arch.hcr_el2 |= HCR_FWB;
	} else {
		/*
		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
		 * get set in SCTLR_EL1 such that we can detect when the guest
		 * MMU gets turned on and do the necessary cache maintenance
		 * then.
		 */
		vcpu->arch.hcr_el2 |= HCR_TVM;
	}

	if (cpus_have_final_cap(ARM64_HAS_EVT) &&
	    !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
		vcpu->arch.hcr_el2 |= HCR_TID4;
	else
		vcpu->arch.hcr_el2 |= HCR_TID2;

	if (vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;
}
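
/*
 * Illustrative sketch only (not part of this header): with HCR_EL2.TVM set
 * above, the trap handler for guest SCTLR_EL1 writes can spot the stage-1
 * MMU and caches being enabled roughly as follows, at which point the
 * deferred cache maintenance mentioned in vcpu_reset_hcr() can be done:
 *
 *	bool was_on = (old & (SCTLR_ELx_M | SCTLR_ELx_C)) ==
 *		      (SCTLR_ELx_M | SCTLR_ELx_C);
 *	bool now_on = (new & (SCTLR_ELx_M | SCTLR_ELx_C)) ==
 *		      (SCTLR_ELx_M | SCTLR_ELx_C);
 *	if (!was_on && now_on)
 *		... perform the cache maintenance and stop trapping ...
 *
 * 'old' and 'new' stand for the previous and newly-written SCTLR_EL1 values
 * and are placeholder names for illustration only.
 */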

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, they may give the wrong result
 * on AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
						  u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
					 unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}
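
/*
 * Usage sketch (hypothetical, for illustration only): an MMIO emulation path
 * decodes the transfer register from the ESR and goes through the accessors
 * above rather than indexing the GP register file directly:
 *
 *	int rt = kvm_vcpu_dabt_get_rd(vcpu);
 *
 * A guest store is then emulated with vcpu_get_reg(vcpu, rt), and a guest
 * load is completed with vcpu_set_reg(vcpu, rt, data). Register 31 reads as
 * zero and writes to it are discarded, matching the architectural behaviour
 * of XZR/WZR in the trapped instruction.
 */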

static inline bool vcpu_is_el2_ctxt(const struct kvm_cpu_context *ctxt)
{
	switch (ctxt->regs.pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) {
	case PSR_MODE_EL2h:
	case PSR_MODE_EL2t:
		return true;
	default:
		return false;
	}
}

static inline bool vcpu_is_el2(const struct kvm_vcpu *vcpu)
{
	return vcpu_is_el2_ctxt(&vcpu->arch.ctxt);
}

static inline bool __vcpu_el2_e2h_is_set(const struct kvm_cpu_context *ctxt)
{
	return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H;
}

static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu)
{
	return __vcpu_el2_e2h_is_set(&vcpu->arch.ctxt);
}

static inline bool __vcpu_el2_tge_is_set(const struct kvm_cpu_context *ctxt)
{
	return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_TGE;
}

static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
{
	return __vcpu_el2_tge_is_set(&vcpu->arch.ctxt);
}

static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt)
{
	/*
	 * We are in a hypervisor context if the vcpu mode is EL2, or if the
	 * E2H and TGE bits are both set. The latter means we are in the user
	 * space of the VHE kernel; the ARMv8.1 ARM describes this as 'InHost'.
	 *
	 * Note that HCR_EL2.{E2H,TGE}={0,1} isn't really handled by the rest
	 * of the KVM code, and will result in a misbehaving guest.
	 */
	return vcpu_is_el2_ctxt(ctxt) ||
		(__vcpu_el2_e2h_is_set(ctxt) && __vcpu_el2_tge_is_set(ctxt)) ||
		__vcpu_el2_tge_is_set(ctxt);
}

static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
{
	return __is_hyp_ctxt(&vcpu->arch.ctxt);
}
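
/*
 * For illustration, the combinations accepted by __is_hyp_ctxt() above are:
 *
 *	vcpu mode	E2H	TGE	hyp context?
 *	EL2		x	x	yes
 *	EL1/EL0		1	1	yes ('InHost')
 *	EL1/EL0		0	1	yes (combination left unhandled elsewhere)
 *	EL1/EL0		x	0	no
 */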

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * These show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}
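
/*
 * In other words: the two overlapping bits (24 and 21) are cleared from the
 * AArch64 view and the guest's DIT value is re-inserted at bit 21, its home
 * in the AArch32 layout; every other bit is passed through unchanged.
 */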

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}
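
/*
 * For illustration: a conditional AArch32 access trapped with a valid
 * condition field (ESR_ELx.CV set) and COND == 0b1110 (AL) makes
 * kvm_vcpu_get_condition() return 14, while a clear CV bit means the ESR
 * carries no condition information and -1 is returned instead.
 */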

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}
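
/*
 * Worked example: HPFAR_EL2 carries the faulting IPA's page number starting
 * at bit 4 of the register, so masking with HPFAR_MASK and shifting left by
 * 8 rebuilds the page-aligned IPA. A register value of 0x123450 (page number
 * 0x12345) thus yields an IPA of 0x12345000; the byte offset within the page
 * comes from FAR_EL2 via kvm_vcpu_get_hfar() when it is needed.
 */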

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
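
/*
 * Worked example: ESR_ELx.SAS is the two-bit access-size field, so the
 * returned width in bytes is 1 << SAS: 0b00 -> 1 (byte), 0b01 -> 2
 * (halfword), 0b10 -> 4 (word), 0b11 -> 8 (doubleword).
 */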

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
}

static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case ESR_ELx_FSC_EXTABT:
	case ESR_ELx_FSC_SEA_TTW0:
	case ESR_ELx_FSC_SEA_TTW1:
	case ESR_ELx_FSC_SEA_TTW2:
	case ESR_ELx_FSC_SEA_TTW3:
	case ESR_ELx_FSC_SECC:
	case ESR_ELx_FSC_SECC_TTW0:
	case ESR_ELx_FSC_SECC_TTW1:
	case ESR_ELx_FSC_SECC_TTW2:
	case ESR_ELx_FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);
	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu)) {
		/*
		 * Only a permission fault on a S1PTW should be
		 * considered as a write. Otherwise, page tables baked
		 * in a read-only memslot will result in an exception
		 * being delivered in the guest.
		 *
		 * The drawback is that we end up faulting twice if the
		 * guest is using any of HW AF/DB: a translation fault
		 * to map the page containing the PT (read only at
		 * first), then a permission fault to allow the flags
		 * to be set.
		 */
		switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
		case ESR_ELx_FSC_PERM:
			return true;
		default:
			return false;
		}
	}

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}
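
/*
 * Decision examples derived from the logic above: a stage-2 permission fault
 * taken on a stage-1 page-table walk (S1PTW) counts as a write, which lets
 * HW AF/DB updates of guest page tables proceed; a translation fault on the
 * same walk does not, so page tables sitting in a read-only memslot keep
 * working; and an instruction abort is never considered a write.
 */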

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		sctlr |= SCTLR_ELx_EE;
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	if (vcpu_mode_priv(vcpu))
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
	else
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E);
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}
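
/*
 * Worked example (illustration only, assuming the usual little-endian arm64
 * host): a big-endian guest storing the 32-bit value 0x11223344 traps with
 * that value in its transfer register; vcpu_data_guest_to_host(vcpu,
 * 0x11223344, 4) byte-swaps it to 0x44332211, so the host's native store
 * into the MMIO buffer produces the byte sequence 11 22 33 44 the guest
 * intended. vcpu_data_host_to_guest() performs the inverse conversion when
 * completing a guest load.
 */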

static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
	WARN_ON(vcpu_get_flag(vcpu, PENDING_EXCEPTION));
	vcpu_set_flag(vcpu, INCREMENT_PC);
}

#define kvm_pend_exception(v, e)					\
	do {								\
		WARN_ON(vcpu_get_flag((v), INCREMENT_PC));		\
		vcpu_set_flag((v), PENDING_EXCEPTION);			\
		vcpu_set_flag((v), e);					\
	} while (0)
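
/*
 * Usage sketch (illustrative only): the two flags are mutually exclusive for
 * a given exit. An emulation path that has completed an instruction calls
 * kvm_incr_pc(vcpu), whereas one that needs to deliver an exception instead
 * uses kvm_pend_exception(vcpu, <EXCEPT_* flag>); doing both on the same
 * exit trips the WARN_ON()s above.
 */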

static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
{
	return test_bit(feature, vcpu->arch.features);
}

static __always_inline void kvm_write_cptr_el2(u64 val)
{
	if (has_vhe() || has_hvhe())
		write_sysreg(val, cpacr_el1);
	else
		write_sysreg(val, cptr_el2);
}

static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
	u64 val;

	if (has_vhe()) {
		val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
		       CPACR_EL1_ZEN_EL1EN);
		if (cpus_have_final_cap(ARM64_SME))
			val |= CPACR_EL1_SMEN_EL1EN;
	} else if (has_hvhe()) {
		val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);

		if (!vcpu_has_sve(vcpu) ||
		    (vcpu->arch.fp_state != FP_STATE_GUEST_OWNED))
			val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN;
		if (cpus_have_final_cap(ARM64_SME))
			val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
	} else {
		val = CPTR_NVHE_EL2_RES1;

		if (vcpu_has_sve(vcpu) &&
		    (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
			val |= CPTR_EL2_TZ;
		if (cpus_have_final_cap(ARM64_SME))
			val &= ~CPTR_EL2_TSM;
	}

	return val;
}

static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
	u64 val = kvm_get_reset_cptr_el2(vcpu);

	kvm_write_cptr_el2(val);
}

static inline int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
{
	/*
	 * For now, make sure that both address/generic pointer authentication
	 * features are requested by userspace together and that the system
	 * supports these capabilities.
	 */
	if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) ||
	    !vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC) ||
	    !system_has_full_ptr_auth())
		return -EINVAL;

	vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH);
	return 0;
}

/* Reset a vcpu's core registers. */
static inline void kvm_reset_vcpu_core(struct kvm_vcpu *vcpu)
{
	u32 pstate;

	if (vcpu_el1_is_32bit(vcpu)) {
		pstate = VCPU_RESET_PSTATE_SVC;
	} else if (vcpu_has_nv(vcpu)) {
		pstate = VCPU_RESET_PSTATE_EL2;
	} else {
		pstate = VCPU_RESET_PSTATE_EL1;
	}

	/* Reset core registers */
	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
	memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
	vcpu->arch.ctxt.spsr_abt = 0;
	vcpu->arch.ctxt.spsr_und = 0;
	vcpu->arch.ctxt.spsr_irq = 0;
	vcpu->arch.ctxt.spsr_fiq = 0;
	vcpu_gp_regs(vcpu)->pstate = pstate;
}

/* PSCI reset handling for a vcpu. */
static inline void kvm_reset_vcpu_psci(struct kvm_vcpu *vcpu,
				       struct vcpu_reset_state *reset_state)
{
	unsigned long target_pc = reset_state->pc;

	/* Gracefully handle Thumb2 entry point */
	if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
		target_pc &= ~1UL;
		vcpu_set_thumb(vcpu);
	}

	/* Propagate caller endianness */
	if (reset_state->be)
		kvm_vcpu_set_be(vcpu);

	*vcpu_pc(vcpu) = target_pc;
	vcpu_set_reg(vcpu, 0, reset_state->r0);
}
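
/*
 * For illustration: a 32-bit vcpu brought up via PSCI with an entry point of
 * 0x80001 resumes at PC 0x80000 with the Thumb (T) bit set in its PSTATE,
 * matching the AArch32 convention that bit 0 of a branch target selects the
 * Thumb instruction set.
 */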

#endif /* __ARM64_KVM_EMULATE_H__ */