/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

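/*
 * Helpers for AArch32 guests (banked GPR/SPSR access, condition-code
 * checks, instruction skipping) and exception injection, declared here
 * and defined elsewhere in the KVM/arm64 code.
 */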
unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);

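/* HCR_EL2.RW clear means the guest's EL1 executes in AArch32 state. */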
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.hcr_el2 & HCR_RW);
}

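/*
 * Build the vCPU's initial HCR_EL2 trap/virtualisation configuration
 * from the baseline guest flags, the host's CPU capabilities and the
 * features userspace selected for this vCPU.
 */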
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
        if (is_kernel_in_hyp_mode())
                vcpu->arch.hcr_el2 |= HCR_E2H;
        if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
                /* route synchronous external abort exceptions to EL2 */
                vcpu->arch.hcr_el2 |= HCR_TEA;
                /* trap error record accesses */
                vcpu->arch.hcr_el2 |= HCR_TERR;
        }

        if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
                vcpu->arch.hcr_el2 |= HCR_FWB;
        } else {
                /*
                 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
                 * get set in SCTLR_EL1 such that we can detect when the guest
                 * MMU gets turned on and do the necessary cache maintenance
                 * then.
                 */
                vcpu->arch.hcr_el2 |= HCR_TVM;
        }

        if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
                vcpu->arch.hcr_el2 &= ~HCR_RW;

        /*
         * TID3: trap feature register accesses that we virtualise.
         * For now this is conditional, since no AArch32 feature regs
         * are currently virtualised.
         */
        if (!vcpu_el1_is_32bit(vcpu))
                vcpu->arch.hcr_el2 |= HCR_TID3;

        if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
            vcpu_el1_is_32bit(vcpu))
                vcpu->arch.hcr_el2 |= HCR_TID2;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu->arch.hcr_el2;
}

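/*
 * Relax WFx trapping, typically when this vCPU is the only runnable
 * task on its physical CPU: WFE is never trapped then, and WFI is only
 * trapped if no directly injected interrupts (vLPIs, or vSGIs when
 * nassgireq is set) could be pending, since a trapped WFI cannot
 * observe those.
 */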
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 &= ~HCR_TWE;
        if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
            vcpu->kvm->arch.vgic.nassgireq)
                vcpu->arch.hcr_el2 &= ~HCR_TWI;
        else
                vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 |= HCR_TWE;
        vcpu->arch.hcr_el2 |= HCR_TWI;
}

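/*
 * HCR_EL2.API/APK, when set, *disable* the traps on pointer
 * authentication instructions and key registers; "enabling" ptrauth
 * for the guest therefore means setting both bits.
 */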
static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

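/*
 * VSESR_EL2 supplies the ESR syndrome reported to the guest for a
 * virtual SError injected via HCR_EL2.VSE.
 */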
static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
        vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
        return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return kvm_condition_valid32(vcpu);

        return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
        *vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
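/*
 * For instance, a typical MMIO-read completion (sketched here, not the
 * exact in-tree caller) writes the loaded data back to the register
 * named in the data abort syndrome:
 *
 *      vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
 */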
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
                                                  u8 reg_num)
{
        return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
                                         unsigned long val)
{
        if (reg_num != 31)
                vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}

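/*
 * While the vCPU's system registers are loaded on the CPU, SPSR_EL1
 * lives in hardware and is accessed via the EL1 sysreg accessors;
 * otherwise the in-memory copy is authoritative.
 */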
static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return vcpu_read_spsr32(vcpu);

        if (vcpu->arch.sysregs_loaded_on_cpu)
                return read_sysreg_el1(SYS_SPSR);
        else
                return __vcpu_sys_reg(vcpu, SPSR_EL1);
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
        if (vcpu_mode_is_32bit(vcpu)) {
                vcpu_write_spsr32(vcpu, v);
                return;
        }

        if (vcpu->arch.sysregs_loaded_on_cpu)
                write_sysreg_el1(v, SYS_SPSR);
        else
                __vcpu_sys_reg(vcpu, SPSR_EL1) = v;
}

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
        const unsigned long overlap = BIT(24) | BIT(21);
        unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

        spsr &= ~overlap;

        spsr |= dit << 21;

        return spsr;
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
        u32 mode;

        if (vcpu_mode_is_32bit(vcpu)) {
                mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
                return mode > PSR_AA32_MODE_USR;
        }

        mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

        return mode != PSR_MODE_EL0t;
}

static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.esr_el2;
}

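/*
 * Condition code of a trapped conditional instruction, or -1 when
 * ESR_ELx.CV is clear and the COND field is not valid.
 */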
static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
        u32 esr = kvm_vcpu_get_esr(vcpu);

        if (esr & ESR_ELx_CV)
                return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

        return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.far_el2;
}

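/*
 * HPFAR_EL2 reports the faulting IPA shifted right by 12 and stored in
 * the FIPA field at bit 4; masking and shifting left by 8 reconstructs
 * the page-aligned IPA.
 */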
static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
        return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
        return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

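/* Access size of a data abort in bytes (1, 2, 4 or 8), from ESR_ELx.SAS. */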
static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
        return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
        return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

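/*
 * Fault status code accessors: the full FSC, its type (with the level
 * bits masked off) and the translation level it occurred at.
 */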
static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
}

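/*
 * True if the abort was caused by a synchronous external abort or an
 * ECC/parity error, either on the access itself or on a translation
 * table walk.
 */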
static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
        switch (kvm_vcpu_trap_get_fault(vcpu)) {
        case FSC_SEA:
        case FSC_SEA_TTW0:
        case FSC_SEA_TTW1:
        case FSC_SEA_TTW2:
        case FSC_SEA_TTW3:
        case FSC_SECC:
        case FSC_SECC_TTW0:
        case FSC_SECC_TTW1:
        case FSC_SECC_TTW2:
        case FSC_SECC_TTW3:
                return true;
        default:
                return false;
        }
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
        u32 esr = kvm_vcpu_get_esr(vcpu);

        return ESR_ELx_SYS64_ISS_RT(esr);
}

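/*
 * A stage-2 fault taken on a stage-1 page table walk is treated as a
 * write: the walk may need to update the access/dirty bits in the
 * guest's page tables.
 */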
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
        if (kvm_vcpu_abt_iss1tw(vcpu))
                return true;

        if (kvm_vcpu_trap_is_iabt(vcpu))
                return false;

        return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
        return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

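/*
 * Force the guest into big-endian mode: via PSTATE.E for AArch32, via
 * SCTLR_EL1.EE (bit 25) for AArch64.
 */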
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu)) {
                *vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
        } else {
                u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
                sctlr |= (1 << 25);
                vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
        }
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

        return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
                                                    unsigned long data,
                                                    unsigned int len)
{
        if (kvm_vcpu_is_be(vcpu)) {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return be16_to_cpu(data & 0xffff);
                case 4:
                        return be32_to_cpu(data & 0xffffffff);
                default:
                        return be64_to_cpu(data);
                }
        } else {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return le16_to_cpu(data & 0xffff);
                case 4:
                        return le32_to_cpu(data & 0xffffffff);
                default:
                        return le64_to_cpu(data);
                }
        }

        return data;    /* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
                                                    unsigned long data,
                                                    unsigned int len)
{
        if (kvm_vcpu_is_be(vcpu)) {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return cpu_to_be16(data & 0xffff);
                case 4:
                        return cpu_to_be32(data & 0xffffffff);
                default:
                        return cpu_to_be64(data);
                }
        } else {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return cpu_to_le16(data & 0xffff);
                case 4:
                        return cpu_to_le32(data & 0xffffffff);
                default:
                        return cpu_to_le64(data);
                }
        }

        return data;    /* Leave LE untouched */
}

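/*
 * Skip the trapped instruction: 2 or 4 bytes for AArch32 (including
 * advancing the IT state), always 4 bytes for AArch64, where any
 * pending branch type (PSTATE.BTYPE) is also cleared.
 */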
static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
        if (vcpu_mode_is_32bit(vcpu)) {
                kvm_skip_instr32(vcpu, is_wide_instr);
        } else {
                *vcpu_pc(vcpu) += 4;
                *vcpu_cpsr(vcpu) &= ~PSR_BTYPE_MASK;
        }

        /* advance the singlestep state machine */
        *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
}

/*
 * Skip an instruction which has been emulated at hyp while most guest sysregs
 * are live.
 */
static __always_inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
{
        *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
        vcpu_gp_regs(vcpu)->pstate = read_sysreg_el2(SYS_SPSR);

        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

        write_sysreg_el2(vcpu_gp_regs(vcpu)->pstate, SYS_SPSR);
        write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
}

static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
{
        return test_bit(feature, vcpu->arch.features);
}

#endif /* __ARM64_KVM_EMULATE_H__ */