/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};
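
/*
 * Note: the *_VECTOR bases above are combined with an exception_type to form
 * the offset into the vector table, e.g. an IRQ taken from a lower EL running
 * AArch64 lands at VBAR_ELx + 0x400 + 0x80 = VBAR_ELx + 0x480 (see
 * get_except64_offset()).
 */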

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);

unsigned long get_except64_offset(unsigned long psr, unsigned long target_mode,
				  enum exception_type type);
unsigned long get_except64_cpsr(unsigned long old, bool has_mte,
				unsigned long sctlr, unsigned long mode);

static inline int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
{
	/*
	 * For now make sure that both address/generic pointer authentication
	 * features are requested by the userspace together and the system
	 * supports these capabilities.
	 */
	if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features) ||
	    !system_has_full_ptr_auth())
		return -EINVAL;

	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH;
	return 0;
}
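
/*
 * Illustrative only: userspace opts in to both ptrauth flavours at once by
 * setting the corresponding bits in struct kvm_vcpu_init::features[] before
 * KVM_ARM_VCPU_INIT, e.g.
 *
 *	init->features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS) |
 *			     (1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
 */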

static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		vcpu->arch.hcr_el2 |= HCR_FWB;
	} else {
		/*
		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
		 * get set in SCTLR_EL1 such that we can detect when the guest
		 * MMU gets turned on and do the necessary cache maintenance
		 * then.
		 */
		vcpu->arch.hcr_el2 |= HCR_TVM;
	}

	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	/*
	 * TID3: trap feature register accesses that we virtualise.
	 * For now this is conditional, since no AArch32 feature regs
	 * are currently virtualised.
	 */
	if (!vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID3;

	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
	    vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID2;

	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}
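
/*
 * HCR_EL2.TWE/TWI control whether guest WFE/WFI instructions trap to EL2.
 * The helpers below relax or (re)arm those traps; WFI trapping is only
 * dropped when the vcpu can take directly injected interrupts (GICv4 vLPIs
 * or GICv4.1 vSGIs), so that it can usefully wait in place.
 */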
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
						  u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
					 unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}
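
/*
 * Illustrative sketch of the accessors above when decoding a trapped MMIO
 * write (the register number comes from ESR_EL2, as required):
 *
 *	int rt = kvm_vcpu_dabt_get_rd(vcpu);
 *	unsigned long data = vcpu_get_reg(vcpu, rt);
 *	data = vcpu_data_guest_to_host(vcpu, data, kvm_vcpu_dabt_get_as(vcpu));
 */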

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}
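
/*
 * HPFAR_EL2 reports the page-aligned faulting IPA, shifted right by 12, in a
 * field that starts at bit 4. Masking and shifting left by 8 therefore
 * rebuilds the page-aligned IPA: e.g. a fault at IPA 0x8012345678 is reported
 * as 0x80123450 and recovered here as 0x8012345000.
 */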
static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}
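
/*
 * Access size in bytes, derived from the ISS.SAS field as 1 << SAS:
 * SAS == 0 -> 1 byte, 1 -> 2, 2 -> 4, 3 -> 8.
 */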
static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
}

static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu)) {
		/*
		 * Only a permission fault on a S1PTW should be
		 * considered as a write. Otherwise, page tables baked
		 * in a read-only memslot will result in an exception
		 * being delivered in the guest.
		 *
		 * The drawback is that we end up faulting twice if the
		 * guest is using any of HW AF/DB: a translation fault
		 * to map the page containing the PT (read only at
		 * first), then a permission fault to allow the flags
		 * to be set.
		 */
		switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
		case ESR_ELx_FSC_PERM:
			return true;
		default:
			return false;
		}
	}

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);

		sctlr |= SCTLR_ELx_EE;
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	if (vcpu_mode_priv(vcpu))
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
	else
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E);
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;	/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;	/* Leave LE untouched */
}
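
/*
 * Illustrative sketch (not part of this header) of the conversion helpers on
 * the MMIO read completion path: data supplied by userspace is converted to
 * the guest's endianness and written to the register named by ESR_EL2:
 *
 *	data = vcpu_data_host_to_guest(vcpu, data, len);
 *	vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
 */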

static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
	vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
}

static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
{
	return test_bit(feature, vcpu->arch.features);
}

/* Narrow the PSCI register arguments (r1 to r3) to 32 bits. */
static inline void kvm_psci_narrow_to_32bit(struct kvm_vcpu *vcpu)
{
	int i;

	/*
	 * Zero the input registers' upper 32 bits. They will be fully
	 * zeroed on exit, so we're fine changing them in place.
	 */
	for (i = 1; i < 4; i++)
		vcpu_set_reg(vcpu, i, lower_32_bits(vcpu_get_reg(vcpu, i)));
}

static inline bool kvm_psci_valid_affinity(struct kvm_vcpu *vcpu,
					   unsigned long affinity)
{
	return !(affinity & ~MPIDR_HWID_BITMASK);
}

#define AFFINITY_MASK(level)	~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)

static inline unsigned long psci_affinity_mask(unsigned long affinity_level)
{
	if (affinity_level <= 3)
		return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);

	return 0;
}
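
/*
 * AFFINITY_MASK(level) clears the affinity fields below 'level' (each level
 * is MPIDR_LEVEL_BITS == 8 bits wide): e.g. AFFINITY_MASK(1) == ~0xffUL,
 * which keeps Aff3/Aff2/Aff1 and drops Aff0.
 */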

#endif /* __ARM64_KVM_EMULATE_H__ */