/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_H__
#define __ARM64_KVM_HYP_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/alternative.h>
#include <asm/sysreg.h>

DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DECLARE_PER_CPU(unsigned long, kvm_hyp_vector);
DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
DECLARE_PER_CPU(int, hyp_cpu_number);

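/*
 * The kernel's smp_processor_id() machinery is not usable from hyp
 * context, so hyp code reads the CPU number stashed in the
 * hyp_cpu_number per-CPU variable at init time instead.
 */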
#define hyp_smp_processor_id() (__this_cpu_read(hyp_cpu_number))

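/*
 * These accessors are patched at boot via ALTERNATIVE(): the non-VHE
 * encoding is emitted by default and rewritten to the VHE encoding
 * when the ARM64_HAS_VIRT_HOST_EXTN capability is detected.
 */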
#define read_sysreg_elx(r,nvh,vh)					\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE(__mrs_s("%0", r##nvh),	\
					 __mrs_s("%0", r##vh),		\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
		reg;							\
	})

#define write_sysreg_elx(v,r,nvh,vh)					\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE(__msr_s(r##nvh, "%x0"),	\
					 __msr_s(r##vh, "%x0"),		\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
					 : : "rZ" (__val));		\
	} while (0)

/*
 * Unified accessors for registers that have a different encoding
 * between VHE and non-VHE. They must be specified without their "ELx"
 * encoding, but with the SYS_ prefix, as defined in asm/sysreg.h.
 */

#define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
#define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
#define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
#define write_sysreg_el1(v,r)	write_sysreg_elx(v, r, _EL1, _EL12)
#define read_sysreg_el2(r)	read_sysreg_elx(r, _EL2, _EL1)
#define write_sysreg_el2(v,r)	write_sysreg_elx(v, r, _EL2, _EL1)
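
/*
 * For example (illustrative only), accessing the guest's SCTLR from hyp
 * context resolves to SCTLR_EL1 on non-VHE and SCTLR_EL12 on VHE:
 *
 *	u64 sctlr = read_sysreg_el1(SYS_SCTLR);
 *	write_sysreg_el1(sctlr, SYS_SCTLR);
 */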

/*
 * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
 * static inline can allow the compiler to out-of-line this. KVM always wants
 * the macro version as it's always inlined.
 */
#define __kvm_swab32(x)	___constant_swab32(x)
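/* For instance, __kvm_swab32(0x12345678) evaluates to 0x78563412. */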

int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);

void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_save_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if);
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);

#ifdef __KVM_NVHE_HYPERVISOR__
void __timer_enable_traps(struct kvm_vcpu *vcpu);
void __timer_disable_traps(struct kvm_vcpu *vcpu);
#endif

#ifdef __KVM_NVHE_HYPERVISOR__
void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
#else
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
#endif

void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
void __debug_switch_to_host(struct kvm_vcpu *vcpu);

#ifdef __KVM_NVHE_HYPERVISOR__
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu);
void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
#endif

void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
void __sve_save_state(void *sve_pffr, u32 *fpsr);
void __sve_restore_state(void *sve_pffr, u32 *fpsr);

#ifndef __KVM_NVHE_HYPERVISOR__
void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu);
#endif

u64 __guest_enter(struct kvm_vcpu *vcpu);

bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt);

#ifdef __KVM_NVHE_HYPERVISOR__
void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
			       u64 elr, u64 par);
#endif

#ifdef __KVM_NVHE_HYPERVISOR__
void __pkvm_init_switch_pgd(phys_addr_t phys, unsigned long size,
			    phys_addr_t pgd, void *sp, void *cont_fn);
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
		unsigned long *per_cpu_base, u32 hyp_va_bits);
void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
#endif

#ifdef __KVM_NVHE_HYPERVISOR__
struct user_fpsimd_state *get_host_fpsimd_state(struct kvm_vcpu *vcpu);
struct kvm_host_sve_state *get_host_sve_state(struct kvm_vcpu *vcpu);
#endif

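/*
 * kvm_nvhe_sym() (from asm/kvm_asm.h) prefixes the symbol name with
 * __kvm_nvhe_, so the same C identifier can be referenced from both the
 * host kernel and the nVHE hyp object. These hold sanitised copies of
 * the CPU ID registers, snapshotted for the hypervisor at boot.
 */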
extern u64 kvm_nvhe_sym(id_aa64pfr0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64pfr1_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64isar0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64isar1_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64isar2_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val);

extern unsigned long kvm_nvhe_sym(__icache_flags);
extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
extern bool kvm_nvhe_sym(smccc_trng_available);
extern unsigned int kvm_nvhe_sym(kvm_host_sve_max_vl);

struct kvm_nvhe_clock_data {
	u32 mult;
	u32 shift;
	u64 epoch_ns;
	u64 epoch_cyc;
};
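
/*
 * Illustrative sketch only: a mult/shift pair of this shape typically
 * follows the usual clocksource pattern for converting a counter delta
 * to nanoseconds without a division. A hypothetical consumer (this
 * helper is not part of the kernel API) might look like:
 */
static inline u64 __kvm_nvhe_clock_cyc_to_ns(const struct kvm_nvhe_clock_data *data,
					     u64 cyc)
{
	/* ns since epoch = ((cyc - epoch_cyc) * mult) >> shift */
	return data->epoch_ns +
	       (((cyc - data->epoch_cyc) * data->mult) >> data->shift);
}
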
#endif /* __ARM64_KVM_HYP_H__ */