/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/hyp_image.h>
#include <asm/virt.h>

#define ARM_EXIT_WITH_SERROR_BIT  31
#define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)	  !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

#define ARM_EXCEPTION_IRQ	  0
#define ARM_EXCEPTION_EL1_SERROR  1
#define ARM_EXCEPTION_TRAP	  2
#define ARM_EXCEPTION_IL	  3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE	  HVC_STUB_ERR
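
/*
 * Worked example (illustrative, not kernel code): a return value of
 * ((1U << ARM_EXIT_WITH_SERROR_BIT) | ARM_EXCEPTION_TRAP) decodes with the
 * helpers above as ARM_EXCEPTION_CODE() == ARM_EXCEPTION_TRAP,
 * ARM_EXCEPTION_IS_TRAP() != 0 and ARM_SERROR_PENDING() != 0, i.e. a trap
 * exit with an SError pending on top of it.
 */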

#define kvm_arm_exception_type					\
	{ARM_EXCEPTION_IRQ,		"IRQ"		},	\
	{ARM_EXCEPTION_EL1_SERROR,	"SERROR"	},	\
	{ARM_EXCEPTION_TRAP,		"TRAP"		},	\
	{ARM_EXCEPTION_HYP_GONE,	"HYP_GONE"	}
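
/*
 * This is laid out as a { value, "name" } list so that trace events can
 * pretty-print exit codes with __print_symbolic(); see the kvm_exit trace
 * event under arch/arm64/kvm/.
 */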

/*
 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
 * that jumps over this.
 */
#define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)

#define KVM_HOST_SMCCC_ID(id)						\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
			   ARM_SMCCC_SMC_64,				\
			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
			   (id))

#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)
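
/*
 * Sketch of how a host hypercall is issued (the real plumbing lives in
 * kvm_call_hyp_nvhe(); this open-coded form is for illustration only):
 *
 *	struct arm_smccc_res res;
 *
 *	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_vcpu_run), vcpu, &res);
 *
 * i.e. the function name is pasted onto __KVM_HOST_SMCCC_FUNC_ to select an
 * entry of the enum below, then wrapped into a fast, 64-bit, vendor-hyp
 * SMCCC function ID.
 */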

#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init			0

#ifndef __ASSEMBLY__

#include <linux/mm.h>

enum __kvm_host_smccc_func {
	/* Hypercalls available only prior to pKVM finalisation */
	/* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */
	__KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1,
	__KVM_HOST_SMCCC_FUNC___pkvm_init,
	__KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping,
	__KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector,
	__KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config,
	__KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,

	/* Hypercalls available after pKVM finalisation */
	__KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_reclaim_page,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_donate_guest,
	__KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
	__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
	__KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
	__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
	__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
	__KVM_HOST_SMCCC_FUNC___pkvm_init_shadow,
	__KVM_HOST_SMCCC_FUNC___pkvm_init_shadow_vcpu,
	__KVM_HOST_SMCCC_FUNC___pkvm_teardown_shadow,
	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load,
	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put,
	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_sync_state,
	__KVM_HOST_SMCCC_FUNC___pkvm_iommu_driver_init,
	__KVM_HOST_SMCCC_FUNC___pkvm_iommu_register,
	__KVM_HOST_SMCCC_FUNC___pkvm_iommu_pm_notify,
	__KVM_HOST_SMCCC_FUNC___pkvm_iommu_finalize,
};

#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]

/*
 * Declare a pair of symbols sharing the same name, one defined in the VHE
 * hyp implementation and the other in the nVHE one.
 */
#define DECLARE_KVM_HYP_SYM(sym)		\
	DECLARE_KVM_VHE_SYM(sym);		\
	DECLARE_KVM_NVHE_SYM(sym)
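
/*
 * For example, DECLARE_KVM_HYP_SYM(__kvm_hyp_vector) expands to:
 *
 *	extern char __kvm_hyp_vector[];
 *	extern char __kvm_nvhe___kvm_hyp_vector[];
 *
 * kvm_nvhe_sym() (see asm/hyp_image.h) adds the __kvm_nvhe_ prefix that the
 * nVHE object applies to all of its symbols.
 */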

#define DECLARE_KVM_VHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, sym)
#define DECLARE_KVM_NVHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))

#define DECLARE_KVM_HYP_PER_CPU(type, sym)	\
	DECLARE_KVM_VHE_PER_CPU(type, sym);	\
	DECLARE_KVM_NVHE_PER_CPU(type, sym)

/*
 * Compute a pointer to a symbol defined in the nVHE percpu region.
 * Returns NULL if the percpu memory has not been allocated yet.
 */
#define this_cpu_ptr_nvhe_sym(sym)	per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
#define per_cpu_ptr_nvhe_sym(sym, cpu)						\
	({									\
		unsigned long base, off;					\
		base = kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];		\
		off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -			\
		      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);		\
		base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;	\
	})
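
/*
 * A minimal usage sketch (kvm_host_data here stands for any per-CPU variable
 * declared with DECLARE_KVM_NVHE_PER_CPU() or DECLARE_KVM_HYP_PER_CPU()):
 *
 *	struct kvm_host_data *hd;
 *
 *	hd = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu);
 *	if (!hd)
 *		return -ENOMEM;	// hyp percpu pages not allocated yet
 */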

#if defined(__KVM_NVHE_HYPERVISOR__)

#define CHOOSE_NVHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_NVHE_SYM(sym)

/* The nVHE hypervisor shouldn't even try to access VHE symbols */
extern void *__nvhe_undefined_symbol;
#define CHOOSE_VHE_SYM(sym)		__nvhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__nvhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__nvhe_undefined_symbol)

#elif defined(__KVM_VHE_HYPERVISOR__)

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_VHE_SYM(sym)

/* The VHE hypervisor shouldn't even try to access nVHE symbols */
extern void *__vhe_undefined_symbol;
#define CHOOSE_NVHE_SYM(sym)		__vhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__vhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__vhe_undefined_symbol)

#else

/*
 * BIG FAT WARNINGS:
 *
 * - Don't be tempted to change the following is_kernel_in_hyp_mode()
 *   to has_vhe(). has_vhe() is implemented as a *final* capability,
 *   while this is used early at boot time, when the capabilities are
 *   not final yet.
 *
 * - Don't let the nVHE hypervisor have access to this, as it will
 *   pick the *wrong* symbol (yes, it runs at EL2...).
 */
#define CHOOSE_HYP_SYM(sym)		(is_kernel_in_hyp_mode()	\
					   ? CHOOSE_VHE_SYM(sym)	\
					   : CHOOSE_NVHE_SYM(sym))

#define this_cpu_ptr_hyp_sym(sym)	(is_kernel_in_hyp_mode()	\
					   ? this_cpu_ptr(&sym)		\
					   : this_cpu_ptr_nvhe_sym(sym))

#define per_cpu_ptr_hyp_sym(sym, cpu)	(is_kernel_in_hyp_mode()	\
					   ? per_cpu_ptr(&sym, cpu)	\
					   : per_cpu_ptr_nvhe_sym(sym, cpu))

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)

#endif
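
/*
 * Illustrative: with the definitions above, generic KVM code can name a hyp
 * symbol without knowing which image is live, e.g.
 *
 *	void *vec = CHOOSE_HYP_SYM(__kvm_hyp_vector);
 *
 * resolves to the VHE __kvm_hyp_vector when the kernel runs at EL2, and to
 * the __kvm_nvhe_-prefixed copy otherwise.
 */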

struct kvm_nvhe_init_params {
	unsigned long mair_el2;
	unsigned long tcr_el2;
	unsigned long tpidr_el2;
	unsigned long stack_hyp_va;
	phys_addr_t pgd_pa;
	unsigned long hcr_el2;
	unsigned long vttbr;
	unsigned long vtcr;
};

/* Translate a kernel address @ptr into its equivalent linear mapping */
#define kvm_ksym_ref(ptr)						\
	({								\
		void *val = (ptr);					\
		if (!is_kernel_in_hyp_mode())				\
			val = lm_alias((ptr));				\
		val;							\
	})
#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(__va_function(kvm_nvhe_sym(sym)))
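
/*
 * Hypothetical use: under nVHE, hyp mappings are created from the linear
 * alias of kernel symbols, so a symbol reference is converted first, e.g.
 *
 *	unsigned long vec = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
 *
 * Under VHE the kernel address is returned unchanged.
 */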

struct kvm;
struct kvm_vcpu;
struct kvm_s2_mmu;

DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)

extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);

DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

extern void __kvm_flush_vm_context(void);
extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
				     int level);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);

extern u64 __vgic_v3_get_gic_config(void);
extern void __vgic_v3_init_lrs(void);

extern u64 __kvm_get_mdcr_el2(void);

#define __KVM_EXTABLE(from, to)						\
	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

#define __kvm_at(at_op, addr)						\
({									\
	int __kvm_at_err = 0;						\
	u64 spsr, elr;							\
	asm volatile(							\
	"	mrs	%1, spsr_el2\n"					\
	"	mrs	%2, elr_el2\n"					\
	"1:	at	"at_op", %3\n"					\
	"	isb\n"							\
	"	b	9f\n"						\
	"2:	msr	spsr_el2, %1\n"					\
	"	msr	elr_el2, %2\n"					\
	"	mov	%w0, %4\n"					\
	"9:\n"								\
	__KVM_EXTABLE(1b, 2b)						\
	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)		\
	: "r" (addr), "i" (-EFAULT));					\
	__kvm_at_err;							\
})
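
/*
 * A hedged usage sketch: translate a faulting address at EL1 for a read,
 * treating a fault on the AT instruction itself as "no translation":
 *
 *	u64 par;
 *
 *	if (__kvm_at("s1e1r", far))
 *		par = SYS_PAR_EL1_F;	// AT itself faulted
 *	else
 *		par = read_sysreg_par();
 */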

#else /* __ASSEMBLY__ */

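/*
 * Assembly helpers for locating the host's per-CPU context and the vCPU
 * (if any) currently loaded on this CPU. The HOST_DATA_CONTEXT and
 * HOST_CONTEXT_VCPU offsets are generated by asm-offsets.c.
 */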
.macro get_host_ctxt reg, tmp
	adr_this_cpu \reg, kvm_host_data, \tmp
	add	\reg, \reg, #HOST_DATA_CONTEXT
.endm

.macro get_vcpu_ptr vcpu, ctxt
	get_host_ctxt \ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro get_loaded_vcpu vcpu, ctxt
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro set_loaded_vcpu vcpu, ctxt, tmp
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
	str	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

/*
 * KVM extable for unexpected exceptions.
 * Entries are in the same format as _asm_extable, but output to a different
 * section so that it can be mapped to EL2. Unlike the kernel's table, the KVM
 * version is not sorted. The caller must ensure that x18 holds the hypervisor
 * value (so that any Shadow-Call-Stack instrumented code can write to it),
 * and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
 */
.macro	_kvm_extable, from, to
	.pushsection	__kvm_ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
.endm
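
/*
 * Typical shape of a use (sketch; labels are illustrative):
 *
 *	1:	ldr	x0, [x1]	// may take an unexpected exception
 *		...
 *	2:	// fixup: runs if the instruction at 1b faults
 *		...
 *	_kvm_extable	1b, 2b
 */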

#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8 * (x))
#define CPU_LR_OFFSET		CPU_XREG_OFFSET(30)
#define CPU_SP_EL0_OFFSET	(CPU_LR_OFFSET + 8)

/*
 * We treat x18 as callee-saved as the host may use it as a platform
 * register (e.g. for shadow call stack).
 */
.macro save_callee_saved_regs ctxt
	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	// \ctxt must not be x18-x28, as those registers are overwritten here
	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

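/*
 * SP_EL0 is shared with the guest and is also used by the host kernel
 * (which keeps 'current' in sp_el0), so it must be context-switched too.
 */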
.macro save_sp_el0 ctxt, tmp
	mrs	\tmp, sp_el0
	str	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
.endm

.macro restore_sp_el0 ctxt, tmp
	ldr	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
	msr	sp_el0, \tmp
.endm

#endif /* __ASSEMBLY__ */

#endif /* __ARM_KVM_ASM_H__ */