/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/hyp_image.h>
#include <asm/virt.h>

#define ARM_EXIT_WITH_SERROR_BIT  31
#define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)	  !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

#define ARM_EXCEPTION_IRQ	  0
#define ARM_EXCEPTION_EL1_SERROR  1
#define ARM_EXCEPTION_TRAP	  2
#define ARM_EXCEPTION_IL	  3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE	  HVC_STUB_ERR

#define kvm_arm_exception_type					\
	{ARM_EXCEPTION_IRQ,		"IRQ"		},	\
	{ARM_EXCEPTION_EL1_SERROR, 	"SERROR"	},	\
	{ARM_EXCEPTION_TRAP, 		"TRAP"		},	\
	{ARM_EXCEPTION_HYP_GONE,	"HYP_GONE"	}

/*
 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
 * that jumps over this.
 */
#define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)

#define __SMCCC_WORKAROUND_1_SMC_SZ 36
#define __SMCCC_WORKAROUND_3_SMC_SZ 36
#define __SPECTRE_BHB_LOOP_SZ       44
#define __SPECTRE_BHB_CLEARBHB_SZ   12

#define KVM_HOST_SMCCC_ID(id)						\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
			   ARM_SMCCC_SMC_64,				\
			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
			   (id))

#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)

#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init			0
#define __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run			1
#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context		2
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa		3
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid		4
#define __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context		5
#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff		6
#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs			7
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_ich_vtr_el2		8
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr		9
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr		10
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs		11
#define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2		12
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs		13
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs		14
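
/*
 * Usage sketch (not taken from this file, argument name is illustrative): the
 * host reaches the nVHE hypervisor via an HVC whose function ID is built with
 * KVM_HOST_SMCCC_FUNC(). Conceptually:
 *
 *	struct arm_smccc_res res;
 *	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_vcpu_run), vcpu_arg, &res);
 *
 * KVM_HOST_SMCCC_FUNC(__kvm_vcpu_run) expands to KVM_HOST_SMCCC_ID(1): a fast
 * SMC64 call owned by ARM_SMCCC_OWNER_VENDOR_HYP, whose function number comes
 * from the table above (exact bit layout per ARM_SMCCC_CALL_VAL()).
 */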

#ifndef __ASSEMBLY__

#include <linux/mm.h>

#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]

/*
 * Define a pair of symbols sharing the same name, one defined in the VHE
 * and the other in the nVHE hyp implementation.
 */
#define DECLARE_KVM_HYP_SYM(sym)		\
	DECLARE_KVM_VHE_SYM(sym);		\
	DECLARE_KVM_NVHE_SYM(sym)
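
/*
 * Expansion sketch (illustrative): DECLARE_KVM_HYP_SYM(__kvm_hyp_vector)
 * declares both the VHE copy, __kvm_hyp_vector, and the nVHE copy under its
 * kvm_nvhe_sym()-prefixed linker name (see asm/hyp_image.h), so kernel code
 * can refer to either image's symbol through the CHOOSE_*_SYM() helpers below.
 */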

#define DECLARE_KVM_VHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, sym)
#define DECLARE_KVM_NVHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))

#define DECLARE_KVM_HYP_PER_CPU(type, sym)	\
	DECLARE_KVM_VHE_PER_CPU(type, sym);	\
	DECLARE_KVM_NVHE_PER_CPU(type, sym)

/*
 * Compute a pointer to a symbol defined in the nVHE percpu region.
 * Returns NULL if percpu memory has not been allocated yet.
 */
#define this_cpu_ptr_nvhe_sym(sym)	per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
#define per_cpu_ptr_nvhe_sym(sym, cpu)						\
	({									\
		unsigned long base, off;					\
		base = kvm_arm_hyp_percpu_base[cpu];				\
		off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -			\
		      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);		\
		base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;	\
	})
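
/*
 * Usage sketch (hypothetical symbol name): for an nVHE per-CPU variable
 * declared with DECLARE_KVM_NVHE_PER_CPU(struct foo, foo_data), the host can
 * locate a given CPU's copy inside the hyp percpu region with:
 *
 *	struct foo *p = per_cpu_ptr_nvhe_sym(foo_data, cpu);
 *	if (!p)
 *		return -ENOMEM;	// hyp percpu memory not allocated yet
 *
 * The symbol's offset from __per_cpu_start is added to the per-CPU base
 * recorded in kvm_arm_hyp_percpu_base[].
 */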

#if defined(__KVM_NVHE_HYPERVISOR__)

#define CHOOSE_NVHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_NVHE_SYM(sym)

/* The nVHE hypervisor shouldn't even try to access VHE symbols */
extern void *__nvhe_undefined_symbol;
#define CHOOSE_VHE_SYM(sym)		__nvhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__nvhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__nvhe_undefined_symbol)

#elif defined(__KVM_VHE_HYPERVISOR__)

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_VHE_SYM(sym)

/* The VHE hypervisor shouldn't even try to access nVHE symbols */
extern void *__vhe_undefined_symbol;
#define CHOOSE_NVHE_SYM(sym)		__vhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__vhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__vhe_undefined_symbol)

#else

/*
 * BIG FAT WARNINGS:
 *
 * - Don't be tempted to change the following is_kernel_in_hyp_mode()
 *   to has_vhe(). has_vhe() is implemented as a *final* capability,
 *   while this is used early at boot time, when the capabilities are
 *   not final yet....
 *
 * - Don't let the nVHE hypervisor have access to this, as it will
 *   pick the *wrong* symbol (yes, it runs at EL2...).
 */
#define CHOOSE_HYP_SYM(sym)		(is_kernel_in_hyp_mode()	\
					   ? CHOOSE_VHE_SYM(sym)	\
					   : CHOOSE_NVHE_SYM(sym))

#define this_cpu_ptr_hyp_sym(sym)	(is_kernel_in_hyp_mode()	\
					   ? this_cpu_ptr(&sym)		\
					   : this_cpu_ptr_nvhe_sym(sym))

#define per_cpu_ptr_hyp_sym(sym, cpu)	(is_kernel_in_hyp_mode()	\
					   ? per_cpu_ptr(&sym, cpu)	\
					   : per_cpu_ptr_nvhe_sym(sym, cpu))

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)

#endif
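
/*
 * Resolution example (illustrative): in kernel-proper code,
 * CHOOSE_HYP_SYM(__kvm_hyp_vector) checks is_kernel_in_hyp_mode() at run time
 * and picks either the VHE symbol (__kvm_hyp_vector) or the nVHE one
 * (kvm_nvhe_sym(__kvm_hyp_vector)). When built into one of the hyp images,
 * the same expression resolves at build time to that image's own copy, and a
 * reference to the other image's symbol fails to link through the
 * *_undefined_symbol placeholders above.
 */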

/* Translate a kernel address @ptr into its equivalent linear mapping */
#define kvm_ksym_ref(ptr)						\
	({								\
		void *val = (ptr);					\
		if (!is_kernel_in_hyp_mode())				\
			val = lm_alias((ptr));				\
		val;							\
	 })
#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))
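
/*
 * Usage sketch (illustrative, not taken from this file): under nVHE the host
 * refers to hyp objects through the linear alias when handing them to EL2,
 * e.g.:
 *
 *	void *vectors = kvm_ksym_ref(__kvm_hyp_vector);
 *
 * Under VHE, is_kernel_in_hyp_mode() is true and the kernel-image address is
 * returned unchanged.
 */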

struct kvm;
struct kvm_vcpu;
struct kvm_s2_mmu;

DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_NVHE_SYM(__kvm_hyp_host_vector);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_host_vector	CHOOSE_NVHE_SYM(__kvm_hyp_host_vector)
#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)

extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);

extern atomic_t arm64_el2_vector_last_slot;
DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

extern void __kvm_flush_vm_context(void);
extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
				     int level);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern void __kvm_enable_ssbs(void);

extern u64 __vgic_v3_get_ich_vtr_el2(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);

extern u32 __kvm_get_mdcr_el2(void);

extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
extern char __smccc_workaround_3_smc[__SMCCC_WORKAROUND_3_SMC_SZ];
extern char __spectre_bhb_loop_k8[__SPECTRE_BHB_LOOP_SZ];
extern char __spectre_bhb_loop_k24[__SPECTRE_BHB_LOOP_SZ];
extern char __spectre_bhb_loop_k32[__SPECTRE_BHB_LOOP_SZ];
extern char __spectre_bhb_clearbhb[__SPECTRE_BHB_CLEARBHB_SZ];

/*
 * Obtain the PC-relative address of a kernel symbol
 * s: symbol
 *
 * The goal of this macro is to return a symbol's address based on a
 * PC-relative computation, as opposed to loading the VA from a constant
 * pool or something similar. This works well for HYP, as an absolute VA
 * is guaranteed to be wrong. Only use this if trying to obtain the
 * address of a symbol (i.e. not something you obtained by following
 * a pointer).
 */
#define hyp_symbol_addr(s)						\
	({								\
		typeof(s) *addr;					\
		asm("adrp	%0, %1\n"				\
		    "add	%0, %0, :lo12:%1\n"			\
		    : "=r" (addr) : "S" (&s));				\
		addr;							\
	})
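
/*
 * Usage sketch (hypothetical symbol): from code running at EL2, where the
 * kernel VA layout does not apply, take the address of an object the hyp
 * image owns:
 *
 *	extern unsigned long some_hyp_value;
 *	unsigned long *p = hyp_symbol_addr(some_hyp_value);
 *
 * The adrp/add pair computes the address relative to the executing code, so
 * the result is valid under whichever mapping HYP is currently running.
 */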

#define __KVM_EXTABLE(from, to)						\
	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"


#define __kvm_at(at_op, addr)						\
( { 									\
	int __kvm_at_err = 0;						\
	u64 spsr, elr;							\
	asm volatile(							\
	"	mrs	%1, spsr_el2\n"					\
	"	mrs	%2, elr_el2\n"					\
	"1:	at	"at_op", %3\n"					\
	"	isb\n"							\
	"	b	9f\n"						\
	"2:	msr	spsr_el2, %1\n"					\
	"	msr	elr_el2, %2\n"					\
	"	mov	%w0, %4\n"					\
	"9:\n"								\
	__KVM_EXTABLE(1b, 2b)						\
	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)		\
	: "r" (addr), "i" (-EFAULT));					\
	__kvm_at_err;							\
} )
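
/*
 * Usage sketch (illustrative): perform a stage-1 translation from hyp context
 * and fail gracefully if the AT instruction itself takes an exception
 * ("s1e1r" is one of the architectural AT operations):
 *
 *	if (__kvm_at("s1e1r", fault_va))
 *		return false;	// AT faulted; __kvm_at returned -EFAULT
 *	par = read_sysreg(par_el1);
 *
 * The __KVM_EXTABLE entry routes an unexpected exception at label 1 to the
 * fixup at label 2, which restores SPSR_EL2/ELR_EL2 before reporting failure.
 */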


#else /* __ASSEMBLY__ */

.macro get_host_ctxt reg, tmp
	adr_this_cpu \reg, kvm_host_data, \tmp
	add	\reg, \reg, #HOST_DATA_CONTEXT
.endm

.macro get_vcpu_ptr vcpu, ctxt
	get_host_ctxt \ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro get_loaded_vcpu vcpu, ctxt
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro set_loaded_vcpu vcpu, ctxt, tmp
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
	str	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

/*
 * KVM extable for unexpected exceptions.
 * Same format as _asm_extable, but output to a different section so that it
 * can be mapped to EL2. The KVM version is not sorted. The caller must ensure
 * that x18 holds the hypervisor value, so that any Shadow-Call-Stack
 * instrumented code can write to it, and that SPSR_EL2 and ELR_EL2 are
 * restored by the fixup.
 */
.macro	_kvm_extable, from, to
	.pushsection	__kvm_ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
.endm
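
// Usage sketch (labels are illustrative, not from this file): pair a hyp
// instruction that may take an unexpected exception with a local fixup, the
// same way __kvm_at() does from C:
//
//	1:	ldr	x0, [x2]	// access that may fault at EL2
//		...
//	2:	// fixup: SPSR_EL2/ELR_EL2 must be restored here
//		_kvm_extable	1b, 2b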

#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8 * (x))
#define CPU_LR_OFFSET		CPU_XREG_OFFSET(30)
#define CPU_SP_EL0_OFFSET	(CPU_LR_OFFSET + 8)

/*
 * We treat x18 as callee-saved as the host may use it as a platform
 * register (e.g. for shadow call stack).
 */
.macro save_callee_saved_regs ctxt
	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	// We require that \ctxt is not one of x18-x28, as those registers
	// are overwritten below before the remaining loads complete.
	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm
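
// Usage sketch (register choice is illustrative): world-switch code typically
// saves the host's callee-saved registers into a context before loading guest
// state, and restores them symmetrically on the way back:
//
//	save_callee_saved_regs		x1
//	...				// run the guest
//	restore_callee_saved_regs	x1	// x1 is safe: not in x18-x28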

.macro save_sp_el0 ctxt, tmp
	mrs	\tmp,	sp_el0
	str	\tmp,	[\ctxt, #CPU_SP_EL0_OFFSET]
.endm

.macro restore_sp_el0 ctxt, tmp
	ldr	\tmp,	  [\ctxt, #CPU_SP_EL0_OFFSET]
	msr	sp_el0, \tmp
.endm

#endif

#endif /* __ARM_KVM_ASM_H__ */