/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/hyp_image.h>
#include <asm/insn.h>
#include <asm/virt.h>
#include <asm/sysreg.h>

#define ARM_EXIT_WITH_SERROR_BIT  31
#define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)	  !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))
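
/*
 * An exit code therefore encodes both an exception class and, in bit 31,
 * a pending SError. For example (illustrative value):
 *
 *	x = (1U << ARM_EXIT_WITH_SERROR_BIT) | ARM_EXCEPTION_TRAP;
 *	ARM_SERROR_PENDING(x);	// true
 *	ARM_EXCEPTION_CODE(x);	// ARM_EXCEPTION_TRAP
 */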

#define ARM_EXCEPTION_IRQ	  0
#define ARM_EXCEPTION_EL1_SERROR  1
#define ARM_EXCEPTION_TRAP	  2
#define ARM_EXCEPTION_IL	  3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE	  HVC_STUB_ERR
#define ARM_EXCEPTION_HYP_REQ	  5

#define kvm_arm_exception_type					\
	{ARM_EXCEPTION_IRQ,		"IRQ"		},	\
	{ARM_EXCEPTION_EL1_SERROR,	"SERROR"	},	\
	{ARM_EXCEPTION_TRAP,		"TRAP"		},	\
	{ARM_EXCEPTION_HYP_GONE,	"HYP_GONE"	},	\
	{ARM_EXCEPTION_HYP_REQ,		"HYP_REQ"	}

/*
 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
 * that jumps over this.
 */
#define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)

#define KVM_HOST_SMCCC_ID(id)						\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
			   ARM_SMCCC_SMC_64,				\
			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
			   (id))

#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)

#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init			0
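
/*
 * KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) thus builds a fast SMC64 vendor-hyp
 * SMCCC function ID with function number 0; per the SMCCC convention, the
 * host passes this ID as the first argument of the HVC to the hypervisor.
 */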

#ifndef __ASSEMBLY__

#include <linux/mm.h>

enum __kvm_host_smccc_func {
	/* Hypercalls available only prior to pKVM finalisation */
	/* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */
	__KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1,
	__KVM_HOST_SMCCC_FUNC___pkvm_init,
	__KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping,
	__KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector,
	__KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config,
	__KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa_nsh,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
	__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
	__KVM_HOST_SMCCC_FUNC___pkvm_alloc_module_va,
	__KVM_HOST_SMCCC_FUNC___pkvm_map_module_page,
	__KVM_HOST_SMCCC_FUNC___pkvm_unmap_module_page,
	__KVM_HOST_SMCCC_FUNC___pkvm_init_module,
	__KVM_HOST_SMCCC_FUNC___pkvm_register_hcall,
	__KVM_HOST_SMCCC_FUNC___pkvm_iommu_init,
	__KVM_HOST_SMCCC_FUNC___pkvm_devices_init,
	__KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,

	/* Hypercalls available after pKVM finalisation */
	__KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_donate_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_donate_guest_sglist,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_share_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_relax_perms_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_wrprotect_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_test_clear_young_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_mkyoung_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_split_guest,
	__KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
	__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
	__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
	__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
	__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
	__KVM_HOST_SMCCC_FUNC___pkvm_start_teardown_vm,
	__KVM_HOST_SMCCC_FUNC___pkvm_finalize_teardown_vm,
	__KVM_HOST_SMCCC_FUNC___pkvm_reclaim_dying_guest_page,
	__KVM_HOST_SMCCC_FUNC___pkvm_reclaim_dying_guest_ffa_resources,
	__KVM_HOST_SMCCC_FUNC___pkvm_notify_guest_vm_avail,
	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load,
	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put,
	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_sync_state,
	__KVM_HOST_SMCCC_FUNC___pkvm_update_clock_tracing,
	__KVM_HOST_SMCCC_FUNC___pkvm_load_tracing,
	__KVM_HOST_SMCCC_FUNC___pkvm_teardown_tracing,
	__KVM_HOST_SMCCC_FUNC___pkvm_enable_tracing,
	__KVM_HOST_SMCCC_FUNC___pkvm_reset_tracing,
	__KVM_HOST_SMCCC_FUNC___pkvm_swap_reader_tracing,
	__KVM_HOST_SMCCC_FUNC___pkvm_enable_event,
	__KVM_HOST_SMCCC_FUNC___pkvm_selftest_event,
	__KVM_HOST_SMCCC_FUNC___pkvm_sync_ftrace,
	__KVM_HOST_SMCCC_FUNC___pkvm_disable_ftrace,
	__KVM_HOST_SMCCC_FUNC___pkvm_tlb_flush_vmid,
	__KVM_HOST_SMCCC_FUNC___pkvm_hyp_alloc_mgt_refill,
	__KVM_HOST_SMCCC_FUNC___pkvm_hyp_alloc_mgt_reclaimable,
	__KVM_HOST_SMCCC_FUNC___pkvm_hyp_alloc_mgt_reclaim,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_alloc_domain,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_free_domain,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_attach_dev,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_detach_dev,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_map_pages,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_unmap_pages,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_iova_to_phys,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_hvc_pd,
	__KVM_HOST_SMCCC_FUNC___pkvm_ptdump,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_map_sg,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_donate_hyp_mmio,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_reclaim_hyp_mmio,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_map_guest_mmio,
	__KVM_HOST_SMCCC_FUNC___pkvm_pviommu_attach,
	__KVM_HOST_SMCCC_FUNC___pkvm_pviommu_add_vsid,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_get_ffa_version,

	/*
	 * Start of the dynamically registered hypercalls. Start a bit
	 * further up (at 128) to leave headroom for the static hypercalls
	 * above to grow without disturbing the dynamic numbering that
	 * modules may rely on.
	 */
	__KVM_HOST_SMCCC_FUNC___dynamic_hcalls = 128,
};
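
/*
 * A minimal sketch (not the in-tree wrapper itself) of how the host issues
 * one of these hypercalls: the SMCCC function ID built from the enum value
 * goes into the HVC, and the hypervisor's status comes back in a0. The
 * kvm_call_hyp_nvhe() macro in <asm/kvm_host.h> wraps this pattern:
 *
 *	struct arm_smccc_res res;
 *
 *	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_flush_vm_context), &res);
 *	WARN_ON(res.a0 != SMCCC_RET_SUCCESS);
 */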

#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]

/*
 * Define a pair of symbols sharing the same name but one defined in
 * VHE and the other in nVHE hyp implementations.
 */
#define DECLARE_KVM_HYP_SYM(sym)		\
	DECLARE_KVM_VHE_SYM(sym);		\
	DECLARE_KVM_NVHE_SYM(sym)

#define DECLARE_KVM_VHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, sym)
#define DECLARE_KVM_NVHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))

#define DECLARE_KVM_HYP_PER_CPU(type, sym)	\
	DECLARE_KVM_VHE_PER_CPU(type, sym);	\
	DECLARE_KVM_NVHE_PER_CPU(type, sym)
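
/*
 * For example, DECLARE_KVM_HYP_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt)
 * declares both the kernel-proper percpu variable kvm_hyp_ctxt and its
 * kvm_nvhe_sym()-prefixed nVHE counterpart.
 */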

/*
 * Compute pointer to a symbol defined in nVHE percpu region.
 * Returns NULL if percpu memory has not been allocated yet.
 */
#define this_cpu_ptr_nvhe_sym(sym)	per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
#define per_cpu_ptr_nvhe_sym(sym, cpu)						\
	({									\
		unsigned long base, off;					\
		base = kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];		\
		off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -			\
		      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);		\
		base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;	\
	})
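
/*
 * Usage sketch: resolving this CPU's nVHE hyp context from the host,
 * which must tolerate a NULL result before the percpu pages exist
 * (the error handling here is illustrative):
 *
 *	struct kvm_cpu_context *hyp_ctxt;
 *
 *	hyp_ctxt = this_cpu_ptr_nvhe_sym(kvm_hyp_ctxt);
 *	if (!hyp_ctxt)
 *		return -ENOENT;
 */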

#if defined(__KVM_NVHE_HYPERVISOR__)

#define CHOOSE_NVHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_NVHE_SYM(sym)

/* The nVHE hypervisor shouldn't even try to access VHE symbols */
extern void *__nvhe_undefined_symbol;
#define CHOOSE_VHE_SYM(sym)		__nvhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__nvhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__nvhe_undefined_symbol)

/*
 * pKVM uses the module_ops struct to expose services to modules but
 * doesn't allow fine-grained definition of the license for each export,
 * and doesn't have a way to check the license of the loaded module.
 * Given that said module may be proprietary, let's seek GPL compliance
 * by preventing the accidental export of GPL symbols to hyp modules via
 * pKVM's module_ops struct.
 */
#ifdef EXPORT_SYMBOL_GPL
#undef EXPORT_SYMBOL_GPL
#endif
#define EXPORT_SYMBOL_GPL(sym) BUILD_BUG()

#elif defined(__KVM_VHE_HYPERVISOR__)

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_VHE_SYM(sym)

/* The VHE hypervisor shouldn't even try to access nVHE symbols */
extern void *__vhe_undefined_symbol;
#define CHOOSE_NVHE_SYM(sym)		__vhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__vhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__vhe_undefined_symbol)

#else

/*
 * BIG FAT WARNINGS:
 *
 * - Don't be tempted to change the following is_kernel_in_hyp_mode()
 *   to has_vhe(). has_vhe() is implemented as a *final* capability,
 *   while this is used early at boot time, when the capabilities are
 *   not final yet....
 *
 * - Don't let the nVHE hypervisor have access to this, as it will
 *   pick the *wrong* symbol (yes, it runs at EL2...).
 */
#define CHOOSE_HYP_SYM(sym)		(is_kernel_in_hyp_mode()	\
					   ? CHOOSE_VHE_SYM(sym)	\
					   : CHOOSE_NVHE_SYM(sym))

#define this_cpu_ptr_hyp_sym(sym)	(is_kernel_in_hyp_mode()	\
					   ? this_cpu_ptr(&sym)		\
					   : this_cpu_ptr_nvhe_sym(sym))

#define per_cpu_ptr_hyp_sym(sym, cpu)	(is_kernel_in_hyp_mode()	\
					   ? per_cpu_ptr(&sym, cpu)	\
					   : per_cpu_ptr_nvhe_sym(sym, cpu))

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)

#endif

struct kvm_nvhe_init_params {
	unsigned long mair_el2;
	unsigned long tcr_el2;
	unsigned long tpidr_el2;
	unsigned long stack_hyp_va;
	unsigned long stack_pa;
	phys_addr_t pgd_pa;
	unsigned long hcr_el2;
	unsigned long hfgwtr_el2;
	unsigned long vttbr;
	unsigned long vtcr;
	unsigned long tmp;
};

/*
 * Used by the host in EL1 to dump the nVHE hypervisor backtrace on
 * hyp_panic() in non-protected mode.
 *
 * @stack_base:                 hyp VA of the hyp_stack base.
 * @overflow_stack_base:        hyp VA of the hyp_overflow_stack base.
 * @fp:                         hyp FP where the backtrace begins.
 * @pc:                         hyp PC where the backtrace begins.
 */
struct kvm_nvhe_stacktrace_info {
	unsigned long stack_base;
	unsigned long overflow_stack_base;
	unsigned long fp;
	unsigned long pc;
};

/* Translate a kernel address @ptr into its equivalent linear mapping */
#define kvm_ksym_ref(ptr)						\
	({								\
		void *val = (ptr);					\
		if (!is_kernel_in_hyp_mode())				\
			val = lm_alias((ptr));				\
		val;							\
	 })
#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))
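
/*
 * Example (illustrative): obtaining the linear-map alias of the start of
 * the nVHE per-cpu region, e.g. when sizing or mapping it for EL2:
 *
 *	void *start = kvm_ksym_ref_nvhe(__per_cpu_start);
 */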

struct kvm;
struct kvm_vcpu;
struct kvm_s2_mmu;

DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)

extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);

DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

extern void __kvm_flush_vm_context(void);
extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
				     int level);
extern void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
					 phys_addr_t ipa,
					 int level);
extern void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
					phys_addr_t start, unsigned long pages);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);

extern int __kvm_tlbi_s1e2(struct kvm_s2_mmu *mmu, u64 va, u64 sys_encoding);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);
extern void __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
extern void __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
extern void __kvm_at_s12(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);

extern u64 __vgic_v3_get_gic_config(void);
extern void __vgic_v3_init_lrs(void);

extern u64 __kvm_get_mdcr_el2(void);

#define __KVM_EXTABLE(from, to)						\
	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"


#define __kvm_at(at_op, addr)						\
( {									\
	int __kvm_at_err = 0;						\
	u64 spsr, elr;							\
	asm volatile(							\
	"	mrs	%1, spsr_el2\n"					\
	"	mrs	%2, elr_el2\n"					\
	"1:	" __msr_s(at_op, "%3") "\n"				\
	"	isb\n"							\
	"	b	9f\n"						\
	"2:	msr	spsr_el2, %1\n"					\
	"	msr	elr_el2, %2\n"					\
	"	mov	%w0, %4\n"					\
	"9:\n"								\
	__KVM_EXTABLE(1b, 2b)						\
	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)		\
	: "r" (addr), "i" (-EFAULT));					\
	__kvm_at_err;							\
} )
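
/*
 * A usage sketch for __kvm_at(): perform a stage-1 EL1 read translation
 * from EL2 and treat a faulting AT instruction as -EFAULT. OP_AT_S1E1R
 * and read_sysreg_par() come from <asm/sysreg.h>:
 *
 *	u64 par;
 *
 *	if (__kvm_at(OP_AT_S1E1R, vaddr))
 *		return -EFAULT;
 *	par = read_sysreg_par();
 */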

void vcpu_illegal_trap(struct kvm_vcpu *vcpu, u64 *exit_code);

asmlinkage void __noreturn hyp_panic(void);
asmlinkage void __noreturn hyp_panic_bad_stack(void);
asmlinkage void kvm_unexpected_el2_exception(void);
struct kvm_cpu_context;
void handle_trap(struct kvm_cpu_context *host_ctxt);
asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on);
void __noreturn __pkvm_init_finalise(void);
void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
void kvm_patch_vector_branch(struct alt_instr *alt,
	__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_get_kimage_voffset(struct alt_instr *alt,
	__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_final_ctr_el0(struct alt_instr *alt,
	__le32 *origptr, __le32 *updptr, int nr_inst);
void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr_virt,
	u64 elr_phys, u64 par, uintptr_t vcpu, u64 far, u64 hpfar);

#else /* __ASSEMBLY__ */

.macro get_host_ctxt reg, tmp
	adr_this_cpu \reg, kvm_host_data, \tmp
	add	\reg, \reg, #HOST_DATA_CONTEXT
.endm

.macro get_vcpu_ptr vcpu, ctxt
	get_host_ctxt \ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro get_loaded_vcpu vcpu, ctxt
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro set_loaded_vcpu vcpu, ctxt, tmp
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
	str	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm
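
/*
 * Illustrative invocation: "get_vcpu_ptr x1, x0" leaves the current vcpu
 * pointer in x1 and clobbers x0, which receives the host context pointer.
 */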

/*
 * KVM extable for unexpected exceptions.
 * Create a struct kvm_exception_table_entry output to a section that can be
 * mapped by EL2. The table is not sorted.
 *
 * The caller must ensure that x18 holds the hypervisor value, so that any
 * Shadow-Call-Stack instrumented code can write to it, and that SPSR_EL2
 * and ELR_EL2 are restored by the fixup code.
 */
.macro	_kvm_extable, from, to
	.pushsection	__kvm_ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
.endm
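
/*
 * Usage sketch: register a fixup for a single faultable instruction
 * (labels are illustrative):
 *
 *	1:	ldr	x0, [x2]		// may fault at EL2
 *		_kvm_extable	1b, 2f
 *		...
 *	2:	// fixup runs instead if the load takes an exception
 */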

#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*x)
#define CPU_LR_OFFSET		CPU_XREG_OFFSET(30)
#define CPU_SP_EL0_OFFSET	(CPU_LR_OFFSET + 8)
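
/*
 * Each general-purpose register thus occupies an 8-byte slot after
 * CPU_USER_PT_REGS: e.g. CPU_LR_OFFSET is CPU_USER_PT_REGS + 8 * 30
 * (the x30/lr slot), with sp_el0 stored immediately after it.
 */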

/*
 * We treat x18 as callee-saved as the host may use it as a platform
 * register (e.g. for shadow call stack).
 */
.macro save_callee_saved_regs ctxt
	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	// We require \ctxt is not x18-x28
	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro save_sp_el0 ctxt, tmp
	mrs	\tmp,	sp_el0
	str	\tmp,	[\ctxt, #CPU_SP_EL0_OFFSET]
.endm

.macro restore_sp_el0 ctxt, tmp
	ldr	\tmp,	  [\ctxt, #CPU_SP_EL0_OFFSET]
	msr	sp_el0, \tmp
.endm

#endif

#endif /* __ARM_KVM_ASM_H__ */