/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/virt.h>

#define	VCPU_WORKAROUND_2_FLAG_SHIFT	0
#define	VCPU_WORKAROUND_2_FLAG		(_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)

#define ARM_EXIT_WITH_SERROR_BIT  31
#define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)	  !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

#define ARM_EXCEPTION_IRQ	  0
#define ARM_EXCEPTION_EL1_SERROR  1
#define ARM_EXCEPTION_TRAP	  2
#define ARM_EXCEPTION_IL	  3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE	  HVC_STUB_ERR
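
/*
 * Example, following directly from the macros above: an exit code of
 * (ARM_EXCEPTION_TRAP | (1U << ARM_EXIT_WITH_SERROR_BIT)) satisfies both
 * ARM_EXCEPTION_CODE(x) == ARM_EXCEPTION_TRAP and ARM_SERROR_PENDING(x),
 * i.e. a trap exit with a deferred SError pending.
 */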

#define kvm_arm_exception_type					\
	{ARM_EXCEPTION_IRQ,		"IRQ"		},	\
	{ARM_EXCEPTION_EL1_SERROR,	"SERROR"	},	\
	{ARM_EXCEPTION_TRAP,		"TRAP"		},	\
	{ARM_EXCEPTION_HYP_GONE,	"HYP_GONE"	}
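/*
 * Note: the {value, "name"} initializer list above is the form consumed by
 * __print_symbolic() in trace events (the users live outside this header).
 */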

/*
 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
 * that jumps over this.
 */
#define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)
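/* AArch64 instructions are fixed at 4 bytes, so the preamble is 8 bytes. */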

#ifndef __ASSEMBLY__

#include <linux/mm.h>

/* Translate a kernel address of @sym into its equivalent linear mapping */
#define kvm_ksym_ref(sym)						\
	({								\
		void *val = __va_function(sym);				\
		if (!is_kernel_in_hyp_mode())				\
			val = lm_alias(val);				\
		val;							\
	 })

struct kvm;
struct kvm_vcpu;

extern char __kvm_hyp_init[];
extern char __kvm_hyp_init_end[];

extern char __kvm_hyp_vector[];
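
/*
 * Illustrative sketch only (this helper is hypothetical, not part of the
 * original header): a caller resolves a hyp symbol through kvm_ksym_ref()
 * so that the address handed to EL2 is the linear-map alias on non-VHE
 * systems, and the kernel address unchanged when running VHE.
 */
static inline void *__kvm_hyp_vector_ref_example(void)
{
	return kvm_ksym_ref(__kvm_hyp_vector);
}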

extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
extern void __kvm_flush_cpu_context(struct kvm_vcpu *vcpu);

extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);

extern int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu);

extern int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu);

extern u64 __vgic_v3_get_ich_vtr_el2(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);

extern u32 __kvm_get_mdcr_el2(void);

/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
#define __hyp_this_cpu_ptr(sym)						\
	({								\
		void *__ptr = hyp_symbol_addr(sym);			\
		__ptr += read_sysreg(tpidr_el2);			\
		(typeof(&sym))__ptr;					\
	 })

#define __hyp_this_cpu_read(sym)					\
	({								\
		*__hyp_this_cpu_ptr(sym);				\
	 })
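
/*
 * Illustrative use only (kvm_host_data is a per-cpu variable declared
 * elsewhere): at EL2, code such as
 *
 *	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 *
 * resolves this CPU's copy via TPIDR_EL2, which the regular
 * __this_cpu_ptr() machinery cannot do at HYP.
 */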

#define __KVM_EXTABLE(from, to)						\
	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"
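
/*
 * Each entry is a pair of 32-bit offsets relative to the entry itself
 * (the "(x - .)" form): the same layout as _asm_extable, just emitted
 * into the EL2-mappable __kvm_ex_table section.
 */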

#define __kvm_at(at_op, addr)						\
( {									\
	int __kvm_at_err = 0;						\
	u64 spsr, elr;							\
	asm volatile(							\
	"	mrs	%1, spsr_el2\n"					\
	"	mrs	%2, elr_el2\n"					\
	"1:	at	"at_op", %3\n"					\
	"	isb\n"							\
	"	b	9f\n"						\
	"2:	msr	spsr_el2, %1\n"					\
	"	msr	elr_el2, %2\n"					\
	"	mov	%w0, %4\n"					\
	"9:\n"								\
	__KVM_EXTABLE(1b, 2b)						\
	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)		\
	: "r" (addr), "i" (-EFAULT));					\
	__kvm_at_err;							\
} )
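
/*
 * Illustrative use only (hypothetical caller sketch): a stage-1 EL1 read
 * translation of a faulting address, where an exception taken by the AT
 * instruction itself is caught by the extable fixup and reported as -EFAULT:
 *
 *	if (__kvm_at("s1e1r", far))
 *		return false;
 *	par = read_sysreg(par_el1);
 */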

#else	/* __ASSEMBLY__ */

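/* Assembly counterparts of the __hyp_this_cpu_{ptr,read} helpers above */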
.macro hyp_adr_this_cpu reg, sym, tmp
	adr_l	\reg, \sym
	mrs	\tmp, tpidr_el2
	add	\reg, \reg, \tmp
.endm

.macro hyp_ldr_this_cpu reg, sym, tmp
	adr_l	\reg, \sym
	mrs	\tmp, tpidr_el2
	ldr	\reg, [\reg, \tmp]
.endm

.macro get_host_ctxt reg, tmp
	hyp_adr_this_cpu \reg, kvm_host_data, \tmp
	add	\reg, \reg, #HOST_DATA_CONTEXT
.endm

.macro get_vcpu_ptr vcpu, ctxt
	get_host_ctxt \ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
	kern_hyp_va	\vcpu
.endm
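
/*
 * Illustrative use only (register choice is the caller's):
 *
 *	get_vcpu_ptr	x1, x0
 *
 * leaves the vcpu pointer in x1 and the host context pointer in x0,
 * per the macro definitions above.
 */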

/*
 * KVM extable for unexpected exceptions.
 * Same format as _asm_extable, but output to a different section so that
 * it can be mapped to EL2. The KVM version is not sorted. The caller must
 * ensure that x18 holds the hypervisor value, so that any Shadow-Call-Stack
 * instrumented code can write to it, and that SPSR_EL2 and ELR_EL2 are
 * restored by the fixup.
 */
.macro	_kvm_extable, from, to
	.pushsection	__kvm_ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
.endm

#endif /* __ASSEMBLY__ */

#endif /* __ARM_KVM_ASM_H__ */