/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

	.text
	.pushsection	.hyp.text, "ax"
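
/*
 * Everything below lives in .hyp.text, which (without VHE) is mapped
 * at EL2 at a different VA from the kernel linear map; kernel pointers
 * must be converted with kern_hyp_va before being used here.
 */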

.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
.endm
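
/*
 * Using lr as scratch for the target is free: blr overwrites lr with
 * the return address anyway. Callers that need lr afterwards must save
 * it themselves, as __vhe_hyp_call does below. The host-side shape of
 * such a call is, for example:
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 */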

ENTRY(__vhe_hyp_call)
	str	lr, [sp, #-16]!
	do_el2_call
	ldr	lr, [sp], #16
	/*
	 * We used to rely on having an exception return to get
	 * an implicit isb. In the E2H case, we don't have it anymore.
	 * Rather than changing all the leaf functions, just do it here
	 * before returning to the rest of the kernel.
	 */
	isb
	ret
ENDPROC(__vhe_hyp_call)
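
/*
 * Non-VHE hosts get the same service by issuing hvc #0, which traps to
 * el1_sync below; a VHE host already runs at EL2, so it branches here
 * directly instead of taking an exception.
 */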

/*
 * Compute the idmap address of __kvm_hyp_reset based on the idmap
 * start passed as a parameter, and jump there.
 *
 * x0: HYP phys_idmap_start
 */
ENTRY(__kvm_hyp_teardown)
	mov	x4, x0
	adr_l	x3, __kvm_hyp_reset

	/* insert __kvm_hyp_reset()'s offset into phys_idmap_start */
	bfi	x4, x3, #0, #PAGE_SHIFT
	br	x4
ENDPROC(__kvm_hyp_teardown)
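
/*
 * The bfi above overwrites bits [PAGE_SHIFT-1:0] of the idmap base with
 * __kvm_hyp_reset's offset within its own page. This assumes the idmap
 * placement guarantees that __kvm_hyp_reset lives in the idmap page
 * itself.
 */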

el1_sync:				// Guest trapped into EL2
	stp	x0, x1, [sp, #-16]!

alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	x1, esr_el2
alternative_else
	mrs	x1, esr_el1
alternative_endif
	lsr	x0, x1, #ESR_ELx_EC_SHIFT

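	/*
	 * cmp/ccmp evaluate (EC == HVC64) || (EC == HVC32) without an
	 * extra branch: when the first compare fails, ccmp performs the
	 * second one; when it succeeds, ccmp just sets NZCV to #4
	 * (Z set, i.e. "equal"), so b.ne falls through on either match.
	 */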
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap

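	/*
	 * vttbr_el2 holds a VMID/stage-2 PGD only while a guest is
	 * loaded; the world-switch code zeroes it on the way out, so a
	 * non-zero value means this HVC was issued by a guest.
	 */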
	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
	cbnz	x1, el1_hvc_guest	// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16

	cmp	x0, #HVC_GET_VECTORS
	b.ne	1f
	mrs	x0, vbar_el2
	b	2f

1:
	/*
	 * Perform the EL2 call
	 */
	kern_hyp_va	x0
	do_el2_call

2:	eret

el1_hvc_guest:
	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
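	/*
	 * A guest typically gets here with something like (SMCCC v1.1
	 * fast call 0x80008000):
	 *
	 *	mov	w0, #0x8000
	 *	movk	w0, #0x8000, lsl #16	// ARM_SMCCC_ARCH_WORKAROUND_1
	 *	hvc	#0
	 */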
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbnz	w1, el1_trap
	mov	x0, x1			// x1 is 0 here: return SMCCC success
	add	sp, sp, #16
	eret

el1_trap:
	/*
	 * x0: ESR_EC
	 */

	/* Guest accessed VFP/SIMD registers, save host, restore Guest */
	cmp	x0, #ESR_ELx_EC_FP_ASIMD
	b.eq	__fpsimd_guest_restore

	mrs	x1, tpidr_el2		// vcpu pointer, kept in tpidr_el2
					// while the guest runs
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	stp	x0, x1, [sp, #-16]!
	mrs	x1, tpidr_el2
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	stp	x0, x1, [sp, #-16]!
	mrs	x1, tpidr_el2
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

el2_error:
	/*
	 * Only two possibilities:
	 * 1) Either we come from the exit path, having just unmasked
	 *    PSTATE.A: change the return code to an EL2 fault, and
	 *    carry on, as we're already in a sane state to handle it.
	 * 2) Or we come from anywhere else, and that's a bug: we panic.
	 *
	 * For (1), x0 contains the original return code and x1 doesn't
	 * contain anything meaningful at that stage. We can reuse them
	 * as temp registers.
	 * For (2), who cares?
	 */
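	/*
	 * Note that this is an equality check against the two labels,
	 * not a range check: a pending SError is taken at the isb
	 * bracketed by abort_guest_exit_start/end in the exit path, so
	 * ELR_EL2 can only legitimately hold one of those two addresses.
	 */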
	mrs	x0, elr_el2
	adr	x1, abort_guest_exit_start
	cmp	x0, x1
	adr	x1, abort_guest_exit_end
	ccmp	x0, x1, #4, ne
	b.ne	__hyp_panic
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
	eret

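/*
 * Fake an exception return into the host's panic(): build an EL1h
 * PSTATE with all of DAIF masked, point elr_el2 at panic(), and eret.
 * The caller is expected to have left panic()'s arguments in place.
 */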
ENTRY(__hyp_do_panic)
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret
ENDPROC(__hyp_do_panic)

.macro invalid_vector	label, target = __hyp_panic
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_sync_invalid
	invalid_vector	el1_irq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg

	.align 11

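/*
 * VBAR_EL2 ignores its low 11 bits, hence the 2KB alignment above: the
 * table holds 16 entries in four groups (current EL with SP_EL0,
 * current EL with SP_ELx, lower EL AArch64, lower EL AArch32), each
 * ventry slot being 0x80 bytes.
 */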
ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid		// Synchronous EL2t
	ventry	el2t_irq_invalid		// IRQ EL2t
	ventry	el2t_fiq_invalid		// FIQ EL2t
	ventry	el2t_error_invalid		// Error EL2t

	ventry	el2h_sync_invalid		// Synchronous EL2h
	ventry	el2h_irq_invalid		// IRQ EL2h
	ventry	el2h_fiq_invalid		// FIQ EL2h
	ventry	el2_error			// Error EL2h

	ventry	el1_sync			// Synchronous 64-bit EL1
	ventry	el1_irq				// IRQ 64-bit EL1
	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
	ventry	el1_error			// Error 64-bit EL1

	ventry	el1_sync			// Synchronous 32-bit EL1
	ventry	el1_irq				// IRQ 32-bit EL1
	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
	ventry	el1_error			// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)