/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/mmu.h>

.macro save_caller_saved_regs_vect
	/* x0 and x1 were saved in the vector entry */
	stp	x2, x3,   [sp, #-16]!
	stp	x4, x5,   [sp, #-16]!
	stp	x6, x7,   [sp, #-16]!
	stp	x8, x9,   [sp, #-16]!
	stp	x10, x11, [sp, #-16]!
	stp	x12, x13, [sp, #-16]!
	stp	x14, x15, [sp, #-16]!
	stp	x16, x17, [sp, #-16]!
.endm

.macro restore_caller_saved_regs_vect
	ldp	x16, x17, [sp], #16
	ldp	x14, x15, [sp], #16
	ldp	x12, x13, [sp], #16
	ldp	x10, x11, [sp], #16
	ldp	x8, x9,   [sp], #16
	ldp	x6, x7,   [sp], #16
	ldp	x4, x5,   [sp], #16
	ldp	x2, x3,   [sp], #16
	ldp	x0, x1,   [sp], #16
.endm
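
/*
 * Note that the restore also pops the x0/x1 pair pushed by the vector
 * preamble, so el2_sync and el2_error can eret right after it.
 */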

	.text

el1_sync:				// Guest trapped into EL2

	mrs	x0, esr_el2
	ubfx	x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
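	/*
	 * ccmp forces the flags to "eq" (nzcv #4, i.e. Z set) when the
	 * first comparison already matched, and otherwise compares
	 * against HVC32, so the single b.ne below traps anything that
	 * is neither an HVC64 nor an HVC32.
	 */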
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap

	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
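	/*
	 * x1 below holds the guest's x0, i.e. the SMCCC function ID
	 * for these calls. eor-ing it with a workaround ID gives zero
	 * on a match, and re-eor-ing the result with (ID_A ^ ID_B)
	 * retargets the same cbz test at ID_B without reloading the
	 * guest's x0.
	 */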
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbz	w1, wa_epilogue

	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
	cbz	w1, wa_epilogue

	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_3)
	cbnz	w1, el1_trap

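/*
 * Each of the three workaround IDs lands here: return 0 in the guest's
 * x0, drop the x0/x1 pair saved by the vector preamble, and eret
 * straight back to the guest. The sb is a speculation barrier covering
 * the eret.
 */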
wa_epilogue:
	mov	x0, xzr
	add	sp, sp, #16
	eret
	sb

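/*
 * Common guest-exit pattern: x1 gets the vcpu pointer (get_vcpu_ptr
 * clobbers x0 as a scratch register while computing it), x0 gets the
 * exit code that __guest_exit hands back to the host.
 */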
el1_trap:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

el2_sync:
	/* Check for illegal exception return */
	mrs	x0, spsr_el2
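	/* SPSR_EL2.IL (illegal execution state) is bit 20 */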
	tbnz	x0, #20, 1f

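	/*
	 * An unexpected synchronous exception taken at EL2 itself:
	 * preserve the caller-saved state, let
	 * kvm_unexpected_el2_exception deal with it, then return to
	 * whatever ELR_EL2 points at afterwards.
	 */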
	save_caller_saved_regs_vect
	stp     x29, x30, [sp, #-16]!
	bl	kvm_unexpected_el2_exception
	ldp     x29, x30, [sp], #16
	restore_caller_saved_regs_vect

	eret

1:
	/* Let's attempt a recovery from the illegal exception return */
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IL
	b	__guest_exit


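/*
 * An SError taken at EL2 itself: same treatment as the unexpected
 * el2_sync case above.
 */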
el2_error:
	save_caller_saved_regs_vect
	stp     x29, x30, [sp, #-16]!

	bl	kvm_unexpected_el2_exception

	ldp     x29, x30, [sp], #16
	restore_caller_saved_regs_vect

	eret
	sb

.macro invalid_vector	label, target = __guest_exit_panic
	.align	2
SYM_CODE_START(\label)
	b \target
SYM_CODE_END(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg

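/*
 * 2K-align what follows: __kvm_hyp_vector below gets the 2048-byte
 * alignment that VBAR_EL2 requires.
 */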
	.align 11

.macro check_preamble_length start, end
/* kvm_patch_vector_branch() generates code that jumps over the preamble. */
.if ((\end-\start) != KVM_VECTOR_PREAMBLE)
	.error "KVM vector preamble length mismatch"
.endif
.endm

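/*
 * Both vector preambles are two instructions (KVM_VECTOR_PREAMBLE
 * bytes): an esb (error synchronization barrier) or a same-sized nop,
 * followed by the x0/x1 push that the handlers rely on.
 */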
.macro valid_vect target
	.align 7
661:
	esb
	stp	x0, x1, [sp, #-16]!
662:
	b	\target

check_preamble_length 661b, 662b
.endm

.macro invalid_vect target
	.align 7
661:
	nop
	stp	x0, x1, [sp, #-16]!
662:
	b	\target

check_preamble_length 661b, 662b
.endm

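/*
 * The architectural 16-entry layout: four vectors each for EL2t, EL2h,
 * 64-bit lower EL and 32-bit lower EL, 128 bytes apart thanks to the
 * .align 7 in the macros above.
 */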
SYM_CODE_START(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	valid_vect	el2_sync		// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_vector)

.macro hyp_ventry
	.align 7
1:	esb
	.rept 26
	nop
	.endr
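	/*
	 * esb + 26 nops + the 5 patchable instructions below fill the
	 * 32-instruction (128-byte) vector slot exactly.
	 */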
/*
 * The default sequence is to directly branch to the KVM vectors,
 * using the computed offset. This applies for VHE as well as
 * !ARM64_HARDEN_EL2_VECTORS. The first vector must always run the preamble.
 *
 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
 * with:
 *
 * stp	x0, x1, [sp, #-16]!
 * movz	x0, #(addr & 0xffff)
 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
 * br	x0
 *
 * Where:
 * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
 * See kvm_patch_vector_branch for details.
 */
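/*
 * 0: is defined in generate_vectors, so (1b - 0b) is this entry's
 * offset within the 2K block; KVM_VECTOR_PREAMBLE is added because the
 * esb and the x0/x1 push have already been issued here.
 */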
alternative_cb	kvm_patch_vector_branch
	stp	x0, x1, [sp, #-16]!
	b	__kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
	nop
	nop
	nop
alternative_cb_end
.endm

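/* 16 slots of 128 bytes give one SZ_2K copy of the vectors. */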
.macro generate_vectors
0:
	.rept 16
	hyp_ventry
	.endr
	.org 0b + SZ_2K		// Safety measure
.endm

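/*
 * One 2K copy of the vectors per branch predictor hardening slot; the
 * trailing .org pair is a build-time check that the whole thing is
 * exactly __BP_HARDEN_HYP_VECS_SZ bytes.
 */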
	.align	11
SYM_CODE_START(__bp_harden_hyp_vecs)
	.rept BP_HARDEN_EL2_SLOTS
	generate_vectors
	.endr
1:	.org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
	.org 1b
SYM_CODE_END(__bp_harden_hyp_vecs)