/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu.h>

.macro save_caller_saved_regs_vect
	/* x0 and x1 were saved in the vector entry */
	stp	x2, x3,   [sp, #-16]!
	stp	x4, x5,   [sp, #-16]!
	stp	x6, x7,   [sp, #-16]!
	stp	x8, x9,   [sp, #-16]!
	stp	x10, x11, [sp, #-16]!
	stp	x12, x13, [sp, #-16]!
	stp	x14, x15, [sp, #-16]!
	stp	x16, x17, [sp, #-16]!
.endm

.macro restore_caller_saved_regs_vect
	ldp	x16, x17, [sp], #16
	ldp	x14, x15, [sp], #16
	ldp	x12, x13, [sp], #16
	ldp	x10, x11, [sp], #16
	ldp	x8, x9,   [sp], #16
	ldp	x6, x7,   [sp], #16
	ldp	x4, x5,   [sp], #16
	ldp	x2, x3,   [sp], #16
	ldp	x0, x1,   [sp], #16
.endm

	.text
	.pushsection	.hyp.text, "ax"

.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	str	lr, [sp, #-16]!
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16
.endm
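
/*
 * For illustration (not part of this file): the host side of this
 * protocol is expected to issue "hvc #0" with x0 holding the kernel VA
 * of the EL2 function (converted below with kern_hyp_va) and x1-x3
 * holding up to three arguments, roughly along the lines of
 * __kvm_call_hyp. do_el2_call then slides x1-x3 down into x0-x2 so the
 * callee sees an ordinary AAPCS argument layout.
 */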

el1_sync:				// Guest trapped into EL2

	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap
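
	/*
	 * On the cmp/ccmp pair above: if the EC is not HVC64, ccmp compares
	 * it against HVC32; if it is HVC64, the #4 immediate (NZCV with only
	 * Z set) is written to the flags instead, so b.ne falls through for
	 * either HVC flavour and everything else heads to el1_trap.
	 */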

	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
	cbnz	x1, el1_hvc_guest	// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset

	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5
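
	/*
	 * For reference: kimage_voffset is the difference between the
	 * kernel image's virtual and physical addresses, so subtracting it
	 * from the kernel VA yields the physical address, which is also the
	 * address the idmap maps __kvm_handle_stub_hvc at.
	 */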

1:
	/*
	 * Perform the EL2 call
	 */
	kern_hyp_va	x0
	do_el2_call

	eret
	sb
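
	/*
	 * The "sb" (speculation barrier) issued after each eret prevents the
	 * CPU from speculatively executing the instructions that follow the
	 * exception return.
	 */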

el1_hvc_guest:
	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbz	w1, wa_epilogue

	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
	cbz	w1, wa_epilogue

	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_3)
	cbnz	w1, el1_trap
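
	/*
	 * How the eor chain works: after the first eor, w1 holds
	 * guest_x0 ^ WORKAROUND_1, which is zero iff the guest requested
	 * WORKAROUND_1. XORing that with (WORKAROUND_1 ^ WORKAROUND_2)
	 * cancels the first constant and leaves guest_x0 ^ WORKAROUND_2,
	 * and likewise for WORKAROUND_3, so each candidate is tested with a
	 * single eor/cbz without reloading the guest's x0.
	 */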

#ifdef CONFIG_ARM64_SSBD
alternative_cb	arm64_enable_wa2_handling
	b	wa2_end
alternative_cb_end
	get_vcpu_ptr	x2, x0
	ldr	x0, [x2, #VCPU_WORKAROUND_FLAGS]

	// Sanitize the argument and update the guest flags
	ldr	x1, [sp, #8]			// Guest's x1
	clz	w1, w1				// Murphy's device:
	lsr	w1, w1, #5			// w1 = !!w1 without using
	eor	w1, w1, #1			// the flags...
	bfi	x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
	str	x0, [x2, #VCPU_WORKAROUND_FLAGS]
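	// Worked example of the trick above: for w1 == 0, clz returns 32,
	// 32 >> 5 == 1 and 1 ^ 1 == 0; for any non-zero w1, clz returns at
	// most 31, the shift yields 0 and the eor turns it into 1. The net
	// effect is w1 = !!w1, computed without touching the NZCV flags.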

	/* Check that we actually need to perform the call */
	hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
	cbz	x0, wa2_end

	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	smc	#0

	/* Don't leak data from the SMC call */
	mov	x3, xzr
wa2_end:
	mov	x2, xzr
	mov	x1, xzr
#endif

wa_epilogue:
	mov	x0, xzr
	add	sp, sp, #16
	eret
	sb

el1_trap:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

el2_sync:
	/* Check for illegal exception return */
	mrs	x0, spsr_el2
	tbnz	x0, #20, 1f

	save_caller_saved_regs_vect
	stp     x29, x30, [sp, #-16]!
	bl	kvm_unexpected_el2_exception
	ldp     x29, x30, [sp], #16
	restore_caller_saved_regs_vect

	eret

1:
	/* Let's attempt a recovery from the illegal exception return */
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IL
	b	__guest_exit


el2_error:
	save_caller_saved_regs_vect
	stp     x29, x30, [sp, #-16]!

	bl	kvm_unexpected_el2_exception

	ldp     x29, x30, [sp], #16
	restore_caller_saved_regs_vect

	eret
	sb

ENTRY(__hyp_do_panic)
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret
	sb
ENDPROC(__hyp_do_panic)

ENTRY(__hyp_panic)
	get_host_ctxt x0, x1
	b	hyp_panic
ENDPROC(__hyp_panic)

.macro invalid_vector	label, target = __hyp_panic
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg

	.align 11

.macro check_preamble_length start, end
/* kvm_patch_vector_branch() generates code that jumps over the preamble. */
.if ((\end-\start) != KVM_VECTOR_PREAMBLE)
	.error "KVM vector preamble length mismatch"
.endif
.endm

.macro valid_vect target
	.align 7
661:
	esb
	stp	x0, x1, [sp, #-16]!
662:
	b	\target

check_preamble_length 661b, 662b
.endm

.macro invalid_vect target
	.align 7
661:
	b	\target
	nop
662:
	ldp	x0, x1, [sp], #16
	b	\target

check_preamble_length 661b, 662b
.endm
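
/*
 * Note on the two macros above: both emit an identically sized preamble
 * (esb + stp for valid_vect, b + nop for invalid_vect), so every vector
 * starts with KVM_VECTOR_PREAMBLE bytes that the branch generated by
 * kvm_patch_vector_branch() can skip uniformly; check_preamble_length
 * turns any size mismatch into a build-time error.
 */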

ENTRY(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	valid_vect	el2_sync		// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

#ifdef CONFIG_KVM_INDIRECT_VECTORS
.macro hyp_ventry
	.align 7
1:	esb
	.rept 26
	nop
	.endr
/*
 * The default sequence is to directly branch to the KVM vectors,
 * using the computed offset. This applies for VHE as well as
 * !ARM64_HARDEN_EL2_VECTORS. The first vector must always run the preamble.
 *
 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
 * with:
 *
 * stp	x0, x1, [sp, #-16]!
 * movz	x0, #(addr & 0xffff)
 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
 * br	x0
 *
 * Where:
 * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
 * See kvm_patch_vector_branch for details.
 */
alternative_cb	kvm_patch_vector_branch
	stp	x0, x1, [sp, #-16]!
	b	__kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
	nop
	nop
	nop
alternative_cb_end
.endm

.macro generate_vectors
0:
	.rept 16
	hyp_ventry
	.endr
	.org 0b + SZ_2K		// Safety measure
.endm
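
/*
 * Sizing, for reference: each hyp_ventry is 128 bytes (.align 7, i.e.
 * 1 esb + 26 nops + 5 patchable instructions = 32 4-byte slots), so 16
 * of them fill exactly the 2KiB that a full vector table occupies; the
 * ".org 0b + SZ_2K" makes the build fail if that size is ever exceeded.
 */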

	.align	11
ENTRY(__bp_harden_hyp_vecs_start)
	.rept BP_HARDEN_EL2_SLOTS
	generate_vectors
	.endr
ENTRY(__bp_harden_hyp_vecs_end)

	.popsection

ENTRY(__smccc_workaround_1_smc_start)
	esb
	sub	sp, sp, #(8 * 4)
	stp	x2, x3, [sp, #(8 * 0)]
	stp	x0, x1, [sp, #(8 * 2)]
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
	smc	#0
	ldp	x2, x3, [sp, #(8 * 0)]
	ldp	x0, x1, [sp, #(8 * 2)]
	add	sp, sp, #(8 * 4)
ENTRY(__smccc_workaround_1_smc_end)
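
/*
 * The start/end pairs above and below do not mark callable functions:
 * they delimit template sequences that the branch-predictor hardening
 * code copies into the __bp_harden_hyp_vecs slots, so each sequence has
 * to be position-independent and must preserve, via the stack, every
 * register it clobbers.
 */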

ENTRY(__smccc_workaround_3_smc_start)
	esb
	sub	sp, sp, #(8 * 4)
	stp	x2, x3, [sp, #(8 * 0)]
	stp	x0, x1, [sp, #(8 * 2)]
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_3
	smc	#0
	ldp	x2, x3, [sp, #(8 * 0)]
	ldp	x0, x1, [sp, #(8 * 2)]
	add	sp, sp, #(8 * 4)
ENTRY(__smccc_workaround_3_smc_end)

ENTRY(__spectre_bhb_loop_k8_start)
	esb
	sub	sp, sp, #(8 * 2)
	stp	x0, x1, [sp, #(8 * 0)]
	mov	x0, #8
2:	b	. + 4
	subs	x0, x0, #1
	b.ne	2b
	dsb	nsh
	isb
	ldp	x0, x1, [sp, #(8 * 0)]
	add	sp, sp, #(8 * 2)
ENTRY(__spectre_bhb_loop_k8_end)
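
/*
 * In the loop variants, "b . + 4" is a taken branch to the very next
 * instruction; executing it 8, 24 or 32 times (matching the affected
 * CPU's branch history depth) fills the branch history with benign
 * entries, and the dsb nsh/isb pair ensures the loop has completed
 * before execution continues.
 */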

ENTRY(__spectre_bhb_loop_k24_start)
	esb
	sub	sp, sp, #(8 * 2)
	stp	x0, x1, [sp, #(8 * 0)]
	mov	x0, #24
2:	b	. + 4
	subs	x0, x0, #1
	b.ne	2b
	dsb	nsh
	isb
	ldp	x0, x1, [sp, #(8 * 0)]
	add	sp, sp, #(8 * 2)
ENTRY(__spectre_bhb_loop_k24_end)

ENTRY(__spectre_bhb_loop_k32_start)
	esb
	sub	sp, sp, #(8 * 2)
	stp	x0, x1, [sp, #(8 * 0)]
	mov	x0, #32
2:	b	. + 4
	subs	x0, x0, #1
	b.ne	2b
	dsb	nsh
	isb
	ldp	x0, x1, [sp, #(8 * 0)]
	add	sp, sp, #(8 * 2)
ENTRY(__spectre_bhb_loop_k32_end)

ENTRY(__spectre_bhb_clearbhb_start)
	esb
	clearbhb
	isb
ENTRY(__spectre_bhb_clearbhb_end)
#endif