/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/cfi_types.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/el2_setup.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

	.text
	.pushsection	.idmap.text, "ax"

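	/* The EL2 vector table must be 2kB-aligned (VBAR_EL2 bits [10:0] are RES0) */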
	.align	11

SYM_CODE_START(__kvm_hyp_init)
	ventry	.			// Synchronous EL2t
	ventry	.			// IRQ EL2t
	ventry	.			// FIQ EL2t
	ventry	.			// Error EL2t

	ventry	.			// Synchronous EL2h
	ventry	.			// IRQ EL2h
	ventry	.			// FIQ EL2h
	ventry	.			// Error EL2h

	ventry	__do_hyp_init		// Synchronous 64-bit EL1
	ventry	.			// IRQ 64-bit EL1
	ventry	.			// FIQ 64-bit EL1
	ventry	.			// Error 64-bit EL1

	ventry	.			// Synchronous 32-bit EL1
	ventry	.			// IRQ 32-bit EL1
	ventry	.			// FIQ 32-bit EL1
	ventry	.			// Error 32-bit EL1

	/*
	 * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers.
	 *
	 * x0: SMCCC function ID
	 * x1: struct kvm_nvhe_init_params PA
	 */
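	/*
	 * A sketch of the matching host-side call, roughly as issued from
	 * cpu_init_hyp_mode() in arch/arm64/kvm/arm.c (simplified):
	 *
	 *	struct arm_smccc_res res;
	 *
	 *	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init),
	 *			  virt_to_phys(params), &res);
	 *	WARN_ON(res.a0 != SMCCC_RET_SUCCESS);
	 */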
__do_hyp_init:
	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.lo	__kvm_handle_stub_hvc

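	/* Ignore the SMCCC hint bits (e.g. the SVE live-state hint) */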
	bic	x0, x0, #ARM_SMCCC_CALL_HINTS
	mov	x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
	cmp	x0, x3
	b.eq	1f

	mov	x0, #SMCCC_RET_NOT_SUPPORTED
	eret

1:	mov	x0, x1
	mov	x3, lr
	bl	___kvm_hyp_init			// Clobbers x0..x2
	mov	lr, x3

	/* Hello, World! */
	mov	x0, #SMCCC_RET_SUCCESS
	eret
SYM_CODE_END(__kvm_hyp_init)

SYM_CODE_START_LOCAL(__kvm_init_el2_state)
	/* Initialize EL2 CPU state to sane values. */
	init_el2_state				// Clobbers x0..x2
	finalise_el2_state
	ret
SYM_CODE_END(__kvm_init_el2_state)

/*
 * Initialize the hypervisor in EL2.
 *
 * Only uses x0..x2 so as to not clobber callee-saved SMCCC registers
 * and leave x3 for the caller.
 *
 * x0: struct kvm_nvhe_init_params PA
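 *
 * The NVHE_INIT_* offsets used below are generated by asm-offsets.c from
 * the layout of struct kvm_nvhe_init_params.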
 */
SYM_CODE_START_LOCAL(___kvm_hyp_init)
	ldr	x1, [x0, #NVHE_INIT_STACK_HYP_VA]
	mov	sp, x1

	ldr	x1, [x0, #NVHE_INIT_MAIR_EL2]
	msr	mair_el2, x1

	ldr	x1, [x0, #NVHE_INIT_HCR_EL2]
	msr	hcr_el2, x1

	mov	x2, #HCR_E2H
	and	x2, x1, x2
	cbz	x2, 1f

	// hVHE: Replay the EL2 setup to account for the E2H bit
	// TPIDR_EL2 is used to preserve x0 across the macro maze...
	isb
	msr	tpidr_el2, x0
	str	lr, [x0, #NVHE_INIT_TMP]

	bl	__kvm_init_el2_state

	mrs	x0, tpidr_el2
	ldr	lr, [x0, #NVHE_INIT_TMP]

1:
	ldr	x1, [x0, #NVHE_INIT_TPIDR_EL2]
	msr	tpidr_el2, x1

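	/* Only program HFGWTR_EL2 if FEAT_FGT is implemented */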
	mrs	x1, ID_AA64MMFR0_EL1
	and	x1, x1, #(0xf << ID_AA64MMFR0_EL1_FGT_SHIFT)
	cbz	x1, 2f
	ldr	x1, [x0, #NVHE_INIT_HFGWTR_EL2]
	msr_s	SYS_HFGWTR_EL2, x1

2:
	ldr	x1, [x0, #NVHE_INIT_VTTBR]
	msr	vttbr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_VTCR]
	msr	vtcr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_PGD_PA]
	phys_to_ttbr x2, x1
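	/* Mark the page-tables Common-not-Private if the CPU supports it */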
alternative_if ARM64_HAS_CNP
	orr	x2, x2, #TTBR_CNP_BIT
alternative_else_nop_endif
	msr	ttbr0_el2, x2

	ldr	x0, [x0, #NVHE_INIT_TCR_EL2]
	msr	tcr_el2, x0

	isb

	/* Invalidate stale TLB entries inherited from the bootloader */
	tlbi	alle2
	tlbi	alle1
	dsb	sy

	mov_q	x0, INIT_SCTLR_EL2_MMU_ON
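	/* Enable the pointer authentication keys (IA/IB/DA/DB) if implemented */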
alternative_if ARM64_HAS_ADDRESS_AUTH
	mov_q	x1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
		     SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
	orr	x0, x0, x1
alternative_else_nop_endif

#ifdef CONFIG_ARM64_BTI_KERNEL
alternative_if ARM64_BTI
	orr	x0, x0, #SCTLR_EL2_BT
alternative_else_nop_endif
#endif /* CONFIG_ARM64_BTI_KERNEL */

	msr	sctlr_el2, x0
	isb

	/* Set the host vector */
	ldr	x0, =__kvm_hyp_host_vector
	msr	vbar_el2, x0

	ret
SYM_CODE_END(___kvm_hyp_init)

/*
 * PSCI CPU_ON entry point
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START(kvm_hyp_cpu_entry)
	mov	x1, #1				// is_cpu_on = true
	b	__kvm_hyp_init_cpu
SYM_CODE_END(kvm_hyp_cpu_entry)

/*
 * PSCI CPU_SUSPEND / SYSTEM_SUSPEND entry point
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START(kvm_hyp_cpu_resume)
	mov	x1, #0				// is_cpu_on = false
	b	__kvm_hyp_init_cpu
SYM_CODE_END(kvm_hyp_cpu_resume)

/*
 * Common code for CPU entry points. Initializes EL2 state and
 * installs the hypervisor before handing over to a C handler.
 *
 * x0: struct kvm_nvhe_init_params PA
 * x1: bool is_cpu_on
 */
SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
	mov	x28, x0				// Stash arguments
	mov	x29, x1

	/* Check that the core was booted in EL2. */
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.eq	2f

	/* The core booted in EL1. KVM cannot be initialized on it. */
1:	wfe
	wfi
	b	1b

2:	msr	SPsel, #1			// We want to use SP_EL{1,2}

	bl	__kvm_init_el2_state

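	/* Set up SPSR_EL2 for the eventual ERET to EL1 (see el2_setup.h) */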
	__init_el2_nvhe_prepare_eret

	/* Enable MMU, set vectors and stack. */
	mov	x0, x28
	bl	___kvm_hyp_init			// Clobbers x0..x2

	/* Leave the idmap: branch to the C handler via its hyp VA */
	mov	x0, x29
	ldr	x1, =kvm_host_psci_cpu_entry
	br	x1
SYM_CODE_END(__kvm_hyp_init_cpu)

SYM_CODE_START(__kvm_handle_stub_hvc)
	/*
	 * __kvm_handle_stub_hvc is called from __host_hvc via a branch
	 * instruction (br), so it must start with a BTI J landing pad.
	 */
	bti j
	cmp	x0, #HVC_SOFT_RESTART
	b.ne	1f

	/* This is where we're about to jump, staying at EL2 */
	msr	elr_el2, x1
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
	msr	spsr_el2, x0

	/* Shuffle the arguments, and don't come back */
	mov	x0, x2
	mov	x1, x3
	mov	x2, x4
	b	reset

1:	cmp	x0, #HVC_RESET_VECTORS
	b.ne	1f

	/*
	 * Set the HVC_RESET_VECTORS return code before entering the common
	 * path so that we do not clobber x0-x2 in case we are coming via
	 * HVC_SOFT_RESTART.
	 */
	mov	x0, xzr
reset:
	/* Reset kvm back to the hyp stub. */
	mov_q	x5, INIT_SCTLR_EL2_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el2, x5
	isb

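	/* In protected mode, also reset HCR_EL2 to its host defaults */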
alternative_if ARM64_KVM_PROTECTED_MODE
	mov_q	x5, HCR_HOST_NVHE_FLAGS
	msr	hcr_el2, x5
alternative_else_nop_endif

	/* Install stub vectors */
	adr_l	x5, __hyp_stub_vectors
	msr	vbar_el2, x5
	eret

1:	/* Bad stub call */
	mov_q	x0, HVC_STUB_ERR
	eret

SYM_CODE_END(__kvm_handle_stub_hvc)

/*
 * void __pkvm_init_switch_pgd(phys_addr_t pgd, unsigned long sp,
 *                             void (*fn)(void));
 *
 * SYM_TYPED_FUNC_START() allows C to call this ID-mapped function indirectly
 * using a physical pointer without triggering a kCFI failure.
 */
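/*
 * A rough sketch of the caller side in __pkvm_init() (nvhe/setup.c); the
 * exact arguments vary between kernel versions, so the names here are
 * illustrative only:
 *
 *	void (*fn)(phys_addr_t pgd, unsigned long sp, void (*)(void));
 *
 *	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
 *	fn(__hyp_pa(pgd), hyp_stack_top, __pkvm_init_finalise);
 */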
SYM_TYPED_FUNC_START(__pkvm_init_switch_pgd)
	/* Turn the MMU off */
	pre_disable_mmu_workaround
	mrs	x3, sctlr_el2
	bic	x4, x3, #SCTLR_ELx_M
	msr	sctlr_el2, x4
	isb

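	/* Flush potentially stale TLB entries for the old page-tables */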
	tlbi	alle2

	/* Install the new pgtables */
	phys_to_ttbr x5, x0
alternative_if ARM64_HAS_CNP
	orr	x5, x5, #TTBR_CNP_BIT
alternative_else_nop_endif
	msr	ttbr0_el2, x5

	/* Set the new stack pointer */
	mov	sp, x1

	/* And turn the MMU back on! */
	dsb	nsh
	isb
	set_sctlr_el2	x3
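	/* Tail-"return" into fn, whose pointer the caller passed in x2 */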
	ret	x2
SYM_FUNC_END(__pkvm_init_switch_pgd)

	.popsection