/*
 * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl32/tsp/tsp.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

#include "../tsp_private.h"


	.globl	tsp_entrypoint
	.globl  tsp_vector_table



	/* ---------------------------------------------
	 * Populate the params in x0-x7 from the pointer
	 * to the smc args structure in x0.
	 * ---------------------------------------------
	 */
	.macro restore_args_call_smc
	ldp	x6, x7, [x0, #TSP_ARG6]
	ldp	x4, x5, [x0, #TSP_ARG4]
	ldp	x2, x3, [x0, #TSP_ARG2]
	ldp	x0, x1, [x0, #TSP_ARG0]
	smc	#0
	.endm
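
	/*
	 * In restore_args_call_smc above, x0 is reloaded last on purpose:
	 * it doubles as the pointer to the SMC args structure, so loading
	 * it earlier would clobber the base register used by the other
	 * ldp instructions.
	 */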

	.macro	save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!
	stp	x30, x18, [sp, #-0x10]!
	.endm

	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm
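
	/*
	 * save_eret_context/restore_eret_context stash and recover the
	 * state needed for a later exception return from S-EL1: ELR_EL1
	 * and SPSR_EL1 (via the scratch registers \reg1/\reg2) plus x30
	 * and x18, pushed to and popped from the current stack in
	 * matching (LIFO) order.
	 */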

func tsp_entrypoint _align=3

#if ENABLE_PIE
		/*
		 * ------------------------------------------------------------
		 * If PIE is enabled, fix up the Global Descriptor Table. This
		 * is done only once, during the primary core cold boot path.
		 *
		 * The compile-time base address, required for the fixup, is
		 * calculated using the "pie_fixup" label present within the
		 * first page.
		 * ------------------------------------------------------------
		 */
	pie_fixup:
		ldr	x0, =pie_fixup
		and	x0, x0, #~(PAGE_SIZE_MASK)
		mov_imm	x1, (BL32_LIMIT - BL32_BASE)
		add	x1, x1, x0
		bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */
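
	/*
	 * When PIE is enabled, fixup_gdt_reloc is called with x0 set to
	 * the page-aligned address of the pie_fixup label and x1 set to
	 * x0 + (BL32_LIMIT - BL32_BASE), i.e. the bounds of the region
	 * within which the relocations are expected to be applied.
	 */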

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been set up.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks and disable
	 * speculative loads.
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	bic	x0, x0, #SCTLR_DSSBS_BIT
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Invalidate the RW memory used by the BL32
	 * image. This includes the data and NOBITS
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
	 * use by an earlier boot loader stage. If PIE
	 * is enabled, however, RO sections including
	 * the GOT may be modified during the PIE fixup.
	 * Therefore, to be on the safe side, invalidate
	 * the entire image region if PIE is enabled.
	 * ---------------------------------------------
	 */
#if ENABLE_PIE
#if SEPARATE_CODE_AND_RODATA
	adrp	x0, __TEXT_START__
	add	x0, x0, :lo12:__TEXT_START__
#else
	adrp	x0, __RO_START__
	add	x0, x0, :lo12:__RO_START__
#endif /* SEPARATE_CODE_AND_RODATA */
#else
	adrp	x0, __RW_START__
	add	x0, x0, :lo12:__RW_START__
#endif /* ENABLE_PIE */
	adrp	x1, __RW_END__
	add	x1, x1, :lo12:__RW_END__
	sub	x1, x1, x0
	bl	inv_dcache_range
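
	/*
	 * inv_dcache_range takes the start address in x0 and the length
	 * in x1; the subtraction above converts the end symbol into a
	 * length.
	 */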

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 *   - the .bss section;
	 *   - the coherent memory section.
	 * ---------------------------------------------
	 */
	adrp	x0, __BSS_START__
	add	x0, x0, :lo12:__BSS_START__
	adrp	x1, __BSS_END__
	add	x1, x1, :lo12:__BSS_END__
	sub	x1, x1, x0
	bl	zeromem

#if USE_COHERENT_MEM
	adrp	x0, __COHERENT_RAM_START__
	add	x0, x0, :lo12:__COHERENT_RAM_START__
	adrp	x1, __COHERENT_RAM_END_UNALIGNED__
	add	x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
	sub	x1, x1, x0
	bl	zeromem
#endif

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* ---------------------------------------------
	 * Initialize the stack protector canary before
	 * any C code is called.
	 * ---------------------------------------------
	 */
#if STACK_PROTECTOR_ENABLED
	bl	update_stack_protector_canary
#endif

	/* ---------------------------------------------
	 * Perform TSP setup
	 * ---------------------------------------------
	 */
	bl	tsp_setup

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1
	 * and enable pointer authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising
	 * ---------------------------------------------
	 */
	mov	x1, x0
	mov	x0, #TSP_ENTRY_DONE
	smc	#0
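
	/*
	 * x0 holds TSP_ENTRY_DONE and x1 the value returned by tsp_main;
	 * both are handed to the TSPD through the SMC above.
	 */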

tsp_entrypoint_panic:
	b	tsp_entrypoint_panic
endfunc tsp_entrypoint


	/* -------------------------------------------
	 * Table of entrypoint vectors provided to the
	 * TSPD for the various entrypoints
	 * -------------------------------------------
	 */
vector_base tsp_vector_table
	b	tsp_yield_smc_entry
	b	tsp_fast_smc_entry
	b	tsp_cpu_on_entry
	b	tsp_cpu_off_entry
	b	tsp_cpu_resume_entry
	b	tsp_cpu_suspend_entry
	b	tsp_sel1_intr_entry
	b	tsp_system_off_entry
	b	tsp_system_reset_entry
	b	tsp_abort_yield_smc_entry
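
	/*
	 * Each entry above is a single branch instruction, so the TSPD can
	 * reach a given service by jumping to tsp_vector_table plus a
	 * fixed offset; the entry order therefore has to match the vector
	 * layout the TSPD expects (declared in tsp.h, included above).
	 */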

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be turned off through a CPU_OFF
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD expects the TSP to
	 * re-initialise its state, so nothing is done
	 * here except for acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_off_entry
	bl	tsp_cpu_off_main
	restore_args_call_smc
endfunc tsp_cpu_off_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be switched off (through
	 * a SYSTEM_OFF psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_off_entry
	bl	tsp_system_off_main
	restore_args_call_smc
endfunc tsp_system_off_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be reset (through a
	 * SYSTEM_RESET psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_reset_entry
	bl	tsp_system_reset_main
	restore_args_call_smc
endfunc tsp_system_reset_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is turned on using a CPU_ON psci call to
	 * ask the TSP to initialise itself, i.e. set up
	 * the MMU, stacks, etc. Minimal architectural
	 * state will be initialised by the TSPD when
	 * this function is entered, i.e. caches and the
	 * MMU will be turned off, the execution state
	 * will be AArch64 and exceptions will be masked.
	 * ---------------------------------------------
	 */
func tsp_cpu_on_entry
	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable MMU and D-caches together.
	 * --------------------------------------------
	 */
	mov	x0, #0
	bl	bl32_plat_enable_mmu
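
	/*
	 * x0 carries the flags argument for bl32_plat_enable_mmu; zero
	 * requests the default behaviour, i.e. no special translation or
	 * cache options (a reading of this call site, not of the platform
	 * hook itself).
	 */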

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1
	 * and enable pointer authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * bookkeeping
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic
endfunc tsp_cpu_on_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be suspended through a CPU_SUSPEND
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD saves and restores
	 * the EL1 state.
	 * ---------------------------------------------
	 */
func tsp_cpu_suspend_entry
	bl	tsp_cpu_suspend_main
	restore_args_call_smc
endfunc tsp_cpu_suspend_entry

	/*-------------------------------------------------
	 * This entrypoint is used by the TSPD to pass
	 * control for synchronously handling an S-EL1
	 * interrupt which was triggered while executing
	 * in the normal world. 'x0' contains a magic
	 * number which indicates this. The TSPD expects
	 * control to be handed back at the end of
	 * interrupt processing. This is done through an
	 * SMC. The handover agreement is:
	 *
	 * 1. PSTATE.DAIF are set upon entry. 'x1' has
	 *    the ELR_EL3 from the non-secure state.
	 * 2. The TSP has to preserve the callee-saved
	 *    general purpose registers, SP_EL1/EL0 and
	 *    LR.
	 * 3. The TSP has to preserve the system and VFP
	 *    registers (if applicable).
	 * 4. The TSP can use 'x0-x18' to enable its C
	 *    runtime.
	 * 5. The TSP returns to the TSPD using an SMC
	 *    with 'x0' = TSP_HANDLED_S_EL1_INTR.
	 * ------------------------------------------------
	 */
func	tsp_sel1_intr_entry
#if DEBUG
	mov_imm	x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
	cmp	x0, x2
	b.ne	tsp_sel1_int_entry_panic
#endif
	/*-------------------------------------------------
	 * Save any previous context needed to perform
	 * an exception return from S-EL1, e.g. context
	 * from a previous non-secure interrupt.
	 * Update statistics and handle the S-EL1
	 * interrupt before returning to the TSPD.
	 * IRQs/FIQs are not enabled since that would
	 * complicate the implementation. Execution
	 * will be transferred back to the normal world
	 * in any case. The handler can return 0 if the
	 * interrupt was handled, or TSP_PREEMPTED if it
	 * was preempted by an interrupt that should be
	 * handled in EL3, e.g. a Group 0 interrupt in
	 * GICv3. In both cases, switch to EL3 using an
	 * SMC with id TSP_HANDLED_S_EL1_INTR. Any other
	 * return value from the handler will result in
	 * a panic.
	 * ------------------------------------------------
	 */
	save_eret_context x2 x3
	bl	tsp_update_sync_sel1_intr_stats
	bl	tsp_common_int_handler
	/* Check if the S-EL1 interrupt has been handled */
	cbnz	x0, tsp_sel1_intr_check_preemption
	b	tsp_sel1_intr_return
tsp_sel1_intr_check_preemption:
	/* Check if the S-EL1 interrupt has been preempted */
	mov_imm	x1, TSP_PREEMPTED
	cmp	x0, x1
	b.ne	tsp_sel1_int_entry_panic
tsp_sel1_intr_return:
	mov_imm	x0, TSP_HANDLED_S_EL1_INTR
	restore_eret_context x2 x3
	smc	#0

	/* Should never reach here */
tsp_sel1_int_entry_panic:
	no_ret	plat_panic_handler
endfunc tsp_sel1_intr_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu resumes execution after an earlier
	 * CPU_SUSPEND psci call to ask the TSP to
	 * restore its saved context. In the current
	 * implementation, the TSPD saves and restores
	 * EL1 state so nothing is done here apart from
	 * acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_cpu_resume_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a Fast SMC request.
	 * ---------------------------------------------
	 */
func tsp_fast_smc_entry
	bl	tsp_smc_handler
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_fast_smc_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a Yielding SMC request.
	 * We will enable preemption during execution
	 * of tsp_smc_handler.
	 * ---------------------------------------------
	 */
func tsp_yield_smc_entry
	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	bl	tsp_smc_handler
	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_yield_smc_entry

	/*---------------------------------------------------------------------
	 * This entrypoint is used by the TSPD to abort a preempted Yielding
	 * SMC. It could be on behalf of the non-secure world or because a CPU
	 * suspend/CPU off request needs to abort the preempted SMC.
	 * --------------------------------------------------------------------
	 */
func tsp_abort_yield_smc_entry

	/*
	 * Exception masking is already done by the TSPD when entering this
	 * hook, so there is no need to do it here.
	 */

	/* Reset the stack used by the preempted SMC */
	bl	plat_set_my_stack

	/*
	 * Allow some cleanup such as releasing locks.
	 */
	bl	tsp_abort_smc_handler

	restore_args_call_smc

	/* Should never reach here */
	bl	plat_panic_handler
endfunc tsp_abort_yield_smc_entry