/*
 * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl32/tsp/tsp.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

#include "../tsp_private.h"


	.globl	tsp_entrypoint
	.globl	tsp_vector_table



	/* ---------------------------------------------
	 * Populate the params in x0-x7 from the pointer
	 * to the smc args structure in x0, then issue
	 * the SMC to hand control back to the TSPD.
	 * TSP_ARG0 is loaded last because x0 itself
	 * holds the base pointer until that point.
	 * ---------------------------------------------
	 */
	.macro restore_args_call_smc
	ldp	x6, x7, [x0, #TSP_ARG6]
	ldp	x4, x5, [x0, #TSP_ARG4]
	ldp	x2, x3, [x0, #TSP_ARG2]
	ldp	x0, x1, [x0, #TSP_ARG0]
	smc	#0
	.endm

	/* ---------------------------------------------
	 * Save the minimal context needed to perform an
	 * exception return from S-EL1 later on: the EL1
	 * exception return state (ELR_EL1/SPSR_EL1) plus
	 * the link register and x18 (platform register,
	 * preserved across the C handler). Pushed in two
	 * 16-byte pairs to keep SP 16-byte aligned.
	 * ---------------------------------------------
	 */
	.macro save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!
	stp	x30, x18, [sp, #-0x10]!
	.endm

	/* ---------------------------------------------
	 * Inverse of save_eret_context: pop x30/x18 and
	 * the saved ELR_EL1/SPSR_EL1 (reverse push order)
	 * and restore the EL1 exception return state.
	 * ---------------------------------------------
	 */
	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm

/* ---------------------------------------------------
 * Cold-boot entrypoint of the TSP, entered once on the
 * primary CPU with caches/MMU off. Sets up vectors,
 * EL1 system control, memory and stack, then calls
 * tsp_main and reports TSP_ENTRY_DONE to the TSPD.
 * ---------------------------------------------------
 */
func tsp_entrypoint _align=3

#if ENABLE_PIE
	/*
	 * ------------------------------------------------------------
	 * If PIE is enabled fixup the Global descriptor Table only
	 * once during primary core cold boot path.
	 *
	 * Compile time base address, required for fixup, is calculated
	 * using "pie_fixup" label present within first page.
	 * ------------------------------------------------------------
	 */
	pie_fixup:
	ldr	x0, =pie_fixup
	/* Round down to the start of the page containing pie_fixup */
	and	x0, x0, #~(PAGE_SIZE_MASK)
	mov_imm	x1, (BL32_LIMIT - BL32_BASE)
	add	x1, x1, x0
	bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been setup.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks and disable
	 * speculative loads (DSSBS).
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	bic	x0, x0, #SCTLR_DSSBS_BIT
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Invalidate the RW memory used by the BL32
	 * image. This includes the data and NOBITS
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
	 * use by an earlier boot loader stage. Must be
	 * done before zeromem below so the zeroing is
	 * not undone by stale lines being evicted.
	 * ---------------------------------------------
	 */
	adr	x0, __RW_START__
	adr	x1, __RW_END__
	sub	x1, x1, x0
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 * - the .bss section;
	 * - the coherent memory section.
	 * ---------------------------------------------
	 */
	adrp	x0, __BSS_START__
	add	x0, x0, :lo12:__BSS_START__
	adrp	x1, __BSS_END__
	add	x1, x1, :lo12:__BSS_END__
	sub	x1, x1, x0
	bl	zeromem

#if USE_COHERENT_MEM
	adrp	x0, __COHERENT_RAM_START__
	add	x0, x0, :lo12:__COHERENT_RAM_START__
	adrp	x1, __COHERENT_RAM_END_UNALIGNED__
	add	x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
	sub	x1, x1, x0
	bl	zeromem
#endif

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* ---------------------------------------------
	 * Initialize the stack protector canary before
	 * any C code is called.
	 * ---------------------------------------------
	 */
#if STACK_PROTECTOR_ENABLED
	bl	update_stack_protector_canary
#endif

	/* ---------------------------------------------
	 * Perform TSP setup
	 * ---------------------------------------------
	 */
	bl	tsp_setup

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1
	 * and enable pointer authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising.
	 * x1 carries tsp_main's return value (the args
	 * structure) back with the TSP_ENTRY_DONE SMC.
	 * ---------------------------------------------
	 */
	mov	x1, x0
	mov	x0, #TSP_ENTRY_DONE
	smc	#0

tsp_entrypoint_panic:
	b	tsp_entrypoint_panic
endfunc tsp_entrypoint


	/* -------------------------------------------
	 * Table of entrypoint vectors provided to the
	 * TSPD for the various entrypoints.
	 * Entry order must match the TSPD's view of
	 * this table; each slot is a single branch.
	 * -------------------------------------------
	 */
vector_base tsp_vector_table
	b	tsp_yield_smc_entry
	b	tsp_fast_smc_entry
	b	tsp_cpu_on_entry
	b	tsp_cpu_off_entry
	b	tsp_cpu_resume_entry
	b	tsp_cpu_suspend_entry
	b	tsp_sel1_intr_entry
	b	tsp_system_off_entry
	b	tsp_system_reset_entry
	b	tsp_abort_yield_smc_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be turned off through a CPU_OFF
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD expects the TSP to
	 * re-initialise its state so nothing is done
	 * here except for acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_off_entry
	bl	tsp_cpu_off_main
	restore_args_call_smc
endfunc tsp_cpu_off_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be switched off (through
	 * a SYSTEM_OFF psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_off_entry
	bl	tsp_system_off_main
	restore_args_call_smc
endfunc tsp_system_off_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be reset (through a
	 * SYSTEM_RESET psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_reset_entry
	bl	tsp_system_reset_main
	restore_args_call_smc
endfunc tsp_system_reset_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is turned on using a CPU_ON psci call to
	 * ask the TSP to initialise itself i.e. setup
	 * the mmu, stacks etc. Minimal architectural
	 * state will be initialised by the TSPD when
	 * this function is entered i.e. Caches and MMU
	 * will be turned off, the execution state
	 * will be aarch64 and exceptions masked.
	 * ---------------------------------------------
	 */
func tsp_cpu_on_entry
	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable MMU and D-caches together.
	 * x0 = 0: no extra flags for the platform hook.
	 * --------------------------------------------
	 */
	mov	x0, #0
	bl	bl32_plat_enable_mmu

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1
	 * and enable pointer authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * book keeping
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic
endfunc tsp_cpu_on_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be suspended through a CPU_SUSPEND
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD saves and restores
	 * the EL1 state.
	 * ---------------------------------------------
	 */
func tsp_cpu_suspend_entry
	bl	tsp_cpu_suspend_main
	restore_args_call_smc
endfunc tsp_cpu_suspend_entry

	/*-------------------------------------------------
	 * This entrypoint is used by the TSPD to pass
	 * control for `synchronously` handling a S-EL1
	 * Interrupt which was triggered while executing
	 * in normal world. 'x0' contains a magic number
	 * which indicates this. TSPD expects control to
	 * be handed back at the end of interrupt
	 * processing. This is done through an SMC.
	 * The handover agreement is:
	 *
	 * 1. PSTATE.DAIF are set upon entry. 'x1' has
	 *    the ELR_EL3 from the non-secure state.
	 * 2.
	 *    TSP has to preserve the callee saved
	 *    general purpose registers, SP_EL1/EL0 and
	 *    LR.
	 * 3. TSP has to preserve the system and vfp
	 *    registers (if applicable).
	 * 4. TSP can use 'x0-x18' to enable its C
	 *    runtime.
	 * 5. TSP returns to TSPD using an SMC with
	 *    'x0' = TSP_HANDLED_S_EL1_INTR
	 * ------------------------------------------------
	 */
func tsp_sel1_intr_entry
#if DEBUG
	/* Sanity-check the magic number passed by the TSPD in x0 */
	mov_imm	x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
	cmp	x0, x2
	b.ne	tsp_sel1_int_entry_panic
#endif
	/*-------------------------------------------------
	 * Save any previous context needed to perform
	 * an exception return from S-EL1 e.g. context
	 * from a previous Non secure Interrupt.
	 * Update statistics and handle the S-EL1
	 * interrupt before returning to the TSPD.
	 * IRQ/FIQs are not enabled since that will
	 * complicate the implementation. Execution
	 * will be transferred back to the normal world
	 * in any case. The handler can return 0
	 * if the interrupt was handled or TSP_PREEMPTED
	 * if the expected interrupt was preempted
	 * by an interrupt that should be handled in EL3
	 * e.g. Group 0 interrupt in GICv3. In both
	 * the cases switch to EL3 using SMC with id
	 * TSP_HANDLED_S_EL1_INTR. Any other return value
	 * from the handler will result in panic.
	 * ------------------------------------------------
	 */
	save_eret_context x2 x3
	bl	tsp_update_sync_sel1_intr_stats
	bl	tsp_common_int_handler
	/* Check if the S-EL1 interrupt has been handled (handler returned 0) */
	cbnz	x0, tsp_sel1_intr_check_preemption
	b	tsp_sel1_intr_return
tsp_sel1_intr_check_preemption:
	/* Check if the S-EL1 interrupt has been preempted */
	mov_imm	x1, TSP_PREEMPTED
	cmp	x0, x1
	b.ne	tsp_sel1_int_entry_panic
tsp_sel1_intr_return:
	mov_imm	x0, TSP_HANDLED_S_EL1_INTR
	/* Restore x30/x18 and ELR_EL1/SPSR_EL1 saved above, then switch to EL3 */
	restore_eret_context x2 x3
	smc	#0

	/* Should never reach here */
tsp_sel1_int_entry_panic:
	no_ret	plat_panic_handler
endfunc tsp_sel1_intr_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu resumes execution after an earlier
	 * CPU_SUSPEND psci call to ask the TSP to
	 * restore its saved context. In the current
	 * implementation, the TSPD saves and restores
	 * EL1 state so nothing is done here apart from
	 * acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_cpu_resume_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a fast smc request.
	 * ---------------------------------------------
	 */
func tsp_fast_smc_entry
	bl	tsp_smc_handler
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_fast_smc_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a Yielding SMC request.
	 * We will enable preemption during execution
	 * of tsp_smc_handler.
432 * --------------------------------------------- 433 */ 434func tsp_yield_smc_entry 435 msr daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT 436 bl tsp_smc_handler 437 msr daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT 438 restore_args_call_smc 439 440 /* Should never reach here */ 441 no_ret plat_panic_handler 442endfunc tsp_yield_smc_entry 443 444 /*--------------------------------------------------------------------- 445 * This entrypoint is used by the TSPD to abort a pre-empted Yielding 446 * SMC. It could be on behalf of non-secure world or because a CPU 447 * suspend/CPU off request needs to abort the preempted SMC. 448 * -------------------------------------------------------------------- 449 */ 450func tsp_abort_yield_smc_entry 451 452 /* 453 * Exceptions masking is already done by the TSPD when entering this 454 * hook so there is no need to do it here. 455 */ 456 457 /* Reset the stack used by the pre-empted SMC */ 458 bl plat_set_my_stack 459 460 /* 461 * Allow some cleanup such as releasing locks. 462 */ 463 bl tsp_abort_smc_handler 464 465 restore_args_call_smc 466 467 /* Should never reach here */ 468 bl plat_panic_handler 469endfunc tsp_abort_yield_smc_entry 470