/*
 * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <common/bl_common.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/pmf/aarch32/pmf_asm_macros.S>
#include <lib/runtime_instr.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <smccc_helpers.h>
#include <smccc_macros.S>

	.globl	sp_min_vector_table
	.globl	sp_min_entrypoint
	.globl	sp_min_warm_entrypoint
	.globl	sp_min_handle_smc
	.globl	sp_min_handle_fiq

	.macro route_fiq_to_sp_min reg
	/* -----------------------------------------------------
	 * FIQs are secure interrupts trapped by the Monitor and
	 * the Non-secure world is not allowed to mask them.
	 * A C-level sketch of this sequence is given after the
	 * vector table below.
	 * -----------------------------------------------------
	 */
	ldcopr	\reg, SCR
	orr	\reg, \reg, #SCR_FIQ_BIT
	bic	\reg, \reg, #SCR_FW_BIT
	stcopr	\reg, SCR
	.endm

	.macro clrex_on_monitor_entry
#if (ARM_ARCH_MAJOR == 7)
	/*
	 * ARMv7 architectures need to clear the local exclusive access
	 * monitor when entering Monitor mode.
	 */
	clrex
#endif
	.endm

vector_base sp_min_vector_table
	b	sp_min_entrypoint
	b	plat_panic_handler	/* Undef */
	b	sp_min_handle_smc	/* Syscall */
	b	plat_panic_handler	/* Prefetch abort */
	b	plat_panic_handler	/* Data abort */
	b	plat_panic_handler	/* Reserved */
	b	plat_panic_handler	/* IRQ */
	b	sp_min_handle_fiq	/* FIQ */

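/*
 * For reference, a minimal C-level sketch of what the route_fiq_to_sp_min
 * macro above does. It assumes the usual AArch32 coprocessor accessors
 * (read_scr/write_scr) and the SCR_FIQ_BIT/SCR_FW_BIT definitions from
 * arch.h; treat the helper names as illustrative rather than part of this
 * file's interface:
 *
 *	static void route_fiq_to_sp_min_sketch(void)
 *	{
 *		uint32_t scr = read_scr();
 *
 *		scr |= SCR_FIQ_BIT;	// Take FIQs to Monitor mode
 *		scr &= ~SCR_FW_BIT;	// Non-secure world cannot mask CPSR.F
 *		write_scr(scr);
 *	}
 */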

/*
 * The Cold boot/Reset entrypoint for SP_MIN
 */
func sp_min_entrypoint
#if !RESET_TO_SP_MIN
	/* ---------------------------------------------------------------
	 * The preceding bootloader has populated r0 with a pointer to a
	 * 'bl_params_t' structure and r1 with a pointer to a platform
	 * specific structure. Stash r0-r3 so they can later be relayed
	 * to the platform layer.
	 * ---------------------------------------------------------------
	 */
	mov	r9, r0
	mov	r10, r1
	mov	r11, r2
	mov	r12, r3

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
	 * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised the
	 * SCTLR, including the CPU endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * Relay the previous bootloader's arguments to the platform layer
	 * ---------------------------------------------------------------------
	 */
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems which have a programmable reset address,
	 * sp_min_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
	 * to run, so there is no argument to relay from a previous bootloader.
	 * Zero the arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	r9, #0
	mov	r10, #0
	mov	r11, #0
	mov	r12, #0

#endif /* RESET_TO_SP_MIN */

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r4
#endif

	mov	r0, r9
	mov	r1, r10
	mov	r2, r11
	mov	r3, r12
	bl	sp_min_early_platform_setup2
	bl	sp_min_plat_arch_setup

	/* Jump to the main function */
	bl	sp_min_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	ldr	r0, =__DATA_START__
	ldr	r1, =__DATA_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	bl	smc_get_next_ctx

	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_entrypoint
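
/*
 * The cold boot path above hands control to a handful of C hooks. For
 * reference, a sketch of the prototypes assumed here (they follow the shape
 * typically declared in TF-A's platform_sp_min.h and SP_MIN private headers;
 * check the headers in your tree rather than relying on this comment):
 *
 *	void sp_min_early_platform_setup2(u_register_t arg0, u_register_t arg1,
 *					  u_register_t arg2, u_register_t arg3);
 *	void sp_min_plat_arch_setup(void);
 *	void sp_min_main(void);
 *	void *smc_get_next_ctx(void);
 *
 * r0-r3 at the 'bl sp_min_early_platform_setup2' call site carry the
 * arguments saved in r9-r12 earlier, and the pointer returned by
 * smc_get_next_ctx() is the smc_ctx_t consumed by sp_min_exit.
 */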


/*
 * SMC handling function for SP_MIN.
 */
func sp_min_handle_smc
	/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it on top of the C runtime stack.
	 * The value will be saved to the per-cpu data once the C stack is
	 * available, as a valid stack is needed to call _cpu_data().
	 */
	strd	r0, r1, [sp, #SMC_CTX_GPREG_R0]
	ldcopr16	r0, r1, CNTPCT_64
	ldr	lr, [sp, #SMC_CTX_SP_MON]
	strd	r0, r1, [lr, #-8]!
	str	lr, [sp, #SMC_CTX_SP_MON]
	ldrd	r0, r1, [sp, #SMC_CTX_GPREG_R0]
#endif

	smccc_save_gp_mode_regs

	clrex_on_monitor_entry

	/*
	 * `sp` still points to `smc_ctx_t`. Save it to a register
	 * and restore the C runtime stack pointer to `sp`.
	 */
	mov	r2, sp				/* handle */
	ldr	sp, [r2, #SMC_CTX_SP_MON]

#if ENABLE_RUNTIME_INSTRUMENTATION
	/* Save handle to a callee saved register */
	mov	r6, r2

	/*
	 * Restore the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	ldrd	r4, r5, [sp], #8
	bl	_cpu_data
	strd	r4, r5, [r0, #CPU_DATA_PMF_TS0_OFFSET]

	/* Restore handle */
	mov	r2, r6
#endif

	ldr	r0, [r2, #SMC_CTX_SCR]
	and	r3, r0, #SCR_NS_BIT		/* flags */

	/* Switch to Secure Mode */
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
	/* Check whether an SMC64 call was issued */
	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
	beq	1f
	/* SMC64 calls are not supported by SP_MIN. Return SMC_UNK to the caller */
	mov	r0, #SMC_UNK
	str	r0, [r2, #SMC_CTX_GPREG_R0]
	mov	r0, r2
	b	sp_min_exit
1:
	/* An SMC32 call was detected. Dispatch it to the runtime services */
	mov	r1, #0				/* cookie */
	bl	handle_runtime_svc

	/* `r0` points to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_handle_smc

/*
 * Secure Interrupts handling function for SP_MIN.
 */
func sp_min_handle_fiq
#if !SP_MIN_WITH_SECURE_FIQ
	b	plat_panic_handler
#else
	/* The FIQ return address in `lr` is 4 beyond the preferred return address */
	sub	lr, lr, #4
	/* On FIQ entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

	smccc_save_gp_mode_regs

	clrex_on_monitor_entry

	/* Load the C runtime stack */
	mov	r2, sp
	ldr	sp, [r2, #SMC_CTX_SP_MON]

	/* Switch to Secure Mode */
	ldr	r0, [r2, #SMC_CTX_SCR]
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	/* Preserve the handle across the C call (r3 keeps the stack 8-byte aligned) */
	push	{r2, r3}
	bl	sp_min_fiq
	/* Pop the handle straight into r0 for sp_min_exit */
	pop	{r0, r3}

	b	sp_min_exit
#endif
endfunc sp_min_handle_fiq
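
/*
 * For reference, the register state set up by sp_min_handle_smc at the
 * 'bl handle_runtime_svc' call maps onto a C prototype along the lines of
 * the one below. This mirrors how TF-A's common runtime services layer is
 * usually declared; treat it as a sketch and defer to runtime_svc.h:
 *
 *	uintptr_t handle_runtime_svc(uint32_t smc_fid, void *cookie,
 *				     void *handle, unsigned int flags);
 *
 *	r0 = smc_fid  - function ID read back from the saved context
 *	r1 = cookie   - unused on this path, passed as 0
 *	r2 = handle   - pointer to the smc_ctx_t saved on entry
 *	r3 = flags    - SCR.NS bit of the caller, i.e. its security state
 */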

/*
 * The Warm boot entrypoint for SP_MIN.
 */
func sp_min_warm_entrypoint
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * This timestamp update happens with the cache off. The next
	 * timestamp collection will need to do cache maintenance prior
	 * to the timestamp update.
	 */
	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_HW_LOW_PWR
	ldcopr16	r2, r3, CNTPCT_64
	strd	r2, r3, [r0]
#endif
	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 * - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
	 *   programming the reset address do we need to initialise the SCTLR.
	 *   In other cases, we assume this has already been taken care of by
	 *   the entrypoint code.
	 *
	 * - No need to determine the type of boot, we know it is a warm boot.
	 *
	 * - Do not try to distinguish between primary and secondary CPUs, this
	 *   notion only exists for a cold boot.
	 *
	 * - No need to initialise the memory or the C runtime environment,
	 *   it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=sp_min_vector_table

	/*
	 * We're about to enable the MMU and participate in PSCI state
	 * coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * others). Therefore we keep data caches disabled even after enabling
	 * the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	mov	r0, #0
#else
	mov	r0, #DISABLE_DCACHE
#endif
	bl	bl32_plat_enable_mmu

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r0
#endif

	bl	sp_min_warm_boot
	bl	smc_get_next_ctx
	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */

#if ENABLE_RUNTIME_INSTRUMENTATION
	/* Save smc_ctx_t */
	mov	r5, r0

	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_PSCI
	mov	r4, r0

	/*
	 * Invalidate before updating the timestamp to ensure previous
	 * timestamp updates on the same cache line with caches disabled are
	 * properly seen by the same core. Without the cache invalidate, the
	 * core might write into a stale cache line.
	 */
	mov	r1, #PMF_TS_SIZE
	bl	inv_dcache_range

	ldcopr16	r0, r1, CNTPCT_64
	strd	r0, r1, [r4]

	/* Restore smc_ctx_t */
	mov	r0, r5
#endif

	b	sp_min_exit
endfunc sp_min_warm_entrypoint

/*
 * The function to restore the registers from the SMC context and return
 * to the mode specified by the saved SPSR.
 *
 * Arguments : r0 must point to the SMC context to restore from.
 */
func sp_min_exit
	monitor_exit
endfunc sp_min_exit
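
/*
 * The warm boot path in sp_min_warm_entrypoint above relies on two
 * platform-provided C hooks. A sketch of the prototypes assumed by this file
 * is given below; they follow the shape commonly declared in TF-A's
 * platform.h / platform_sp_min.h, so check your tree's headers for the
 * authoritative declarations:
 *
 *	void bl32_plat_enable_mmu(uint32_t flags);
 *		r0 carries the flags, i.e. 0 or DISABLE_DCACHE depending on
 *		whether the platform can safely run with data caches enabled
 *		before coherency has been set up.
 *
 *	void sp_min_warm_boot(void);
 *		Performs the per-CPU PSCI warm boot processing before the
 *		next non-secure context is fetched with smc_get_next_ctx().
 */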