/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <context.h>
#include <el3_common_macros.S>
#include <runtime_svc.h>
#include <smcc_helpers.h>
#include <smcc_macros.S>
#include <xlat_tables_defs.h>

	.globl	sp_min_vector_table
	.globl	sp_min_entrypoint
	.globl	sp_min_warm_entrypoint

	.macro route_fiq_to_sp_min reg
	/* -----------------------------------------------------
	 * FIQs are secure interrupts trapped by the Monitor and
	 * the Non-secure world is not allowed to mask them.
	 * -----------------------------------------------------
	 */
	ldcopr	\reg, SCR
	orr	\reg, \reg, #SCR_FIQ_BIT
	bic	\reg, \reg, #SCR_FW_BIT
	stcopr	\reg, SCR
	.endm

vector_base sp_min_vector_table
	b	sp_min_entrypoint
	b	plat_panic_handler	/* Undef */
	b	handle_smc		/* Syscall */
	b	plat_panic_handler	/* Prefetch abort */
	b	plat_panic_handler	/* Data abort */
	b	plat_panic_handler	/* Reserved */
	b	plat_panic_handler	/* IRQ */
	b	handle_fiq		/* FIQ */


/*
 * The Cold boot/Reset entrypoint for SP_MIN
 */
func sp_min_entrypoint
#if !RESET_TO_SP_MIN
	/* ---------------------------------------------------------------
	 * The preceding bootloader has populated r0 with a pointer to a
	 * 'bl_params_t' structure and r1 with a pointer to a platform-
	 * specific structure.
	 * ---------------------------------------------------------------
	 */
	mov	r11, r0
	mov	r12, r1

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
	 * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised the
	 * SCTLR, including the CPU endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * Relay the previous bootloader's arguments to the platform layer
	 * ---------------------------------------------------------------------
	 */
	mov	r0, r11
	mov	r1, r12
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems which have a programmable reset address,
	 * sp_min_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
	 * to run so there's no argument to relay from a previous bootloader.
	 * Zero the arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	r0, #0
	mov	r1, #0
#endif /* RESET_TO_SP_MIN */

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r4
#endif

	bl	sp_min_early_platform_setup
	bl	sp_min_plat_arch_setup

	/* Jump to the main function */
	bl	sp_min_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	ldr	r0, =__DATA_START__
	ldr	r1, =__DATA_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	bl	smc_get_next_ctx

	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_entrypoint


/*
 * SMC handling function for SP_MIN.
 */
func handle_smc
	/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

	smcc_save_gp_mode_regs

	/*
	 * `sp` still points to `smc_ctx_t`. Save it to a register
	 * and restore the C runtime stack pointer to `sp`.
	 */
	mov	r2, sp				/* handle */
	ldr	sp, [r2, #SMC_CTX_SP_MON]

	ldr	r0, [r2, #SMC_CTX_SCR]
	and	r3, r0, #SCR_NS_BIT		/* flags */

	/* Switch to Secure Mode */
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	/*
	 * Set PMCR.DP to 1 to prohibit cycle counting whilst in Secure Mode.
	 * Also, the PMCR.LC field has an architecturally UNKNOWN value on reset
	 * and so set it to 1 as ARM has deprecated use of PMCR.LC=0.
	 */
	ldcopr	r0, PMCR
	orr	r0, r0, #(PMCR_LC_BIT | PMCR_DP_BIT)
	stcopr	r0, PMCR

	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
	/* Check whether an SMC64 is issued */
	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
	beq	1f
	/* SMC32 is not detected. Return an error to the caller */
	mov	r0, #SMC_UNK
	str	r0, [r2, #SMC_CTX_GPREG_R0]
	mov	r0, r2
	b	sp_min_exit
1:
	/* SMC32 is detected */
	mov	r1, #0				/* cookie */
	bl	handle_runtime_svc

	/* `r0` points to `smc_ctx_t` */
	b	sp_min_exit
endfunc handle_smc

/*
 * Secure interrupt handling function for SP_MIN.
 */
func handle_fiq
#if !SP_MIN_WITH_SECURE_FIQ
	b	plat_panic_handler
#else
	/* FIQ has a +4 offset for lr compared to the preferred return address */
	sub	lr, lr, #4
	/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

	smcc_save_gp_mode_regs

	/*
	 * AArch32 architectures need to clear the exclusive access when
	 * entering Monitor mode.
	 */
	clrex

	/* Load the run-time stack */
	mov	r2, sp
	ldr	sp, [r2, #SMC_CTX_SP_MON]

	/* Switch to Secure Mode */
	ldr	r0, [r2, #SMC_CTX_SCR]
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	/*
	 * Set PMCR.DP to 1 to prohibit cycle counting whilst in Secure Mode.
	 * Also, the PMCR.LC field has an architecturally UNKNOWN value on reset
	 * and so set it to 1 as ARM has deprecated use of PMCR.LC=0.
	 */
	ldcopr	r0, PMCR
	orr	r0, r0, #(PMCR_LC_BIT | PMCR_DP_BIT)
	stcopr	r0, PMCR

	push	{r2, r3}
	bl	sp_min_fiq
	pop	{r0, r3}

	b	sp_min_exit
#endif
endfunc handle_fiq

/*
 * The Warm boot entrypoint for SP_MIN.
 */
func sp_min_warm_entrypoint
	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 * - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
	 *   programming the reset address do we need to initialise the SCTLR.
	 *   In other cases, we assume this has been taken care of by the
	 *   entrypoint code.
	 *
	 * - No need to determine the type of boot, we know it is a warm boot.
	 *
	 * - Do not try to distinguish between primary and secondary CPUs, this
	 *   notion only exists for a cold boot.
	 *
	 * - No need to initialise the memory or the C runtime environment,
	 *   it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=sp_min_vector_table

	/*
	 * We're about to enable the MMU and participate in PSCI state
	 * coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform-specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * others). Therefore we keep data caches disabled even after enabling
	 * the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform-specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
	mov	r0, #DISABLE_DCACHE
	bl	bl32_plat_enable_mmu

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r0
#endif

#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	ldcopr	r0, SCTLR
	orr	r0, r0, #SCTLR_C_BIT
	stcopr	r0, SCTLR
	isb
#endif

	bl	sp_min_warm_boot
	bl	smc_get_next_ctx
	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_warm_entrypoint

/*
 * The function to restore the registers from the SMC context and return
 * to the mode specified in the restored SPSR.
 *
 * Arguments : r0 must point to the SMC context to restore from.
 */
func sp_min_exit
	monitor_exit
endfunc sp_min_exit