/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <bl_common.h>
#include <el3_common_macros.S>
#include <pmf_asm_macros.S>
#include <runtime_instr.h>
#include <xlat_tables_defs.h>

	.globl	bl31_entrypoint
	.globl	bl31_warm_entrypoint

	/* -----------------------------------------------------
	 * bl31_entrypoint() is the cold boot entrypoint,
	 * executed only by the primary cpu.
	 * -----------------------------------------------------
	 */

func bl31_entrypoint
#if !RESET_TO_BL31
	/* ---------------------------------------------------------------
	 * Preceding bootloader has populated x0 with a pointer to a
	 * 'bl31_params' structure & x1 with a pointer to platform
	 * specific structure.  Stash them in callee-saved registers so
	 * they survive the calls made by el3_entrypoint_common.
	 * ---------------------------------------------------------------
	 */
	mov	x20, x0
	mov	x21, x1

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_BL31 systems, only the primary CPU ever reaches
	 * bl31_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised the
	 * SCTLR_EL3, including the endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Relay the previous bootloader's arguments to the platform layer
	 * ---------------------------------------------------------------------
	 */
	mov	x0, x20
	mov	x1, x21
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems which have a programmable reset address,
	 * bl31_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=runtime_exceptions

	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems, BL31 is the first bootloader to run so
	 * there's no argument to relay from a previous bootloader. Zero the
	 * arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	x0, 0
	mov	x1, 0
#endif /* RESET_TO_BL31 */

	/* ---------------------------------------------
	 * Perform platform specific early arch. setup
	 * ---------------------------------------------
	 */
	bl	bl31_early_platform_setup
	bl	bl31_plat_arch_setup

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	bl31_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	adr	x0, __DATA_START__
	adr	x1, __DATA_END__
	sub	x1, x1, x0		/* x1 = size of .data in bytes */
	bl	clean_dcache_range

	adr	x0, __BSS_START__
	adr	x1, __BSS_END__
	sub	x1, x1, x0		/* x1 = size of .bss in bytes */
	bl	clean_dcache_range

	b	el3_exit
endfunc bl31_entrypoint

	/* --------------------------------------------------------------------
	 * This CPU has been physically powered up. It is either resuming from
	 * suspend or has simply been turned on. In both cases, call the BL31
	 * warmboot entrypoint
	 * --------------------------------------------------------------------
	 */
func bl31_warm_entrypoint
#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * This timestamp update happens with cache off. The next
	 * timestamp collection will need to do cache maintenance prior
	 * to timestamp update.
	 */
	pmf_calc_timestamp_addr rt_instr_svc RT_INSTR_EXIT_HW_LOW_PWR
	mrs	x1, cntpct_el0
	str	x1, [x0]
#endif

	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 * - Only when the platform bypasses the BL1/BL31 entrypoint by
	 *   programming the reset address do we need to initialise SCTLR_EL3.
	 *   In other cases, we assume this has been taken care by the
	 *   entrypoint code.
	 *
	 * - No need to determine the type of boot, we know it is a warm boot.
	 *
	 * - Do not try to distinguish between primary and secondary CPUs, this
	 *   notion only exists for a cold boot.
	 *
	 * - No need to initialise the memory or the C runtime environment,
	 *   it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=runtime_exceptions

	/*
	 * We're about to enable MMU and participate in PSCI state coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * others). Therefore we keep data caches disabled even after enabling
	 * the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
	mov	x0, #DISABLE_DCACHE
	bl	bl31_plat_enable_mmu

#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	/* Enable the data cache early (set SCTLR_EL3.C) — safe here because
	 * coherency is already guaranteed on these configurations. */
	mrs	x0, sctlr_el3
	orr	x0, x0, #SCTLR_C_BIT
	msr	sctlr_el3, x0
	isb
#endif

	bl	psci_warmboot_entrypoint

#if ENABLE_RUNTIME_INSTRUMENTATION
	pmf_calc_timestamp_addr rt_instr_svc RT_INSTR_EXIT_PSCI
	mov	x19, x0			/* preserve timestamp address across calls */

	/*
	 * Invalidate before updating timestamp to ensure previous timestamp
	 * updates on the same cache line with caches disabled are properly
	 * seen by the same core. Without the cache invalidate, the core might
	 * write into a stale cache line.
	 */
	mov	x1, #PMF_TS_SIZE
	mov	x20, x30		/* save LR: inv_dcache_range clobbers x30 */
	bl	inv_dcache_range
	mov	x30, x20

	mrs	x0, cntpct_el0
	str	x0, [x19]
#endif
	b	el3_exit
endfunc bl31_warm_entrypoint