/*
 * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <common/bl_common.h>
#include <el3_common_macros.S>
#include <lib/pmf/aarch64/pmf_asm_macros.S>
#include <lib/runtime_instr.h>
#include <lib/xlat_tables/xlat_mmu_helpers.h>

	.globl	bl31_entrypoint
	.globl	bl31_warm_entrypoint

	/* -----------------------------------------------------
	 * bl31_entrypoint() is the cold boot entrypoint,
	 * executed only by the primary cpu.
	 * -----------------------------------------------------
	 */

func bl31_entrypoint
	/* ---------------------------------------------------------------
	 * Stash the previous bootloader arguments x0 - x3 for later use.
	 * x20 - x23 are callee-saved per AAPCS64, so the values survive
	 * any procedure calls made by el3_entrypoint_common below.
	 * ---------------------------------------------------------------
	 */
	mov	x20, x0			/* arg0 from previous bootloader */
	mov	x21, x1			/* arg1 */
	mov	x22, x2			/* arg2 */
	mov	x23, x3			/* arg3 */

#if !RESET_TO_BL31
	/* ---------------------------------------------------------------------
	 * For !RESET_TO_BL31 systems, only the primary CPU ever reaches
	 * bl31_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised the
	 * SCTLR_EL3, including the endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=runtime_exceptions		\
		_pie_fixup_size=BL31_LIMIT - BL31_BASE
#else

	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems which have a programmable reset address,
	 * bl31_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=runtime_exceptions		\
		_pie_fixup_size=BL31_LIMIT - BL31_BASE

	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems, BL31 is the first bootloader to run so
	 * there's no argument to relay from a previous bootloader. Zero the
	 * arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	x20, 0
	mov	x21, 0
	mov	x22, 0
	mov	x23, 0
#endif /* RESET_TO_BL31 */

	/* --------------------------------------------------------------------
	 * Perform BL31 setup, passing along the stashed (or zeroed) arguments
	 * from the previous bootloader in x0 - x3.
	 * --------------------------------------------------------------------
	 */
	mov	x0, x20
	mov	x1, x21
	mov	x2, x22
	mov	x3, x23
	bl	bl31_setup

#if ENABLE_PAUTH
	/* --------------------------------------------------------------------
	 * Program APIAKey_EL1 and enable pointer authentication
	 * --------------------------------------------------------------------
	 */
	bl	pauth_init_enable_el3
#endif /* ENABLE_PAUTH */

	/* --------------------------------------------------------------------
	 * Jump to main function
	 * --------------------------------------------------------------------
	 */
	bl	bl31_main

	/* --------------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 *
	 * clean_dcache_range takes x0 = base VA, x1 = size in bytes (the
	 * sub below converts the end address into a size).
	 * --------------------------------------------------------------------
	 */
	adr	x0, __DATA_START__	/* x0 = base VA of .data */
	adr	x1, __DATA_END__
	sub	x1, x1, x0		/* x1 = size of .data in bytes */
	bl	clean_dcache_range

	adr	x0, __BSS_START__	/* x0 = base VA of .bss */
	adr	x1, __BSS_END__
	sub	x1, x1, x0		/* x1 = size of .bss in bytes */
	bl	clean_dcache_range

	/* Tail-branch (b, not bl): control does not return to this function. */
	b	el3_exit
endfunc bl31_entrypoint

	/* --------------------------------------------------------------------
	 * This CPU has been physically powered up. It is either resuming from
	 * suspend or has simply been turned on.
In both cases, call the BL31 129 * warmboot entrypoint 130 * -------------------------------------------------------------------- 131 */ 132func bl31_warm_entrypoint 133#if ENABLE_RUNTIME_INSTRUMENTATION 134 135 /* 136 * This timestamp update happens with cache off. The next 137 * timestamp collection will need to do cache maintenance prior 138 * to timestamp update. 139 */ 140 pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_HW_LOW_PWR 141 mrs x1, cntpct_el0 142 str x1, [x0] 143#endif 144 145 /* 146 * On the warm boot path, most of the EL3 initialisations performed by 147 * 'el3_entrypoint_common' must be skipped: 148 * 149 * - Only when the platform bypasses the BL1/BL31 entrypoint by 150 * programming the reset address do we need to initialise SCTLR_EL3. 151 * In other cases, we assume this has been taken care by the 152 * entrypoint code. 153 * 154 * - No need to determine the type of boot, we know it is a warm boot. 155 * 156 * - Do not try to distinguish between primary and secondary CPUs, this 157 * notion only exists for a cold boot. 158 * 159 * - No need to initialise the memory or the C runtime environment, 160 * it has been done once and for all on the cold boot path. 161 */ 162 el3_entrypoint_common \ 163 _init_sctlr=PROGRAMMABLE_RESET_ADDRESS \ 164 _warm_boot_mailbox=0 \ 165 _secondary_cold_boot=0 \ 166 _init_memory=0 \ 167 _init_c_runtime=0 \ 168 _exception_vectors=runtime_exceptions \ 169 _pie_fixup_size=0 170 171 /* 172 * We're about to enable MMU and participate in PSCI state coordination. 173 * 174 * The PSCI implementation invokes platform routines that enable CPUs to 175 * participate in coherency. On a system where CPUs are not 176 * cache-coherent without appropriate platform specific programming, 177 * having caches enabled until such time might lead to coherency issues 178 * (resulting from stale data getting speculatively fetched, among 179 * others). 
Therefore we keep data caches disabled even after enabling 180 * the MMU for such platforms. 181 * 182 * On systems with hardware-assisted coherency, or on single cluster 183 * platforms, such platform specific programming is not required to 184 * enter coherency (as CPUs already are); and there's no reason to have 185 * caches disabled either. 186 */ 187#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY 188 mov x0, xzr 189#else 190 mov x0, #DISABLE_DCACHE 191#endif 192 bl bl31_plat_enable_mmu 193 194#if ENABLE_PAUTH 195 /* -------------------------------------------------------------------- 196 * Program APIAKey_EL1 and enable pointer authentication 197 * -------------------------------------------------------------------- 198 */ 199 bl pauth_init_enable_el3 200#endif /* ENABLE_PAUTH */ 201 202 bl psci_warmboot_entrypoint 203 204#if ENABLE_RUNTIME_INSTRUMENTATION 205 pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_PSCI 206 mov x19, x0 207 208 /* 209 * Invalidate before updating timestamp to ensure previous timestamp 210 * updates on the same cache line with caches disabled are properly 211 * seen by the same core. Without the cache invalidate, the core might 212 * write into a stale cache line. 213 */ 214 mov x1, #PMF_TS_SIZE 215 mov x20, x30 216 bl inv_dcache_range 217 mov x30, x20 218 219 mrs x0, cntpct_el0 220 str x0, [x19] 221#endif 222 b el3_exit 223endfunc bl31_warm_entrypoint 224