/*
 * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <common/bl_common.h>
#include <el3_common_macros.S>
#include <lib/pmf/pmf_asm_macros.S>
#include <lib/runtime_instr.h>
#include <lib/xlat_tables/xlat_mmu_helpers.h>

	.globl	bl31_entrypoint
	.globl	bl31_warm_entrypoint

	/* -----------------------------------------------------
	 * bl31_entrypoint() is the cold boot entrypoint,
	 * executed only by the primary cpu.
	 * -----------------------------------------------------
	 */

func bl31_entrypoint
	/* ---------------------------------------------------------------
	 * Stash the previous bootloader arguments x0 - x3 for later use.
	 * x20-x23 are callee-saved per AAPCS64, so they survive the
	 * function calls made below (fixup_gdt_reloc, bl31_setup's
	 * prerequisites inside el3_entrypoint_common, etc.).
	 * ---------------------------------------------------------------
	 */
	mov	x20, x0
	mov	x21, x1
	mov	x22, x2
	mov	x23, x3

	/* --------------------------------------------------------------------
	 * If PIE is enabled, fixup the Global descriptor Table and dynamic
	 * relocations, so that BL31 can run correctly when loaded at an
	 * address other than its link-time base.
	 * --------------------------------------------------------------------
	 */
#if ENABLE_PIE
	mov_imm	x0, BL31_BASE
	mov_imm	x1, BL31_LIMIT
	bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */

#if !RESET_TO_BL31
	/* ---------------------------------------------------------------------
	 * For !RESET_TO_BL31 systems, only the primary CPU ever reaches
	 * bl31_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised the
	 * SCTLR_EL3, including the endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=runtime_exceptions
#else

	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems which have a programmable reset address,
	 * bl31_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=runtime_exceptions

	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems, BL31 is the first bootloader to run so
	 * there's no argument to relay from a previous bootloader. Zero the
	 * arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	x20, 0
	mov	x21, 0
	mov	x22, 0
	mov	x23, 0
#endif /* RESET_TO_BL31 */

	/* --------------------------------------------------------------------
	 * Perform BL31 setup, passing along the stashed (or zeroed) arguments
	 * from the previous bootloader in x0 - x3.
	 * --------------------------------------------------------------------
	 */
	mov	x0, x20
	mov	x1, x21
	mov	x2, x22
	mov	x3, x23
	bl	bl31_setup

	/* --------------------------------------------------------------------
	 * Enable pointer authentication: set SCTLR_EL3.EnIA so instruction
	 * address authentication with key A takes effect at EL3.
	 * --------------------------------------------------------------------
	 */
#if ENABLE_PAUTH
	mrs	x0, sctlr_el3
	orr	x0, x0, #SCTLR_EnIA_BIT
#if ENABLE_BTI
	/* --------------------------------------------------------------------
	 * Enable PAC branch type compatibility: with SCTLR_EL3.BT clear,
	 * PACIASP/PACIBSP act as implicit branch targets for BTI purposes.
	 * --------------------------------------------------------------------
	 */
	bic	x0, x0, #SCTLR_BT_BIT
#endif	/* ENABLE_BTI */
	msr	sctlr_el3, x0
	isb			/* synchronise the SCTLR_EL3 update */
#endif /* ENABLE_PAUTH */

	/* --------------------------------------------------------------------
	 * Jump to main function.
	 * --------------------------------------------------------------------
	 */
	bl	bl31_main

	/* --------------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * x1 is converted from an end address to a length, as required by
	 * clean_dcache_range(base, size).
	 * --------------------------------------------------------------------
	 */
	adr	x0, __DATA_START__
	adr	x1, __DATA_END__
	sub	x1, x1, x0
	bl	clean_dcache_range

	adr	x0, __BSS_START__
	adr	x1, __BSS_END__
	sub	x1, x1, x0
	bl	clean_dcache_range

	b	el3_exit
endfunc bl31_entrypoint

	/* --------------------------------------------------------------------
	 * This CPU has been physically powered up. It is either resuming from
	 * suspend or has simply been turned on. In both cases, call the BL31
	 * warmboot entrypoint
	 * --------------------------------------------------------------------
	 */
func bl31_warm_entrypoint
#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * This timestamp update happens with cache off.  The next
	 * timestamp collection will need to do cache maintenance prior
	 * to timestamp update.
	 */
	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_HW_LOW_PWR
	mrs	x1, cntpct_el0		/* read the generic counter */
	str	x1, [x0]		/* store timestamp at address from x0 */
#endif

	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 * - Only when the platform bypasses the BL1/BL31 entrypoint by
	 *   programming the reset address do we need to initialise SCTLR_EL3.
	 *   In other cases, we assume this has been taken care by the
	 *   entrypoint code.
	 *
	 * - No need to determine the type of boot, we know it is a warm boot.
	 *
	 * - Do not try to distinguish between primary and secondary CPUs, this
	 *   notion only exists for a cold boot.
	 *
	 * - No need to initialise the memory or the C runtime environment,
	 *   it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=runtime_exceptions

	/*
	 * We're about to enable MMU and participate in PSCI state coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * others). Therefore we keep data caches disabled even after enabling
	 * the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	mov	x0, xzr			/* flags = 0: leave D-cache enabled */
#else
	mov	x0, #DISABLE_DCACHE	/* keep D-cache off until coherency entry */
#endif
	bl	bl31_plat_enable_mmu

	/* --------------------------------------------------------------------
	 * Enable pointer authentication: reload the EL3 APIA key (lost across
	 * the power-down) before setting SCTLR_EL3.EnIA.
	 * --------------------------------------------------------------------
	 */
#if ENABLE_PAUTH
	bl	pauth_load_bl_apiakey

	mrs	x0, sctlr_el3
	orr	x0, x0, #SCTLR_EnIA_BIT
#if ENABLE_BTI
	/* --------------------------------------------------------------------
	 * Enable PAC branch type compatibility: with SCTLR_EL3.BT clear,
	 * PACIASP/PACIBSP act as implicit branch targets for BTI purposes.
	 * --------------------------------------------------------------------
	 */
	bic	x0, x0, #SCTLR_BT_BIT
#endif	/* ENABLE_BTI */
	msr	sctlr_el3, x0
	isb			/* synchronise the SCTLR_EL3 update */
#endif /* ENABLE_PAUTH */

	bl	psci_warmboot_entrypoint

#if ENABLE_RUNTIME_INSTRUMENTATION
	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_PSCI
	mov	x19, x0			/* x19 (callee-saved): timestamp address */

	/*
	 * Invalidate before updating timestamp to ensure previous timestamp
	 * updates on the same cache line with caches disabled are properly
	 * seen by the same core. Without the cache invalidate, the core might
	 * write into a stale cache line.
	 */
	mov	x1, #PMF_TS_SIZE
	mov	x20, x30		/* preserve LR across the following call */
	bl	inv_dcache_range
	mov	x30, x20		/* restore LR */

	mrs	x0, cntpct_el0
	str	x0, [x19]
#endif
	b	el3_exit
endfunc bl31_warm_entrypoint