/*
 * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <common/bl_common.h>
#include <el3_common_macros.S>
#include <lib/pmf/pmf_asm_macros.S>
#include <lib/runtime_instr.h>
#include <lib/xlat_tables/xlat_mmu_helpers.h>

	.globl	bl31_entrypoint
	.globl	bl31_warm_entrypoint

	/* -----------------------------------------------------
	 * bl31_entrypoint() is the cold boot entrypoint,
	 * executed only by the primary cpu.
	 *
	 * In:   x0 - x3 = arguments from the previous boot
	 *       stage (only meaningful when !RESET_TO_BL31).
	 * Does not return: ends with a branch to el3_exit.
	 * -----------------------------------------------------
	 */

func bl31_entrypoint
#if !RESET_TO_BL31
	/* ---------------------------------------------------------------
	 * Stash the previous bootloader arguments x0 - x3 for later use.
	 * x20-x23 are used because they are callee-saved and survive the
	 * 'bl' calls made below (el3_entrypoint_common sets up the C
	 * runtime here, so they must also survive that macro — assumed,
	 * as the macro body is defined in el3_common_macros.S).
	 * ---------------------------------------------------------------
	 */
	mov	x20, x0
	mov	x21, x1
	mov	x22, x2
	mov	x23, x3

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_BL31 systems, only the primary CPU ever reaches
	 * bl31_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised the
	 * SCTLR_EL3, including the endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=runtime_exceptions
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems which have a programmable reset address,
	 * bl31_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=runtime_exceptions

	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems, BL31 is the first bootloader to run so
	 * there's no argument to relay from a previous bootloader. Zero the
	 * arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	x20, 0
	mov	x21, 0
	mov	x22, 0
	mov	x23, 0
#endif /* RESET_TO_BL31 */

	/* --------------------------------------------------------------------
	 * If PIE is enabled, fixup the Global descriptor Table and dynamic
	 * relocations. This must happen before any absolute data reference
	 * made by the C code in bl31_setup/bl31_main below.
	 * --------------------------------------------------------------------
	 */
#if ENABLE_PIE
	mov_imm	x0, BL31_BASE
	mov_imm	x1, BL31_LIMIT
	bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */

	/* --------------------------------------------------------------------
	 * Perform BL31 setup, relaying the stashed (or zeroed) boot arguments.
	 * --------------------------------------------------------------------
	 */
	mov	x0, x20
	mov	x1, x21
	mov	x2, x22
	mov	x3, x23
	bl	bl31_setup

	/* --------------------------------------------------------------------
	 * Enable pointer authentication (instruction key A) in EL3 by setting
	 * SCTLR_EL3.EnIA; the isb ensures the new setting takes effect before
	 * any subsequent (possibly PAuth-signed) code runs.
	 * --------------------------------------------------------------------
	 */
#if ENABLE_PAUTH
	mrs	x0, sctlr_el3
	orr	x0, x0, #SCTLR_EnIA_BIT
	msr	sctlr_el3, x0
	isb
#endif /* ENABLE_PAUTH */

	/* --------------------------------------------------------------------
	 * Jump to main function.
	 * --------------------------------------------------------------------
	 */
	bl	bl31_main

	/* --------------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * __DATA_START__/__DATA_END__/__BSS_START__/__BSS_END__ are
	 * presumably linker-script symbols — defined outside this file.
	 * --------------------------------------------------------------------
	 */
	adr	x0, __DATA_START__
	adr	x1, __DATA_END__
	sub	x1, x1, x0			/* x1 = size of .data */
	bl	clean_dcache_range

	adr	x0, __BSS_START__
	adr	x1, __BSS_END__
	sub	x1, x1, x0			/* x1 = size of .bss */
	bl	clean_dcache_range

	b	el3_exit
endfunc bl31_entrypoint

	/* --------------------------------------------------------------------
	 * This CPU has been physically powered up. It is either resuming from
	 * suspend or has simply been turned on. In both cases, call the BL31
	 * warmboot entrypoint.
	 *
	 * Does not return: ends with a branch to el3_exit.
	 * --------------------------------------------------------------------
	 */
func bl31_warm_entrypoint
#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * This timestamp update happens with cache off. The next
	 * timestamp collection will need to do cache maintenance prior
	 * to timestamp update.
	 */
	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_HW_LOW_PWR
	mrs	x1, cntpct_el0		/* raw generic-timer count */
	str	x1, [x0]
#endif

	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 * - Only when the platform bypasses the BL1/BL31 entrypoint by
	 *   programming the reset address do we need to initialise SCTLR_EL3.
	 *   In other cases, we assume this has been taken care by the
	 *   entrypoint code.
	 *
	 * - No need to determine the type of boot, we know it is a warm boot.
	 *
	 * - Do not try to distinguish between primary and secondary CPUs, this
	 *   notion only exists for a cold boot.
	 *
	 * - No need to initialise the memory or the C runtime environment,
	 *   it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=runtime_exceptions

	/*
	 * We're about to enable MMU and participate in PSCI state coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * others). Therefore we keep data caches disabled even after enabling
	 * the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	mov	x0, xzr			/* x0 = 0: enable dcache with MMU */
#else
	mov	x0, #DISABLE_DCACHE	/* keep dcache off until coherency entry */
#endif
	bl	bl31_plat_enable_mmu

	/* --------------------------------------------------------------------
	 * Enable pointer authentication: reload the instruction key A for this
	 * BL image, then set SCTLR_EL3.EnIA. The key must be re-programmed on
	 * every warm boot because register state is lost across power down.
	 * --------------------------------------------------------------------
	 */
#if ENABLE_PAUTH
	bl	pauth_load_bl_apiakey

	mrs	x0, sctlr_el3
	orr	x0, x0, #SCTLR_EnIA_BIT
	msr	sctlr_el3, x0
	isb
#endif /* ENABLE_PAUTH */

	bl	psci_warmboot_entrypoint

#if ENABLE_RUNTIME_INSTRUMENTATION
	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_PSCI
	mov	x19, x0			/* x19 = timestamp slot address */

	/*
	 * Invalidate before updating timestamp to ensure previous timestamp
	 * updates on the same cache line with caches disabled are properly
	 * seen by the same core. Without the cache invalidate, the core might
	 * write into a stale cache line.
	 */
	mov	x1, #PMF_TS_SIZE
	mov	x20, x30		/* preserve LR across the bl below */
	bl	inv_dcache_range
	mov	x30, x20

	mrs	x0, cntpct_el0
	str	x0, [x19]
#endif
	b	el3_exit
endfunc bl31_warm_entrypoint