/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <bl_common.h>
#include <el3_common_macros.S>
#include <platform_def.h>
#include <pmf_asm_macros.S>
#include <runtime_instr.h>
#include <xlat_mmu_helpers.h>

	.globl	bl31_entrypoint
	.globl	bl31_warm_entrypoint

	/* -----------------------------------------------------
	 * bl31_entrypoint() is the cold boot entrypoint,
	 * executed only by the primary cpu.
	 * -----------------------------------------------------
	 */

func bl31_entrypoint
#if !RESET_TO_BL31
	/* ---------------------------------------------------------------
	 * Stash the previous bootloader arguments x0 - x3 for later use.
	 * ---------------------------------------------------------------
	 */
	mov	x20, x0
	mov	x21, x1
	mov	x22, x2
	mov	x23, x3

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_BL31 systems, only the primary CPU ever reaches
	 * bl31_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised
	 * SCTLR_EL3, including the endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=runtime_exceptions
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems which have a programmable reset address,
	 * bl31_entrypoint() is executed only on the cold boot path, so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=runtime_exceptions

	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems, BL31 is the first bootloader to run, so
	 * there is no argument to relay from a previous bootloader. Zero the
	 * arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	x20, 0
	mov	x21, 0
	mov	x22, 0
	mov	x23, 0
#endif /* RESET_TO_BL31 */

	/* --------------------------------------------------------------------
	 * If PIE is enabled, fix up the Global Offset Table and the dynamic
	 * relocations.
	 * --------------------------------------------------------------------
	 */
#if ENABLE_PIE
	mov_imm	x0, BL31_BASE
	mov_imm	x1, BL31_LIMIT
	bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */

	/* ---------------------------------------------
	 * Perform platform-specific early arch. setup.
	 * ---------------------------------------------
	 */
	mov	x0, x20
	mov	x1, x21
	mov	x2, x22
	mov	x3, x23
	bl	bl31_early_platform_setup2
	bl	bl31_plat_arch_setup
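
	/* --------------------------------------------------------------------
	 * For reference, a sketch of the C-level view of the two platform
	 * hooks invoked above (see include/plat/common/platform.h for the
	 * authoritative declarations):
	 *
	 *   void bl31_early_platform_setup2(u_register_t arg0,
	 *                                   u_register_t arg1,
	 *                                   u_register_t arg2,
	 *                                   u_register_t arg3);
	 *   void bl31_plat_arch_setup(void);
	 *
	 * Per the AAPCS64 calling convention, x0 - x3 above carry arg0 - arg3,
	 * i.e. the (possibly zeroed) arguments from the previous bootloader.
	 * --------------------------------------------------------------------
	 */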

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	bl31_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	adr	x0, __DATA_START__
	adr	x1, __DATA_END__
	sub	x1, x1, x0
	bl	clean_dcache_range

	adr	x0, __BSS_START__
	adr	x1, __BSS_END__
	sub	x1, x1, x0
	bl	clean_dcache_range

	b	el3_exit
endfunc bl31_entrypoint

	/* --------------------------------------------------------------------
	 * This CPU has been physically powered up. It is either resuming from
	 * suspend or has simply been turned on. In both cases, call the BL31
	 * warm boot entrypoint.
	 * --------------------------------------------------------------------
	 */
func bl31_warm_entrypoint
#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * This timestamp update happens with the cache off. The next
	 * timestamp collection will need to do cache maintenance prior
	 * to the timestamp update.
	 */
	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_HW_LOW_PWR
	mrs	x1, cntpct_el0
	str	x1, [x0]
#endif

	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 * - Only when the platform bypasses the BL1/BL31 entrypoint by
	 *   programming the reset address do we need to initialise SCTLR_EL3.
	 *   In other cases, we assume this has been taken care of by the
	 *   entrypoint code.
	 *
	 * - No need to determine the type of boot; we know it is a warm boot.
	 *
	 * - Do not try to distinguish between primary and secondary CPUs;
	 *   this notion only exists for a cold boot.
	 *
	 * - No need to initialise the memory or the C runtime environment;
	 *   this has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=runtime_exceptions

	/*
	 * We're about to enable the MMU and participate in PSCI state
	 * coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs
	 * to participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform-specific programming,
	 * having caches enabled until such time might lead to coherency
	 * issues (resulting from stale data getting speculatively fetched,
	 * among other causes). Therefore we keep data caches disabled even
	 * after enabling the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single-cluster
	 * platforms, such platform-specific programming is not required to
	 * enter coherency (as the CPUs already are coherent), and there's no
	 * reason to have caches disabled either.
	 */
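	/*
	 * For reference, a sketch of the C-level view of the hook invoked
	 * below (see include/plat/common/platform.h for the authoritative
	 * declaration):
	 *
	 *   void bl31_plat_enable_mmu(uint32_t flags);
	 *
	 * where flags is either 0 or DISABLE_DCACHE, a flag understood by the
	 * translation table library (see xlat_mmu_helpers.h, included above).
	 */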
#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	mov	x0, xzr
#else
	mov	x0, #DISABLE_DCACHE
#endif
	bl	bl31_plat_enable_mmu

	bl	psci_warmboot_entrypoint

#if ENABLE_RUNTIME_INSTRUMENTATION
	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_PSCI
	mov	x19, x0

	/*
	 * Invalidate before updating the timestamp to ensure that previous
	 * timestamp updates, made on the same cache line while caches were
	 * disabled, are properly seen by this core. Without the cache
	 * invalidate, the core might write into a stale cache line.
	 */
	mov	x1, #PMF_TS_SIZE
	mov	x20, x30
	bl	inv_dcache_range
	mov	x30, x20

	mrs	x0, cntpct_el0
	str	x0, [x19]
#endif
	b	el3_exit
endfunc bl31_warm_entrypoint
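
	/*
	 * Note on the cache maintenance helpers used in this file: a sketch
	 * of their prototypes, as declared in arch_helpers.h, is:
	 *
	 *   void clean_dcache_range(uintptr_t addr, size_t size);
	 *   void inv_dcache_range(uintptr_t addr, size_t size);
	 *
	 * Both take a base address in x0 and a size in bytes in x1, which is
	 * why the .data/.bss cleans above compute x1 as end - start, and why
	 * the PMF path loads PMF_TS_SIZE into x1 before inv_dcache_range.
	 */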