/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <bl_common.h>
#include <el3_common_macros.S>
#include <pmf_asm_macros.S>
#include <runtime_instr.h>
#include <xlat_tables.h>

        .globl  bl31_entrypoint
        .globl  bl31_warm_entrypoint

        /* -----------------------------------------------------
         * bl31_entrypoint() is the cold boot entrypoint,
         * executed only by the primary CPU.
         * -----------------------------------------------------
         */

func bl31_entrypoint
#if !RESET_TO_BL31
        /* ---------------------------------------------------------------
         * The preceding bootloader has populated x0 with a pointer to a
         * 'bl31_params' structure and x1 with a pointer to a platform
         * specific structure.
         * ---------------------------------------------------------------
         */
        mov     x20, x0
        mov     x21, x1

        /* ---------------------------------------------------------------------
         * For !RESET_TO_BL31 systems, only the primary CPU ever reaches
         * bl31_entrypoint() during the cold boot flow, so the cold/warm boot
         * and primary/secondary CPU logic should not be executed in this case.
         *
         * Also, assume that the previous bootloader has already set up the CPU
         * endianness and has initialised the memory.
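         *
         * Note: even though memory initialisation is skipped here, the
         * _init_c_runtime=1 argument below still asks el3_entrypoint_common
         * (see el3_common_macros.S) to set up BL31's own C runtime, i.e.
         * roughly, zero the zero-initialised sections and set up this CPU's
         * stack, while _exception_vectors installs 'runtime_exceptions' as
         * the EL3 vector table.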
         * ---------------------------------------------------------------------
         */
        el3_entrypoint_common                                   \
                _set_endian=0                                   \
                _warm_boot_mailbox=0                            \
                _secondary_cold_boot=0                          \
                _init_memory=0                                  \
                _init_c_runtime=1                               \
                _exception_vectors=runtime_exceptions

        /* ---------------------------------------------------------------------
         * Relay the previous bootloader's arguments to the platform layer
         * ---------------------------------------------------------------------
         */
        mov     x0, x20
        mov     x1, x21
#else
        /* ---------------------------------------------------------------------
         * For RESET_TO_BL31 systems which have a programmable reset address,
         * bl31_entrypoint() is executed only on the cold boot path so we can
         * skip the warm boot mailbox mechanism.
         * ---------------------------------------------------------------------
         */
        el3_entrypoint_common                                   \
                _set_endian=1                                   \
                _warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS  \
                _secondary_cold_boot=!COLD_BOOT_SINGLE_CPU      \
                _init_memory=1                                  \
                _init_c_runtime=1                               \
                _exception_vectors=runtime_exceptions

        /* ---------------------------------------------------------------------
         * For RESET_TO_BL31 systems, BL31 is the first bootloader to run so
         * there's no argument to relay from a previous bootloader. Zero the
         * arguments passed to the platform layer to reflect that.
         * ---------------------------------------------------------------------
         */
        mov     x0, 0
        mov     x1, 0
#endif /* RESET_TO_BL31 */

        /* ---------------------------------------------
         * Perform platform specific early arch. setup
         * ---------------------------------------------
         */
        bl      bl31_early_platform_setup
        bl      bl31_plat_arch_setup

        /* ---------------------------------------------
         * Jump to main function.
         * ---------------------------------------------
         */
        bl      bl31_main

        /* -------------------------------------------------------------
         * Clean the .data & .bss sections to main memory. This ensures
         * that any global data which was initialised by the primary CPU
         * is visible to secondary CPUs before they enable their data
         * caches and participate in coherency.
         * -------------------------------------------------------------
         */
        adr     x0, __DATA_START__
        adr     x1, __DATA_END__
        sub     x1, x1, x0
        bl      clean_dcache_range

        adr     x0, __BSS_START__
        adr     x1, __BSS_END__
        sub     x1, x1, x0
        bl      clean_dcache_range

        b       el3_exit
endfunc bl31_entrypoint

        /* --------------------------------------------------------------------
         * This CPU has been physically powered up. It is either resuming from
         * suspend or has simply been turned on. In both cases, call the BL31
         * warmboot entrypoint
         * --------------------------------------------------------------------
         */
func bl31_warm_entrypoint
#if ENABLE_RUNTIME_INSTRUMENTATION

        /*
         * This timestamp update happens with cache off. The next
         * timestamp collection will need to do cache maintenance prior
         * to timestamp update.
         */
        pmf_calc_timestamp_addr rt_instr_svc RT_INSTR_EXIT_HW_LOW_PWR
        mrs     x1, cntpct_el0
        str     x1, [x0]
#endif

        /*
         * On the warm boot path, most of the EL3 initialisations performed by
         * 'el3_entrypoint_common' must be skipped:
         *
         * - Only when the platform bypasses the BL1/BL31 entrypoint by
         *   programming the reset address do we need to set the CPU endianness.
         *   In other cases, we assume this has been taken care of by the
         *   entrypoint code.
         *
         * - No need to determine the type of boot, we know it is a warm boot.
         *
         * - Do not try to distinguish between primary and secondary CPUs, this
         *   notion only exists for a cold boot.
         *
         * - No need to initialise the memory or the C runtime environment,
         *   it has been done once and for all on the cold boot path.
         */
        el3_entrypoint_common                                   \
                _set_endian=PROGRAMMABLE_RESET_ADDRESS          \
                _warm_boot_mailbox=0                            \
                _secondary_cold_boot=0                          \
                _init_memory=0                                  \
                _init_c_runtime=0                               \
                _exception_vectors=runtime_exceptions

        /*
         * We're about to enable MMU and participate in PSCI state coordination.
         *
         * The PSCI implementation invokes platform routines that enable CPUs to
         * participate in coherency. On a system where CPUs are not
         * cache-coherent out of reset, having caches enabled until such time
         * might lead to coherency issues (resulting from stale data getting
         * speculatively fetched, among others). Therefore we keep data caches
         * disabled while enabling the MMU, thereby forcing data accesses to
         * have non-cacheable, nGnRnE attributes (these will always be coherent
         * with main memory).
         *
         * On systems with hardware-assisted coherency, where CPUs are expected
         * to be cache-coherent out of reset without needing explicit software
         * intervention, PSCI need not invoke platform routines to enter
         * coherency (as CPUs already are); and there's no reason to have caches
         * disabled either.
         */
#if HW_ASSISTED_COHERENCY
        mov     x0, #0
#else
        mov     x0, #DISABLE_DCACHE
#endif
        bl      bl31_plat_enable_mmu

        bl      psci_warmboot_entrypoint

#if ENABLE_RUNTIME_INSTRUMENTATION
        pmf_calc_timestamp_addr rt_instr_svc RT_INSTR_EXIT_PSCI

        /* Keep the timestamp address (returned in x0) in a callee-saved register */
        mov     x19, x0

        /*
         * Invalidate before updating timestamp to ensure previous timestamp
         * updates on the same cache line with caches disabled are properly
         * seen by the same core. Without the cache invalidate, the core might
         * write into a stale cache line.
         */
        mov     x1, #PMF_TS_SIZE
        mov     x20, x30        /* Preserve the return address across the call */
        bl      inv_dcache_range
        mov     x30, x20

        mrs     x0, cntpct_el0
        str     x0, [x19]
#endif
        b       el3_exit
endfunc bl31_warm_entrypoint