/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <bl_common.h>
#include <el3_common_macros.S>
#include <pmf_asm_macros.S>
#include <runtime_instr.h>
#include <xlat_tables_defs.h>

	.globl	bl31_entrypoint
	.globl	bl31_warm_entrypoint
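
	/* -----------------------------------------------------
	 * Both entrypoints in this file are built around the
	 * 'el3_entrypoint_common' macro from el3_common_macros.S.
	 * Its arguments select which EL3 initialisation steps run
	 * on a given path: CPU endianness setup, the warm boot
	 * mailbox check, secondary CPU cold boot handling, memory
	 * and C runtime initialisation, and the exception vectors
	 * to install. Each invocation below simply toggles those
	 * steps on or off for its particular boot flow.
	 * -----------------------------------------------------
	 */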

	/* -----------------------------------------------------
	 * bl31_entrypoint() is the cold boot entrypoint,
	 * executed only by the primary cpu.
	 * -----------------------------------------------------
	 */

func bl31_entrypoint
#if !RESET_TO_BL31
	/* ---------------------------------------------------------------
	 * Preceding bootloader has populated x0 with a pointer to a
	 * 'bl31_params' structure & x1 with a pointer to a platform
	 * specific structure
	 * ---------------------------------------------------------------
	 */
	mov	x20, x0
	mov	x21, x1

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_BL31 systems, only the primary CPU ever reaches
	 * bl31_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already set up the CPU
	 * endianness and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_set_endian=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Relay the previous bootloader's arguments to the platform layer
	 * ---------------------------------------------------------------------
	 */
	mov	x0, x20
	mov	x1, x21
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems which have a programmable reset address,
	 * bl31_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_set_endian=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=runtime_exceptions

	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems, BL31 is the first bootloader to run so
	 * there's no argument to relay from a previous bootloader. Zero the
	 * arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	x0, 0
	mov	x1, 0
#endif /* RESET_TO_BL31 */
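
	/* ---------------------------------------------------------------------
	 * At this point x0/x1 hold the arguments destined for the platform
	 * layer: either the pointers relayed from the previous bootloader or
	 * zeroes. As a rough illustration only (the authoritative prototype
	 * lives in the platform porting headers), the first call below
	 * corresponds at the C level to something like:
	 *
	 *	void bl31_early_platform_setup(bl31_params_t *from_bl2,
	 *				       void *plat_params_from_bl2);
	 * ---------------------------------------------------------------------
	 */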

	/* ---------------------------------------------
	 * Perform platform specific early arch. setup
	 * ---------------------------------------------
	 */
	bl	bl31_early_platform_setup
	bl	bl31_plat_arch_setup

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	bl31_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	adr	x0, __DATA_START__
	adr	x1, __DATA_END__
	sub	x1, x1, x0
	bl	clean_dcache_range

	adr	x0, __BSS_START__
	adr	x1, __BSS_END__
	sub	x1, x1, x0
	bl	clean_dcache_range

	b	el3_exit
endfunc bl31_entrypoint
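
	/* --------------------------------------------------------------------
	 * Note: the warm entrypoint below is not reached by falling through.
	 * Platform power management code typically registers its address with
	 * the PSCI framework (e.g. via the 'sec_entrypoint' argument of
	 * plat_setup_psci_ops()) so that a CPU coming out of reset on a warm
	 * boot is redirected here, through a mailbox or a programmable reset
	 * address. How that redirection happens is platform policy; this note
	 * describes a typical port, not a rule imposed by this file.
	 * --------------------------------------------------------------------
	 */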

	/* --------------------------------------------------------------------
	 * This CPU has been physically powered up. It is either resuming from
	 * suspend or has simply been turned on. In both cases, call the BL31
	 * warmboot entrypoint.
	 * --------------------------------------------------------------------
	 */
func bl31_warm_entrypoint
#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * This timestamp update happens with cache off. The next
	 * timestamp collection will need to do cache maintenance prior
	 * to timestamp update.
	 */
	pmf_calc_timestamp_addr rt_instr_svc RT_INSTR_EXIT_HW_LOW_PWR
	mrs	x1, cntpct_el0
	str	x1, [x0]
#endif

	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 * - Only when the platform bypasses the BL1/BL31 entrypoint by
	 *   programming the reset address do we need to set the CPU endianness.
	 *   In other cases, we assume this has been taken care of by the
	 *   entrypoint code.
	 *
	 * - No need to determine the type of boot, we know it is a warm boot.
	 *
	 * - Do not try to distinguish between primary and secondary CPUs, this
	 *   notion only exists for a cold boot.
	 *
	 * - No need to initialise the memory or the C runtime environment,
	 *   it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_set_endian=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=runtime_exceptions

	/*
	 * We're about to enable MMU and participate in PSCI state coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * others). Therefore we keep data caches disabled even after enabling
	 * the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
	mov	x0, #DISABLE_DCACHE
	bl	bl31_plat_enable_mmu

#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	/* Turn the data cache on early by setting SCTLR_EL3.C */
	mrs	x0, sctlr_el3
	orr	x0, x0, #SCTLR_C_BIT
	msr	sctlr_el3, x0
	isb
#endif

	bl	psci_warmboot_entrypoint
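
	/*
	 * On return from psci_warmboot_entrypoint(), the PSCI layer has
	 * finished power-up state coordination for this CPU and prepared the
	 * context for the lower exception level; all that remains is to
	 * record the exit timestamp (when instrumentation is enabled) and
	 * leave EL3 through el3_exit.
	 */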

#if ENABLE_RUNTIME_INSTRUMENTATION
	pmf_calc_timestamp_addr rt_instr_svc RT_INSTR_EXIT_PSCI
	mov	x19, x0

	/*
	 * Invalidate before updating timestamp to ensure previous timestamp
	 * updates on the same cache line with caches disabled are properly
	 * seen by the same core. Without the cache invalidate, the core might
	 * write into a stale cache line.
	 */
	mov	x1, #PMF_TS_SIZE
	mov	x20, x30		/* Preserve the return address */
	bl	inv_dcache_range
	mov	x30, x20

	mrs	x0, cntpct_el0
	str	x0, [x19]
#endif
	b	el3_exit
endfunc bl31_warm_entrypoint