/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <common/bl_common.h>
#include <el3_common_macros.S>
#include <lib/pmf/aarch64/pmf_asm_macros.S>
#include <lib/runtime_instr.h>
#include <lib/xlat_tables/xlat_mmu_helpers.h>

        .globl  bl31_entrypoint
        .globl  bl31_warm_entrypoint

        /* -----------------------------------------------------
         * bl31_entrypoint() is the cold boot entrypoint,
         * executed only by the primary cpu.
         * -----------------------------------------------------
         */

func bl31_entrypoint
        /* ---------------------------------------------------------------
         * Stash the previous bootloader arguments x0 - x3 for later use.
         * ---------------------------------------------------------------
         */
        mov     x20, x0
        mov     x21, x1
        mov     x22, x2
        mov     x23, x3

#if !RESET_TO_BL31
        /* ---------------------------------------------------------------------
         * For !RESET_TO_BL31 systems, only the primary CPU ever reaches
         * bl31_entrypoint() during the cold boot flow, so the cold/warm boot
         * and primary/secondary CPU logic should not be executed in this case.
         *
         * Also, assume that the previous bootloader has already initialised
         * SCTLR_EL3, including the endianness, and has initialised the memory.
         * ---------------------------------------------------------------------
         */
        el3_entrypoint_common                                   \
                _init_sctlr=0                                   \
                _warm_boot_mailbox=0                            \
                _secondary_cold_boot=0                          \
                _init_memory=0                                  \
                _init_c_runtime=1                               \
                _exception_vectors=runtime_exceptions           \
                _pie_fixup_size=BL31_LIMIT - BL31_BASE
#else

        /* ---------------------------------------------------------------------
         * For RESET_TO_BL31 systems which have a programmable reset address,
         * bl31_entrypoint() is executed only on the cold boot path so we can
         * skip the warm boot mailbox mechanism.
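         *
         * When the reset address is not programmable, warm-booting CPUs
         * re-enter through this entrypoint as well, which is why
         * _warm_boot_mailbox is set to !PROGRAMMABLE_RESET_ADDRESS below.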
         * ---------------------------------------------------------------------
         */
        el3_entrypoint_common                                   \
                _init_sctlr=1                                   \
                _warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS  \
                _secondary_cold_boot=!COLD_BOOT_SINGLE_CPU      \
                _init_memory=1                                  \
                _init_c_runtime=1                               \
                _exception_vectors=runtime_exceptions           \
                _pie_fixup_size=BL31_LIMIT - BL31_BASE

        /* ---------------------------------------------------------------------
         * For RESET_TO_BL31 systems, BL31 is the first bootloader to run so
         * there's no argument to relay from a previous bootloader. Zero the
         * arguments passed to the platform layer to reflect that.
         * ---------------------------------------------------------------------
         */
        mov     x20, 0
        mov     x21, 0
        mov     x22, 0
        mov     x23, 0
#endif /* RESET_TO_BL31 */

        /* --------------------------------------------------------------------
         * Perform BL31 setup
         * --------------------------------------------------------------------
         */
        mov     x0, x20
        mov     x1, x21
        mov     x2, x22
        mov     x3, x23
        bl      bl31_setup

#if ENABLE_PAUTH
        /* --------------------------------------------------------------------
         * Program APIAKey_EL1 and enable pointer authentication
         * --------------------------------------------------------------------
         */
        bl      pauth_init_enable_el3
#endif /* ENABLE_PAUTH */

        /* --------------------------------------------------------------------
         * Jump to main function
         * --------------------------------------------------------------------
         */
        bl      bl31_main

        /* --------------------------------------------------------------------
         * Clean the .data & .bss sections to main memory. This ensures
         * that any global data which was initialised by the primary CPU
         * is visible to secondary CPUs before they enable their data
         * caches and participate in coherency.
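         *
         * clean_dcache_range() takes the start address in x0 and the length
         * in x1, hence the end - start subtraction below.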
         * --------------------------------------------------------------------
         */
        adrp    x0, __DATA_START__
        add     x0, x0, :lo12:__DATA_START__
        adrp    x1, __DATA_END__
        add     x1, x1, :lo12:__DATA_END__
        sub     x1, x1, x0
        bl      clean_dcache_range

        adrp    x0, __BSS_START__
        add     x0, x0, :lo12:__BSS_START__
        adrp    x1, __BSS_END__
        add     x1, x1, :lo12:__BSS_END__
        sub     x1, x1, x0
        bl      clean_dcache_range

        b       el3_exit
endfunc bl31_entrypoint

        /* --------------------------------------------------------------------
         * This CPU has been physically powered up. It is either resuming from
         * suspend or has simply been turned on. In both cases, call the BL31
         * warmboot entrypoint.
         * --------------------------------------------------------------------
         */
func bl31_warm_entrypoint
#if ENABLE_RUNTIME_INSTRUMENTATION

        /*
         * This timestamp update happens with cache off. The next
         * timestamp collection will need to do cache maintenance prior
         * to timestamp update.
         */
        pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_HW_LOW_PWR
        mrs     x1, cntpct_el0
        str     x1, [x0]
#endif

        /*
         * On the warm boot path, most of the EL3 initialisations performed by
         * 'el3_entrypoint_common' must be skipped:
         *
         * - Only when the platform bypasses the BL1/BL31 entrypoint by
         *   programming the reset address do we need to initialise SCTLR_EL3.
         *   In other cases, we assume this has been taken care of by the
         *   entrypoint code.
         *
         * - No need to determine the type of boot, we know it is a warm boot.
         *
         * - Do not try to distinguish between primary and secondary CPUs, this
         *   notion only exists for a cold boot.
         *
         * - No need to initialise the memory or the C runtime environment,
         *   it has been done once and for all on the cold boot path.
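         *
         * - No need to apply the PIE fixups again either; they were already
         *   applied on the cold boot path, hence _pie_fixup_size=0.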
         */
        el3_entrypoint_common                                   \
                _init_sctlr=PROGRAMMABLE_RESET_ADDRESS          \
                _warm_boot_mailbox=0                            \
                _secondary_cold_boot=0                          \
                _init_memory=0                                  \
                _init_c_runtime=0                               \
                _exception_vectors=runtime_exceptions           \
                _pie_fixup_size=0

        /*
         * We're about to enable MMU and participate in PSCI state coordination.
         *
         * The PSCI implementation invokes platform routines that enable CPUs to
         * participate in coherency. On a system where CPUs are not
         * cache-coherent without appropriate platform specific programming,
         * having caches enabled until such time might lead to coherency issues
         * (resulting from stale data getting speculatively fetched, among
         * others). Therefore we keep data caches disabled even after enabling
         * the MMU for such platforms.
         *
         * On systems with hardware-assisted coherency, or on single cluster
         * platforms, such platform specific programming is not required to
         * enter coherency (as CPUs already are); and there's no reason to have
         * caches disabled either.
         */
#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
        mov     x0, xzr
#else
        mov     x0, #DISABLE_DCACHE
#endif
        bl      bl31_plat_enable_mmu

#if ENABLE_PAUTH
        /* --------------------------------------------------------------------
         * Program APIAKey_EL1 and enable pointer authentication
         * --------------------------------------------------------------------
         */
        bl      pauth_init_enable_el3
#endif /* ENABLE_PAUTH */

        bl      psci_warmboot_entrypoint

#if ENABLE_RUNTIME_INSTRUMENTATION
        pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_PSCI
        mov     x19, x0

        /*
         * Invalidate before updating timestamp to ensure previous timestamp
         * updates on the same cache line with caches disabled are properly
         * seen by the same core. Without the cache invalidate, the core might
         * write into a stale cache line.
         */
        mov     x1, #PMF_TS_SIZE
        mov     x20, x30
        bl      inv_dcache_range
        mov     x30, x20

        mrs     x0, cntpct_el0
        str     x0, [x19]
#endif
        b       el3_exit
endfunc bl31_warm_entrypoint