/*
 * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <context.h>
#include <el3_common_macros.S>
#include <runtime_svc.h>
#include <smccc_helpers.h>
#include <smccc_macros.S>
#include <xlat_tables_defs.h>

	.globl	sp_min_vector_table
	.globl	sp_min_entrypoint
	.globl	sp_min_warm_entrypoint
	.globl	sp_min_handle_smc
	.globl	sp_min_handle_fiq

	.macro route_fiq_to_sp_min reg
	/* -----------------------------------------------------
	 * FIQs are secure interrupts trapped by Monitor and non
	 * secure is not allowed to mask the FIQs.
	 * -----------------------------------------------------
	 */
	ldcopr	\reg, SCR
	orr	\reg, \reg, #SCR_FIQ_BIT
	bic	\reg, \reg, #SCR_FW_BIT
	stcopr	\reg, SCR
	.endm

	.macro clrex_on_monitor_entry
#if (ARM_ARCH_MAJOR == 7)
	/*
	 * ARMv7 architectures need to clear the exclusive access when
	 * entering Monitor mode.
	 */
	clrex
#endif
	.endm

vector_base sp_min_vector_table
	b	sp_min_entrypoint
	b	plat_panic_handler	/* Undef */
	b	sp_min_handle_smc	/* Syscall */
	b	plat_panic_handler	/* Prefetch abort */
	b	plat_panic_handler	/* Data abort */
	b	plat_panic_handler	/* Reserved */
	b	plat_panic_handler	/* IRQ */
	b	sp_min_handle_fiq	/* FIQ */


/*
 * The Cold boot/Reset entrypoint for SP_MIN
 */
func sp_min_entrypoint
#if !RESET_TO_SP_MIN
	/* ---------------------------------------------------------------
	 * Preceding bootloader has populated r0 with a pointer to a
	 * 'bl_params_t' structure & r1 with a pointer to platform
	 * specific structure
	 * ---------------------------------------------------------------
	 */
	mov	r9, r0
	mov	r10, r1
	mov	r11, r2
	mov	r12, r3

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
	 * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised the
	 * SCTLR, including the CPU endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * Relay the previous bootloader's arguments to the platform layer
	 * ---------------------------------------------------------------------
	 */
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems which have a programmable reset address,
	 * sp_min_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
	 * to run so there's no argument to relay from a previous bootloader.
	 * Zero the arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	r9, #0
	mov	r10, #0
	mov	r11, #0
	mov	r12, #0

#endif /* RESET_TO_SP_MIN */

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r4
#endif

	mov	r0, r9
	mov	r1, r10
	mov	r2, r11
	mov	r3, r12
	bl	sp_min_early_platform_setup2
	bl	sp_min_plat_arch_setup

	/* Jump to the main function */
	bl	sp_min_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	ldr	r0, =__DATA_START__
	ldr	r1, =__DATA_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	bl	smc_get_next_ctx

	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_entrypoint


/*
 * SMC handling function for SP_MIN.
 */
func sp_min_handle_smc
	/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

	smccc_save_gp_mode_regs

	clrex_on_monitor_entry

	/*
	 * `sp` still points to `smc_ctx_t`. Save it to a register
	 * and restore the C runtime stack pointer to `sp`.
	 */
	mov	r2, sp				/* handle */
	ldr	sp, [r2, #SMC_CTX_SP_MON]

	ldr	r0, [r2, #SMC_CTX_SCR]
	and	r3, r0, #SCR_NS_BIT		/* flags */

	/* Switch to Secure Mode*/
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	/*
	 * Set PMCR.DP to 1 to prohibit cycle counting whilst in Secure Mode.
	 * Also, the PMCR.LC field has an architecturally UNKNOWN value on reset
	 * and so set to 1 as ARM has deprecated use of PMCR.LC=0.
	 */
	ldcopr	r0, PMCR
	orr	r0, r0, #(PMCR_LC_BIT | PMCR_DP_BIT)
	stcopr	r0, PMCR

	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
	/* Check whether an SMC64 is issued */
	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
	beq	1f
	/* SMC32 is not detected. Return error back to caller */
	mov	r0, #SMC_UNK
	str	r0, [r2, #SMC_CTX_GPREG_R0]
	mov	r0, r2
	b	sp_min_exit
1:
	/* SMC32 is detected */
	mov	r1, #0				/* cookie */
	bl	handle_runtime_svc

	/* `r0` points to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_handle_smc

/*
 * Secure Interrupts handling function for SP_MIN.
 */
func sp_min_handle_fiq
#if !SP_MIN_WITH_SECURE_FIQ
	b	plat_panic_handler
#else
	/* FIQ has a +4 offset for lr compared to preferred return address */
	sub	lr, lr, #4
	/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

	smccc_save_gp_mode_regs

	clrex_on_monitor_entry

	/* load run-time stack */
	mov	r2, sp
	ldr	sp, [r2, #SMC_CTX_SP_MON]

	/* Switch to Secure Mode */
	ldr	r0, [r2, #SMC_CTX_SCR]
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	/*
	 * Set PMCR.DP to 1 to prohibit cycle counting whilst in Secure Mode.
	 * Also, the PMCR.LC field has an architecturally UNKNOWN value on reset
	 * and so set to 1 as ARM has deprecated use of PMCR.LC=0.
	 */
	ldcopr	r0, PMCR
	orr	r0, r0, #(PMCR_LC_BIT | PMCR_DP_BIT)
	stcopr	r0, PMCR

	push	{r2, r3}
	bl	sp_min_fiq
	pop	{r0, r3}

	b	sp_min_exit
#endif
endfunc sp_min_handle_fiq

/*
 * The Warm boot entrypoint for SP_MIN.
 */
func sp_min_warm_entrypoint
	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 * - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
	 *   programming the reset address do we need to initialise the SCTLR.
	 *   In other cases, we assume this has been taken care by the
	 *   entrypoint code.
	 *
	 * - No need to determine the type of boot, we know it is a warm boot.
	 *
	 * - Do not try to distinguish between primary and secondary CPUs, this
	 *   notion only exists for a cold boot.
	 *
	 * - No need to initialise the memory or the C runtime environment,
	 *   it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=sp_min_vector_table

	/*
	 * We're about to enable MMU and participate in PSCI state coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * others). Therefore we keep data caches disabled even after enabling
	 * the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	mov	r0, #0
#else
	mov	r0, #DISABLE_DCACHE
#endif
	bl	bl32_plat_enable_mmu

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r0
#endif

	bl	sp_min_warm_boot
	bl	smc_get_next_ctx
	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_warm_entrypoint

/*
 * The function to restore the registers from SMC context and return
 * to the mode restored to SPSR.
 *
 * Arguments : r0 must point to the SMC context to restore from.
 */
func sp_min_exit
	monitor_exit
endfunc sp_min_exit