/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <context.h>
#include <el3_common_macros.S>
#include <runtime_svc.h>
#include <smcc_helpers.h>
#include <smcc_macros.S>
#include <xlat_tables_defs.h>

	.globl	sp_min_vector_table
	.globl	sp_min_entrypoint
	.globl	sp_min_warm_entrypoint

	/*
	 * Route FIQs to Monitor mode. \reg is a scratch register used for
	 * the SCR read-modify-write; its previous contents are clobbered.
	 */
	.macro route_fiq_to_sp_min reg
		/* -----------------------------------------------------
		 * FIQs are secure interrupts trapped by Monitor and non
		 * secure is not allowed to mask the FIQs.
		 * -----------------------------------------------------
		 */
		ldcopr	\reg, SCR
		orr	\reg, \reg, #SCR_FIQ_BIT	/* Take FIQs to Monitor mode */
		bic	\reg, \reg, #SCR_FW_BIT		/* NS world cannot mask FIQs */
		stcopr	\reg, SCR
	.endm

/* Monitor vector table. Entries not handled by SP_MIN panic. */
vector_base sp_min_vector_table
	b	sp_min_entrypoint
	b	plat_panic_handler	/* Undef */
	b	handle_smc		/* Syscall */
	b	plat_panic_handler	/* Prefetch abort */
	b	plat_panic_handler	/* Data abort */
	b	plat_panic_handler	/* Reserved */
	b	plat_panic_handler	/* IRQ */
	b	handle_fiq		/* FIQ */


/*
 * The Cold boot/Reset entrypoint for SP_MIN
 */
func sp_min_entrypoint
#if !RESET_TO_SP_MIN
	/* ---------------------------------------------------------------
	 * Preceding bootloader has populated r0 with a pointer to a
	 * 'bl_params_t' structure & r1 with a pointer to platform
	 * specific structure
	 * ---------------------------------------------------------------
	 */
	mov	r11, r0			/* Stash args in callee-context regs */
	mov	r12, r1			/* until the early platform setup call */

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
	 * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised the
	 * SCTLR, including the CPU endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * Relay the previous bootloader's arguments to the platform layer
	 * ---------------------------------------------------------------------
	 */
	mov	r0, r11
	mov	r1, r12
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems which have a programmable reset address,
	 * sp_min_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
	 * to run so there's no argument to relay from a previous bootloader.
	 * Zero the arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	r0, #0
	mov	r1, #0
#endif /* RESET_TO_SP_MIN */

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r4
#endif

	bl	sp_min_early_platform_setup
	bl	sp_min_plat_arch_setup

	/* Jump to the main function */
	bl	sp_min_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	ldr	r0, =__DATA_START__
	ldr	r1, =__DATA_END__
	sub	r1, r1, r0		/* r1 = size of .data in bytes */
	bl	clean_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_END__
	sub	r1, r1, r0		/* r1 = size of .bss in bytes */
	bl	clean_dcache_range

	bl	smc_get_next_ctx

	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_entrypoint


/*
 * SMC handling function for SP_MIN.
 */
func handle_smc
	/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

	smcc_save_gp_mode_regs

	/*
	 * `sp` still points to `smc_ctx_t`. Save it to a register
	 * and restore the C runtime stack pointer to `sp`.
	 */
	mov	r2, sp				/* handle */
	ldr	sp, [r2, #SMC_CTX_SP_MON]

	ldr	r0, [r2, #SMC_CTX_SCR]
	and	r3, r0, #SCR_NS_BIT		/* flags */

	/* Switch to Secure Mode */
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
	/* Check whether an SMC64 is issued */
	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
	beq	1f
	/* SMC32 is not detected. Return error back to caller */
	mov	r0, #SMC_UNK
	str	r0, [r2, #SMC_CTX_GPREG_R0]
	mov	r0, r2
	b	sp_min_exit
1:
	/* SMC32 is detected */
	mov	r1, #0				/* cookie */
	bl	handle_runtime_svc

	/* `r0` points to `smc_ctx_t` */
	b	sp_min_exit
endfunc handle_smc


/*
 * Secure Interrupts handling function for SP_MIN.
 */
func handle_fiq
#if !SP_MIN_WITH_SECURE_FIQ
	/* FIQs are not expected to be routed here in this configuration */
	b	plat_panic_handler
#else
	/* FIQ has a +4 offset for lr compared to preferred return address */
	sub	lr, lr, #4
	/* On FIQ entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

	smcc_save_gp_mode_regs

	/*
	 * AArch32 architectures need to clear the exclusive access when
	 * entering Monitor mode.
	 */
	clrex

	/* load run-time stack */
	mov	r2, sp
	ldr	sp, [r2, #SMC_CTX_SP_MON]

	/* Switch to Secure Mode */
	ldr	r0, [r2, #SMC_CTX_SCR]
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	/* Preserve the context handle (r2) across the C handler call */
	push	{r2, r3}
	bl	sp_min_fiq
	pop	{r0, r3}		/* r0 = saved handle, for sp_min_exit */

	b	sp_min_exit
#endif
endfunc handle_fiq


/*
 * The Warm boot entrypoint for SP_MIN.
 */
func sp_min_warm_entrypoint
	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 * - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
	 *   programming the reset address do we need to initialise the SCTLR.
	 *   In other cases, we assume this has been taken care of by the
	 *   entrypoint code.
	 *
	 * - No need to determine the type of boot, we know it is a warm boot.
	 *
	 * - Do not try to distinguish between primary and secondary CPUs, this
	 *   notion only exists for a cold boot.
	 *
	 * - No need to initialise the memory or the C runtime environment,
	 *   it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=sp_min_vector_table

	/*
	 * We're about to enable MMU and participate in PSCI state coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * others). Therefore we keep data caches disabled even after enabling
	 * the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
	mov	r0, #DISABLE_DCACHE
	bl	bl32_plat_enable_mmu

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r0
#endif

#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	/* Coherency is free on these systems: enable the data cache now */
	ldcopr	r0, SCTLR
	orr	r0, r0, #SCTLR_C_BIT
	stcopr	r0, SCTLR
	isb
#endif

	bl	sp_min_warm_boot
	bl	smc_get_next_ctx
	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_warm_entrypoint

/*
 * The function to restore the registers from SMC context and return
 * to the mode restored to SPSR.
 *
 * Arguments : r0 must point to the SMC context to restore from.
 */
func sp_min_exit
	monitor_exit
endfunc sp_min_exit