/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <context.h>
#include <el3_common_macros.S>
#include <runtime_svc.h>
#include <smcc_helpers.h>
#include <smcc_macros.S>
#include <xlat_tables_defs.h>

	.globl	sp_min_vector_table
	.globl	sp_min_entrypoint
	.globl	sp_min_warm_entrypoint


/*
 * Monitor-mode exception vector table for SP_MIN. Only reset (cold boot)
 * and SMC (Syscall) are handled; every other exception is fatal and is
 * routed to the platform panic handler.
 */
vector_base sp_min_vector_table
	b	sp_min_entrypoint
	b	plat_panic_handler	/* Undef */
	b	handle_smc		/* Syscall */
	b	plat_panic_handler	/* Prefetch abort */
	b	plat_panic_handler	/* Data abort */
	b	plat_panic_handler	/* Reserved */
	b	plat_panic_handler	/* IRQ */
	b	plat_panic_handler	/* FIQ */


/*
 * The Cold boot/Reset entrypoint for SP_MIN
 */
func sp_min_entrypoint
#if !RESET_TO_SP_MIN
	/* ---------------------------------------------------------------
	 * Preceding bootloader has populated r0 with a pointer to a
	 * 'bl_params_t' structure & r1 with a pointer to platform
	 * specific structure. Stash them in callee-preserved registers
	 * across el3_entrypoint_common (which clobbers r0-r3).
	 * ---------------------------------------------------------------
	 */
	mov	r11, r0
	mov	r12, r1

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
	 * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already set up the CPU
	 * endianness and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_set_endian=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * Relay the previous bootloader's arguments to the platform layer
	 * ---------------------------------------------------------------------
	 */
	mov	r0, r11
	mov	r1, r12
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems which have a programmable reset address,
	 * sp_min_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_set_endian=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
	 * to run so there's no argument to relay from a previous bootloader.
	 * Zero the arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	r0, #0
	mov	r1, #0
#endif /* RESET_TO_SP_MIN */

	bl	sp_min_early_platform_setup
	bl	sp_min_plat_arch_setup

	/* Jump to the main function */
	bl	sp_min_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	ldr	r0, =__DATA_START__
	ldr	r1, =__DATA_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	/* Program the registers in cpu_context and exit monitor mode */
	mov	r0, #NON_SECURE
	bl	cm_get_context

	/* Restore the SCR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
	stcopr	r2, SCR
	isb

	/* Restore the SCTLR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
	stcopr	r2, SCTLR

	bl	smc_get_next_ctx
	/* The other cpu_context registers have been copied to smc context */
	b	sp_min_exit
endfunc sp_min_entrypoint


/*
 * SMC handling function for SP_MIN.
 */
func handle_smc
	smcc_save_gp_mode_regs

	/* r0 points to smc_context */
	mov	r2, r0				/* handle */
	ldcopr	r0, SCR

	/*
	 * Save SCR in stack. r1 is pushed to meet the 8 byte
	 * stack alignment requirement.
	 */
	push	{r0, r1}
	and	r3, r0, #SCR_NS_BIT		/* flags */

	/* Switch to Secure Mode */
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb
	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
	/* Check whether an SMC64 is issued */
	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
	beq	1f	/* SMC32 is detected */
	mov	r0, #SMC_UNK
	str	r0, [r2, #SMC_CTX_GPREG_R0]
	mov	r0, r2
	b	2f	/* Skip handling the SMC */
1:
	mov	r1, #0				/* cookie */
	bl	handle_runtime_svc
2:
	/* r0 points to smc context */

	/* Restore SCR from stack */
	pop	{r1, r2}
	stcopr	r1, SCR
	isb

	b	sp_min_exit
endfunc handle_smc


/*
 * The Warm boot entrypoint for SP_MIN.
 */
func sp_min_warm_entrypoint
	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 * - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
	 *   programming the reset address do we need to set the CPU endianness.
	 *   In other cases, we assume this has been taken care by the
	 *   entrypoint code.
	 *
	 * - No need to determine the type of boot, we know it is a warm boot.
	 *
	 * - Do not try to distinguish between primary and secondary CPUs, this
	 *   notion only exists for a cold boot.
	 *
	 * - No need to initialise the memory or the C runtime environment,
	 *   it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_set_endian=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=sp_min_vector_table

	/*
	 * We're about to enable MMU and participate in PSCI state coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * others). Therefore we keep data caches disabled even after enabling
	 * the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
	mov	r0, #DISABLE_DCACHE
	bl	bl32_plat_enable_mmu

#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	/* Coherency is cheap on these systems: turn the data cache on now */
	ldcopr	r0, SCTLR
	orr	r0, r0, #SCTLR_C_BIT
	stcopr	r0, SCTLR
	isb
#endif

	bl	sp_min_warm_boot

	/* Program the registers in cpu_context and exit monitor mode */
	mov	r0, #NON_SECURE
	bl	cm_get_context

	/* Restore the SCR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
	stcopr	r2, SCR
	isb

	/* Restore the SCTLR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
	stcopr	r2, SCTLR

	bl	smc_get_next_ctx

	/* The other cpu_context registers have been copied to smc context */
	b	sp_min_exit
endfunc sp_min_warm_entrypoint

/*
 * The function to restore the registers from SMC context and return
 * to the mode restored to SPSR.
 *
 * Arguments : r0 must point to the SMC context to restore from.
 */
func sp_min_exit
	smcc_restore_gp_mode_regs
	eret
endfunc sp_min_exit