/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <context.h>
#include <el3_common_macros.S>
#include <runtime_svc.h>
#include <smcc_helpers.h>
#include <smcc_macros.S>
#include <xlat_tables_defs.h>

	.globl	sp_min_vector_table
	.globl	sp_min_entrypoint
	.globl	sp_min_warm_entrypoint

	.macro route_fiq_to_sp_min reg
	/* -----------------------------------------------------
	 * FIQs are secure interrupts trapped by the Monitor and
	 * the Non-secure world is not allowed to mask them.
	 * -----------------------------------------------------
	 */
	ldcopr	\reg, SCR
	orr	\reg, \reg, #SCR_FIQ_BIT
	bic	\reg, \reg, #SCR_FW_BIT
	stcopr	\reg, SCR
	.endm

vector_base sp_min_vector_table
	b	sp_min_entrypoint
	b	plat_panic_handler	/* Undef */
	b	handle_smc		/* Syscall */
	b	plat_panic_handler	/* Prefetch abort */
	b	plat_panic_handler	/* Data abort */
	b	plat_panic_handler	/* Reserved */
	b	plat_panic_handler	/* IRQ */
	b	handle_fiq		/* FIQ */


/*
 * The Cold boot/Reset entrypoint for SP_MIN
 */
func sp_min_entrypoint
#if !RESET_TO_SP_MIN
	/* ---------------------------------------------------------------
	 * The preceding bootloader has populated r0 with a pointer to a
	 * 'bl_params_t' structure and r1 with a pointer to a platform
	 * specific structure.
	 * ---------------------------------------------------------------
	 */
	mov	r11, r0
	mov	r12, r1

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
	 * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised the
	 * SCTLR, including the CPU endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * Relay the previous bootloader's arguments to the platform layer
	 * ---------------------------------------------------------------------
	 */
	mov	r0, r11
	mov	r1, r12
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems which have a programmable reset address,
	 * sp_min_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
	 * to run so there's no argument to relay from a previous bootloader.
	 * Zero the arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	r0, #0
	mov	r1, #0
#endif /* RESET_TO_SP_MIN */

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r4
#endif

	bl	sp_min_early_platform_setup
	bl	sp_min_plat_arch_setup

	/* Jump to the main function */
	bl	sp_min_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	ldr	r0, =__DATA_START__
	ldr	r1, =__DATA_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	bl	smc_get_next_ctx

	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_entrypoint
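
/*
 * For illustration only (kept inside this comment so nothing extra is
 * assembled): a minimal sketch of how a non-secure caller might reach
 * handle_smc below. The function ID shown is a hypothetical example,
 * not one defined by this file.
 *
 *	ldr	r0, =0x82000000		@ example SMC32 fast call FID
 *	mov	r1, #0			@ call arguments go in r1-r3
 *	smc	#0			@ traps to sp_min_vector_table + 0x8
 *
 * On entry to handle_smc, `sp` points at the smc_ctx_t of the security
 * state being entered and the caller's r0-r3 carry the SMC arguments.
 */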
/*
 * SMC handling function for SP_MIN.
 */
func handle_smc
	/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

	smcc_save_gp_mode_regs

	/*
	 * `sp` still points to `smc_ctx_t`. Save it to a register
	 * and restore the C runtime stack pointer to `sp`.
	 */
	mov	r2, sp				/* handle */
	ldr	sp, [r2, #SMC_CTX_SP_MON]

	ldr	r0, [r2, #SMC_CTX_SCR]
	and	r3, r0, #SCR_NS_BIT		/* flags */

	/* Switch to Secure Mode */
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
	/* Check whether an SMC64 function ID was issued */
	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
	beq	1f
	/* SMC64 calls are not supported. Return an error to the caller */
	mov	r0, #SMC_UNK
	str	r0, [r2, #SMC_CTX_GPREG_R0]
	mov	r0, r2
	b	sp_min_exit
1:
	/* An SMC32 call was detected */
	mov	r1, #0				/* cookie */
	bl	handle_runtime_svc

	/* `r0` points to `smc_ctx_t` */
	b	sp_min_exit
endfunc handle_smc
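
/*
 * For reference, the register-to-argument mapping set up above matches
 * the generic runtime service dispatcher's C prototype (declared in
 * runtime_svc.h, included at the top of this file; repeated here as a
 * sketch, not a redefinition):
 *
 *	uintptr_t handle_runtime_svc(uint32_t smc_fid, void *cookie,
 *				     void *handle, unsigned int flags);
 *
 * i.e. r0 = smc_fid, r1 = cookie (0), r2 = handle (the smc_ctx_t saved
 * on SMC entry) and r3 = flags (the caller's SCR.NS bit).
 */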
/*
 * Secure interrupt handling function for SP_MIN.
 */
func handle_fiq
#if !SP_MIN_WITH_SECURE_FIQ
	b	plat_panic_handler
#else
	/* FIQ has a +4 offset for lr compared to the preferred return address */
	sub	lr, lr, #4
	/* On FIQ entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

	smcc_save_gp_mode_regs

	/*
	 * On AArch32, the local exclusive monitor must be cleared when
	 * entering Monitor mode.
	 */
	clrex

	/* Load the run-time stack */
	mov	r2, sp
	ldr	sp, [r2, #SMC_CTX_SP_MON]

	/* Switch to Secure Mode */
	ldr	r0, [r2, #SMC_CTX_SCR]
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	/* Preserve the handle across the C call; it is popped back into r0
	 * so that sp_min_exit restores from the right context. */
	push	{r2, r3}
	bl	sp_min_fiq
	pop	{r0, r3}

	b	sp_min_exit
#endif
endfunc handle_fiq

/*
 * The warm boot entrypoint for SP_MIN.
 */
func sp_min_warm_entrypoint
	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 * - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
	 *   programming the reset address do we need to initialise the SCTLR.
	 *   In other cases, we assume this has already been taken care of by
	 *   the entrypoint code.
	 *
	 * - No need to determine the type of boot: we know it is a warm boot.
	 *
	 * - Do not try to distinguish between primary and secondary CPUs; this
	 *   notion only exists for a cold boot.
	 *
	 * - No need to initialise the memory or the C runtime environment;
	 *   it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=sp_min_vector_table

	/*
	 * We're about to enable the MMU and participate in PSCI state
	 * coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * other things). Therefore we keep data caches disabled even after
	 * enabling the MMU on such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
	mov	r0, #DISABLE_DCACHE
	bl	bl32_plat_enable_mmu

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r0
#endif

#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	/* Enable the data cache early on systems that are already coherent */
	ldcopr	r0, SCTLR
	orr	r0, r0, #SCTLR_C_BIT
	stcopr	r0, SCTLR
	isb
#endif

	bl	sp_min_warm_boot
	bl	smc_get_next_ctx
	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_warm_entrypoint

/*
 * The function to restore the registers from the SMC context and return
 * to the mode indicated by the saved SPSR.
 *
 * Arguments : r0 must point to the SMC context to restore from.
 */
func sp_min_exit
	monitor_exit
endfunc sp_min_exit
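
/*
 * Putting it together, a round trip through SP_MIN looks roughly like
 * this (a summary sketch, not normative):
 *
 *	NS world:  smc #0
 *	  -> sp_min_vector_table + 0x8 -> handle_smc
 *	     - caller's GP registers saved into the smc_ctx_t at `sp`
 *	     - switch to the C runtime stack, SCR.NS cleared
 *	     - handle_runtime_svc() dispatches to the owning service,
 *	       which writes its return values into the context
 *	  -> sp_min_exit(ctx): the `monitor_exit` macro restores the
 *	     register state and SCR from the context and performs an
 *	     exception return to the original caller.
 */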