/*
 * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <common/bl_common.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <el3_common_macros.S>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <smccc_helpers.h>
#include <smccc_macros.S>

	.globl	sp_min_vector_table
	.globl	sp_min_entrypoint
	.globl	sp_min_warm_entrypoint
	.globl	sp_min_handle_smc
	.globl	sp_min_handle_fiq

	.macro route_fiq_to_sp_min reg
	/* -----------------------------------------------------
	 * FIQs are secure interrupts trapped by the Monitor; the
	 * non-secure world is not allowed to mask them.
	 * -----------------------------------------------------
	 */
	ldcopr	\reg, SCR
	orr	\reg, \reg, #SCR_FIQ_BIT
	bic	\reg, \reg, #SCR_FW_BIT
	stcopr	\reg, SCR
	.endm

	.macro clrex_on_monitor_entry
#if (ARM_ARCH_MAJOR == 7)
	/*
	 * ARMv7 architectures need to clear the local exclusive monitor
	 * when entering Monitor mode.
	 */
	clrex
#endif
	.endm

vector_base sp_min_vector_table
	b	sp_min_entrypoint
	b	plat_panic_handler	/* Undef */
	b	sp_min_handle_smc	/* Syscall */
	b	plat_panic_handler	/* Prefetch abort */
	b	plat_panic_handler	/* Data abort */
	b	plat_panic_handler	/* Reserved */
	b	plat_panic_handler	/* IRQ */
	b	sp_min_handle_fiq	/* FIQ */


/*
 * The Cold boot/Reset entrypoint for SP_MIN
 */
func sp_min_entrypoint
#if !RESET_TO_SP_MIN
	/* ---------------------------------------------------------------
	 * The preceding bootloader has populated r0 with a pointer to a
	 * 'bl_params_t' structure and r1 with a pointer to a platform
	 * specific structure. Stash r0-r3 in r9-r12 so that they survive
	 * the early initialisation code.
	 * ---------------------------------------------------------------
	 */
	mov	r9, r0
	mov	r10, r1
	mov	r11, r2
	mov	r12, r3

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
	 * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised the
	 * SCTLR, including the CPU endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * Relay the previous bootloader's arguments to the platform layer.
	 * ---------------------------------------------------------------------
	 */
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems which have a programmable reset address,
	 * sp_min_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
	 * to run, so there are no arguments to relay from a previous
	 * bootloader. Zero the arguments passed to the platform layer to
	 * reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	r9, #0
	mov	r10, #0
	mov	r11, #0
	mov	r12, #0

#endif /* RESET_TO_SP_MIN */

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r4
#endif

	mov	r0, r9
	mov	r1, r10
	mov	r2, r11
	mov	r3, r12
	bl	sp_min_early_platform_setup2
	bl	sp_min_plat_arch_setup

	/* Jump to the main function */
	bl	sp_min_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	ldr	r0, =__DATA_START__
	ldr	r1, =__DATA_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	bl	smc_get_next_ctx

	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_entrypoint


/*
 * SMC handling function for SP_MIN.
 */
func sp_min_handle_smc
	/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

	smccc_save_gp_mode_regs

	clrex_on_monitor_entry

	/*
	 * `sp` still points to `smc_ctx_t`. Save it to a register
	 * and restore the C runtime stack pointer to `sp`.
	 */
	mov	r2, sp				/* handle */
	ldr	sp, [r2, #SMC_CTX_SP_MON]

	ldr	r0, [r2, #SMC_CTX_SCR]
	and	r3, r0, #SCR_NS_BIT		/* flags */

	/* Switch to Secure Mode */
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
	/* Check whether an SMC64 call was issued */
	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
	beq	1f
	/* Not an SMC32 call. Return SMC_UNK to the caller */
	mov	r0, #SMC_UNK
	str	r0, [r2, #SMC_CTX_GPREG_R0]
	mov	r0, r2
	b	sp_min_exit
1:
	/*
	 * SMC32 is detected. Dispatch with the argument layout expected by
	 * handle_runtime_svc(smc_fid, cookie, handle, flags).
	 */
	mov	r1, #0				/* cookie */
	bl	handle_runtime_svc

	/* `r0` points to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_handle_smc
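
/*
 * Note: the snippet below is an illustrative sketch only, not part of this
 * image. It shows how a normal world caller typically reaches
 * sp_min_handle_smc, assuming the SMC32 calling convention: the function
 * identifier goes in r0, arguments in r1-r3, and the result comes back in
 * r0. The PSCI_VERSION function identifier (0x84000000) is used here purely
 * as an example of an SMC32 fast call.
 *
 *	ldr	r0, =0x84000000		@ PSCI_VERSION, SMC32 fast call
 *	smc	#0			@ traps to the Monitor SMC vector
 *					@ on return, r0 holds the result
 */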

/*
 * Secure interrupt (FIQ) handling function for SP_MIN.
 */
func sp_min_handle_fiq
#if !SP_MIN_WITH_SECURE_FIQ
	b	plat_panic_handler
#else
	/* On FIQ, `lr` is at a +4 offset from the preferred return address */
	sub	lr, lr, #4
	/* On FIQ entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

	smccc_save_gp_mode_regs

	clrex_on_monitor_entry

	/* Load the run-time stack */
	mov	r2, sp
	ldr	sp, [r2, #SMC_CTX_SP_MON]

	/* Switch to Secure Mode */
	ldr	r0, [r2, #SMC_CTX_SCR]
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	/*
	 * Preserve the `smc_ctx_t` pointer across the C call (r3 is pushed
	 * only to keep the stack 8-byte aligned) and restore it into r0,
	 * where sp_min_exit expects it.
	 */
	push	{r2, r3}
	bl	sp_min_fiq
	pop	{r0, r3}

	b	sp_min_exit
#endif
endfunc sp_min_handle_fiq

/*
 * The Warm boot entrypoint for SP_MIN.
 */
func sp_min_warm_entrypoint
	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 * - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
	 *   programming the reset address do we need to initialise the SCTLR.
	 *   In other cases, we assume this has been taken care of by the
	 *   entrypoint code.
	 *
	 * - No need to determine the type of boot; we know it is a warm boot.
	 *
	 * - Do not try to distinguish between primary and secondary CPUs; this
	 *   notion only exists for a cold boot.
	 *
	 * - No need to initialise the memory or the C runtime environment;
	 *   it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=sp_min_vector_table

	/*
	 * We're about to enable the MMU and participate in PSCI state
	 * coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs
	 * to participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * others). Therefore we keep data caches disabled even after enabling
	 * the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	mov	r0, #0
#else
	mov	r0, #DISABLE_DCACHE
#endif
	bl	bl32_plat_enable_mmu

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r0
#endif

	bl	sp_min_warm_boot
	bl	smc_get_next_ctx

	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_warm_entrypoint

/*
 * The function to restore the registers from the SMC context and return
 * to the mode specified by the restored SPSR.
 *
 * Arguments: r0 must point to the SMC context to restore from.
 */
func sp_min_exit
	monitor_exit
endfunc sp_min_exit
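
/*
 * Note: an illustrative usage sketch (assumption, not part of this file) of
 * the sp_min_exit contract followed by the handlers above. The caller hands
 * over an `smc_ctx_t` pointer in r0 and never returns here; monitor_exit
 * restores the saved register state and performs an exception return to the
 * world and mode recorded in the context.
 *
 *	mov	r0, r2			@ r2 = `smc_ctx_t` saved on entry
 *	b	sp_min_exit		@ exception return, does not come back
 */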