/*
 * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <context.h>
#include <el3_common_macros.S>
#include <runtime_svc.h>
#include <smcc_helpers.h>
#include <smcc_macros.S>
#include <xlat_tables_defs.h>

	.globl	sp_min_vector_table
	.globl	sp_min_entrypoint
	.globl	sp_min_warm_entrypoint
	.globl	sp_min_handle_smc
	.globl	sp_min_handle_fiq

	.macro route_fiq_to_sp_min reg
	/* -----------------------------------------------------
	 * FIQs are secure interrupts trapped by the Monitor.
	 * Set SCR.FIQ to route FIQs to Monitor mode and clear
	 * SCR.FW so that the Non-secure world cannot mask them
	 * through CPSR.F.
	 * -----------------------------------------------------
	 */
	ldcopr	\reg, SCR
	orr	\reg, \reg, #SCR_FIQ_BIT
	bic	\reg, \reg, #SCR_FW_BIT
	stcopr	\reg, SCR
	.endm

	.macro clrex_on_monitor_entry
#if (ARM_ARCH_MAJOR == 7)
	/*
	 * ARMv7 architectures need to clear the local exclusive monitor
	 * when entering Monitor mode.
	 */
	clrex
#endif
	.endm

vector_base sp_min_vector_table
	b	sp_min_entrypoint
	b	plat_panic_handler	/* Undef */
	b	sp_min_handle_smc	/* SMC call */
	b	plat_panic_handler	/* Prefetch abort */
	b	plat_panic_handler	/* Data abort */
	b	plat_panic_handler	/* Reserved */
	b	plat_panic_handler	/* IRQ */
	b	sp_min_handle_fiq	/* FIQ */
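
/*
 * Note: this table is reached via MVBAR rather than VBAR. A minimal sketch
 * of how `el3_entrypoint_common` is expected to install it, assuming the
 * usual implementation in el3_common_macros.S (illustrative only, not
 * assembled here):
 *
 *	ldr	r0, =sp_min_vector_table
 *	stcopr	r0, MVBAR
 *	isb
 *
 * Once MVBAR points at this table, SMCs and FIQs trapped to Monitor mode
 * enter through the corresponding slots above.
 */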

/*
 * The Cold boot/Reset entrypoint for SP_MIN
 */
func sp_min_entrypoint
#if !RESET_TO_SP_MIN
	/* ---------------------------------------------------------------
	 * The preceding bootloader has populated r0 with a pointer to a
	 * 'bl_params_t' structure and r1 with a pointer to a platform
	 * specific structure.
	 * ---------------------------------------------------------------
	 */
	mov	r11, r0
	mov	r12, r1

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
	 * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised the
	 * SCTLR, including the CPU endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * Relay the previous bootloader's arguments to the platform layer.
	 * ---------------------------------------------------------------------
	 */
	mov	r0, r11
	mov	r1, r12
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems which have a programmable reset address,
	 * sp_min_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
	 * to run, so there is no argument to relay from a previous bootloader.
	 * Zero the arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	r0, #0
	mov	r1, #0
#endif /* RESET_TO_SP_MIN */

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r4
#endif

	bl	sp_min_early_platform_setup
	bl	sp_min_plat_arch_setup

	/* Jump to the main function */
	bl	sp_min_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	ldr	r0, =__DATA_START__
	ldr	r1, =__DATA_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	bl	smc_get_next_ctx

	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_entrypoint


/*
 * SMC handling function for SP_MIN.
 */
func sp_min_handle_smc
	/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

	smcc_save_gp_mode_regs

	clrex_on_monitor_entry

	/*
	 * `sp` still points to `smc_ctx_t`. Save it to a register
	 * and restore the C runtime stack pointer to `sp`.
	 */
	mov	r2, sp				/* handle */
	ldr	sp, [r2, #SMC_CTX_SP_MON]

	ldr	r0, [r2, #SMC_CTX_SCR]
	and	r3, r0, #SCR_NS_BIT		/* flags */

	/* Switch to Secure Mode */
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	/*
	 * Set PMCR.DP to 1 to prohibit cycle counting whilst in Secure Mode.
	 * Also, the PMCR.LC field has an architecturally UNKNOWN value on reset
	 * and so set it to 1 as ARM has deprecated use of PMCR.LC=0.
	 */
	ldcopr	r0, PMCR
	orr	r0, r0, #(PMCR_LC_BIT | PMCR_DP_BIT)
	stcopr	r0, PMCR

	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
	/* Check whether an SMC64 call was issued (see the note below) */
	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
	beq	1f
	/* SMC64 calls cannot be handled in AArch32. Return SMC_UNK to the caller */
	mov	r0, #SMC_UNK
	str	r0, [r2, #SMC_CTX_GPREG_R0]
	mov	r0, r2
	b	sp_min_exit
1:
	/* An SMC32 call was detected */
	mov	r1, #0				/* cookie */
	bl	handle_runtime_svc

	/* `r0` points to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_handle_smc
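
/*
 * Note on the SMC32/SMC64 check in sp_min_handle_smc above: per the SMC
 * Calling Convention, bit[30] of the function ID selects the calling
 * convention (0 = SMC32, 1 = SMC64). Assuming the usual definitions
 * (FUNCID_CC_SHIFT = 30, FUNCID_CC_MASK = 0x1 in smcc.h), the check is
 * equivalent to this sketch:
 *
 *	tst	r0, #0x40000000		@ test bit 30 of smc_fid
 *	bne	reject_smc64		@ hypothetical label
 *
 * AArch32 SP_MIN cannot service SMC64 calls, hence SMC_UNK is returned.
 */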

/*
 * Secure Interrupts handling function for SP_MIN.
 */
func sp_min_handle_fiq
#if !SP_MIN_WITH_SECURE_FIQ
	b	plat_panic_handler
#else
	/* On FIQ entry, lr is 4 bytes past the preferred return address */
	sub	lr, lr, #4
	/* On FIQ entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

	smcc_save_gp_mode_regs

	clrex_on_monitor_entry

	/* Load the C runtime stack pointer */
	mov	r2, sp
	ldr	sp, [r2, #SMC_CTX_SP_MON]

	/* Switch to Secure Mode */
	ldr	r0, [r2, #SMC_CTX_SCR]
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	/*
	 * Set PMCR.DP to 1 to prohibit cycle counting whilst in Secure Mode.
	 * Also, the PMCR.LC field has an architecturally UNKNOWN value on reset
	 * and so set it to 1 as ARM has deprecated use of PMCR.LC=0.
	 */
	ldcopr	r0, PMCR
	orr	r0, r0, #(PMCR_LC_BIT | PMCR_DP_BIT)
	stcopr	r0, PMCR

	push	{r2, r3}
	bl	sp_min_fiq
	pop	{r0, r3}

	b	sp_min_exit
#endif
endfunc sp_min_handle_fiq

/*
 * The Warm boot entrypoint for SP_MIN.
 */
func sp_min_warm_entrypoint
	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 * - The SCTLR only needs initialising when the platform bypasses the
	 *   BL1/BL32 (SP_MIN) entrypoint by programming the reset address.
	 *   In other cases, we assume this has been taken care of by the
	 *   entrypoint code.
	 *
	 * - No need to determine the type of boot; we know it is a warm boot.
	 *
	 * - Do not try to distinguish between primary and secondary CPUs; this
	 *   notion only exists for a cold boot.
	 *
	 * - No need to initialise the memory or the C runtime environment;
	 *   that has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=sp_min_vector_table

	/*
	 * We're about to enable the MMU and participate in PSCI state
	 * coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * others). Therefore we keep data caches disabled even after enabling
	 * the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
	mov	r0, #DISABLE_DCACHE
	bl	bl32_plat_enable_mmu

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r0
#endif

#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	/* Enable the data cache early on platforms that can safely do so */
	ldcopr	r0, SCTLR
	orr	r0, r0, #SCTLR_C_BIT
	stcopr	r0, SCTLR
	isb
#endif

	bl	sp_min_warm_boot
	bl	smc_get_next_ctx
	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_warm_entrypoint

/*
 * This function restores the registers from the SMC context and returns
 * to the mode specified by the SPSR saved in that context.
 *
 * Arguments : r0 must point to the SMC context to restore from.
 */
func sp_min_exit
	monitor_exit
endfunc sp_min_exit
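
/*
 * A note on `monitor_exit` (presumably provided by smcc_macros.S): it is
 * expected to restore the general purpose and banked registers from the
 * `smc_ctx_t` pointed to by r0 and then perform an exception return to the
 * mode held in the saved SPSR. A rough sketch of the final step, assuming
 * the macro behaves as described (illustrative only, not its exact code):
 *
 *	ldr	lr, [r0, #SMC_CTX_LR_MON]	@ return address
 *	...					@ restore remaining context
 *	eret					@ exception return from Monitor
 */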