/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <context.h>
#include <el3_common_macros.S>
#include <runtime_svc.h>
#include <smcc_helpers.h>
#include <smcc_macros.S>
#include <xlat_tables_defs.h>

	.globl	sp_min_vector_table
	.globl	sp_min_entrypoint
	.globl	sp_min_warm_entrypoint


	/*
	 * Monitor-mode exception vector table for SP_MIN. It is installed by
	 * passing it as the _exception_vectors argument of
	 * el3_entrypoint_common (see both entrypoints below). Only the SMC
	 * entry is handled; every other exception taken to monitor mode is
	 * fatal and routed to the platform panic handler.
	 */
vector_base sp_min_vector_table
	b	sp_min_entrypoint	/* Reset */
	b	plat_panic_handler	/* Undef */
	b	handle_smc		/* Syscall (SMC) */
	b	plat_panic_handler	/* Prefetch abort */
	b	plat_panic_handler	/* Data abort */
	b	plat_panic_handler	/* Reserved */
	b	plat_panic_handler	/* IRQ */
	b	plat_panic_handler	/* FIQ */


/*
 * The Cold boot/Reset entrypoint for SP_MIN.
 *
 * Performs the architectural EL3 (monitor) initialisation via
 * el3_entrypoint_common, runs the platform and main C setup, makes the
 * initialised .data/.bss visible to other CPUs, then restores the
 * non-secure context registers and exits monitor mode via sp_min_exit.
 */
func sp_min_entrypoint
#if !RESET_TO_SP_MIN
	/* ---------------------------------------------------------------
	 * Preceding bootloader has populated r0 with a pointer to a
	 * 'bl_params_t' structure & r1 with a pointer to platform
	 * specific structure. Stash them in r11/r12, which
	 * el3_entrypoint_common does not need as inputs, so they survive
	 * until the platform setup call below.
	 * NOTE(review): assumes el3_entrypoint_common preserves r11/r12
	 * on this path — confirm against the macro definition.
	 * ---------------------------------------------------------------
	 */
	mov	r11, r0
	mov	r12, r1

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
	 * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already set up the CPU
	 * endianness and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_set_endian=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * Relay the previous bootloader's arguments to the platform layer
	 * ---------------------------------------------------------------------
	 */
	mov	r0, r11
	mov	r1, r12
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems which have a programmable reset address,
	 * sp_min_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_set_endian=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
	 * to run so there's no argument to relay from a previous bootloader.
	 * Zero the arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	r0, #0
	mov	r1, #0
#endif /* RESET_TO_SP_MIN */

	/* Platform C setup: r0/r1 carry the (possibly zeroed) boot args. */
	bl	sp_min_early_platform_setup
	bl	sp_min_plat_arch_setup

	/* Jump to the main function */
	bl	sp_min_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * clean_dcache_range takes (base, size), hence the sub to turn
	 * the end address into a length.
	 * -------------------------------------------------------------
	 */
	ldr	r0, =__DATA_START__
	ldr	r1, =__DATA_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	/* Program the registers in cpu_context and exit monitor mode */
	mov	r0, #NON_SECURE
	bl	cm_get_context

	/*
	 * Restore the SCR from the non-secure cpu_context (r0 holds the
	 * context pointer returned above). The isb ensures the new SCR
	 * takes effect before subsequent instructions.
	 */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
	stcopr	r2, SCR
	isb

	/* Restore the non-secure SCTLR from the same context. */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
	stcopr	r2, SCTLR

	bl	smc_get_next_ctx
	/* The other cpu_context registers have been copied to smc context */
	b	sp_min_exit
endfunc sp_min_entrypoint


/*
 * SMC handling function for SP_MIN.
 *
 * Entered from the monitor vector table on an SMC exception. Saves the
 * caller's GP/mode registers into the SMC context, switches to the secure
 * world to run the runtime service dispatcher, rejects SMC64 function IDs
 * (SP_MIN is AArch32-only), then restores SCR and returns through
 * sp_min_exit with r0 pointing at the context to restore from.
 */
func handle_smc
	/* Save caller's general-purpose and mode registers to SMC context. */
	smcc_save_gp_mode_regs

	/* r0 points to smc_context */
	mov	r2, r0			/* handle */
	ldcopr	r0, SCR

	/*
	 * Save SCR in stack. r1 is pushed to meet the 8 byte
	 * stack alignment requirement.
	 */
	push	{r0, r1}
	and	r3, r0, #SCR_NS_BIT	/* flags: NS bit of the caller */

	/* Switch to Secure Mode */
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb				/* SCR change must land before dispatch */
	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
	/*
	 * Check whether an SMC64 is issued: the calling-convention bit of
	 * the function ID is set for SMC64, which AArch32 SP_MIN cannot
	 * service — answer SMC_UNK without dispatching.
	 */
	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
	beq	1f			/* SMC32 is detected */
	mov	r0, #SMC_UNK
	str	r0, [r2, #SMC_CTX_GPREG_R0]
	mov	r0, r2
	b	2f			/* Skip handling the SMC */
1:
	/* Dispatch: r0 = smc_fid, r1 = cookie, r2 = handle, r3 = flags. */
	mov	r1, #0			/* cookie */
	bl	handle_runtime_svc
2:
	/* r0 points to smc context */

	/*
	 * Restore SCR from stack: the saved r0 (SCR) pops into r1, the
	 * alignment filler pops into r2 and is discarded.
	 */
	pop	{r1, r2}
	stcopr	r1, SCR
	isb

	b	sp_min_exit
endfunc handle_smc


/*
 * The Warm boot entrypoint for SP_MIN.
 */
func sp_min_warm_entrypoint
	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 * - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
	 *   programming the reset address do we need to set the CPU endianness.
	 *   In other cases, we assume this has been taken care by the
	 *   entrypoint code.
	 *
	 * - No need to determine the type of boot, we know it is a warm boot.
	 *
	 * - Do not try to distinguish between primary and secondary CPUs, this
	 *   notion only exists for a cold boot.
	 *
	 * - No need to initialise the memory or the C runtime environment,
	 *   it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_set_endian=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=sp_min_vector_table

	/*
	 * We're about to enable MMU and participate in PSCI state coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * others). Therefore we keep data caches disabled even after enabling
	 * the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
	mov	r0, #DISABLE_DCACHE
	bl	bl32_plat_enable_mmu

#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	/*
	 * Coherency is already guaranteed (see above), so turn the data
	 * cache on early by setting SCTLR.C; the isb makes the change
	 * take effect before the warm-boot C code runs.
	 */
	ldcopr	r0, SCTLR
	orr	r0, r0, #SCTLR_C_BIT
	stcopr	r0, SCTLR
	isb
#endif

	/* Per-CPU warm-boot C initialisation (PSCI etc.). */
	bl	sp_min_warm_boot

	/* Program the registers in cpu_context and exit monitor mode */
	mov	r0, #NON_SECURE
	bl	cm_get_context

	/*
	 * Restore the SCR from the non-secure cpu_context (r0 holds the
	 * context pointer returned above); isb before touching SCTLR so
	 * the world switch is architecturally visible first.
	 */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
	stcopr	r2, SCR
	isb

	/* Restore the non-secure SCTLR from the same context. */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
	stcopr	r2, SCTLR

	bl	smc_get_next_ctx

	/* The other cpu_context registers have been copied to smc context */
	b	sp_min_exit
endfunc sp_min_warm_entrypoint

/*
 * The function to restore the registers from SMC context and return
 * to the mode restored to SPSR.
 *
 * Arguments : r0 must point to the SMC context to restore from.
 */
func sp_min_exit
	/* Reload GP and mode registers from the SMC context, then eret. */
	smcc_restore_gp_mode_regs
	eret
endfunc sp_min_exit