/*
 * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <common/bl_common.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/pmf/aarch32/pmf_asm_macros.S>
#include <lib/runtime_instr.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <smccc_helpers.h>
#include <smccc_macros.S>

	.globl	sp_min_vector_table
	.globl	sp_min_entrypoint
	.globl	sp_min_warm_entrypoint
	.globl	sp_min_handle_smc
	.globl	sp_min_handle_fiq

/* Size of the BL32 image, passed to the PIE relocation fixup code */
#define FIXUP_SIZE ((BL32_LIMIT) - (BL32_BASE))

	/*
	 * Route FIQs to Monitor mode via the SCR.
	 * \reg : scratch register used for the SCR read-modify-write.
	 */
	.macro route_fiq_to_sp_min reg
	/* -----------------------------------------------------
	 * FIQs are secure interrupts trapped by Monitor and non
	 * secure is not allowed to mask the FIQs.
	 * SCR.FIQ = 1 : take FIQs to Monitor mode.
	 * SCR.FW  = 0 : CPSR.F not writable from Non-secure.
	 * -----------------------------------------------------
	 */
	ldcopr	\reg, SCR
	orr	\reg, \reg, #SCR_FIQ_BIT
	bic	\reg, \reg, #SCR_FW_BIT
	stcopr	\reg, SCR
	.endm

	/*
	 * Clear the local exclusive monitor on entry to Monitor mode.
	 * Expands to nothing unless building for ARMv7.
	 */
	.macro clrex_on_monitor_entry
#if (ARM_ARCH_MAJOR == 7)
	/*
	 * ARMv7 architectures need to clear the exclusive access when
	 * entering Monitor mode.
	 */
	clrex
#endif
	.endm

/*
 * Monitor mode exception vector table for SP_MIN.
 */
vector_base sp_min_vector_table
	b	sp_min_entrypoint		/* Reset */
	b	plat_panic_handler		/* Undef */
	b	sp_min_handle_smc		/* Syscall */
	b	report_prefetch_abort		/* Prefetch abort */
	b	report_data_abort		/* Data abort */
	b	plat_panic_handler		/* Reserved */
	b	plat_panic_handler		/* IRQ */
	b	sp_min_handle_fiq		/* FIQ */


/*
 * The Cold boot/Reset entrypoint for SP_MIN
 */
func sp_min_entrypoint
	/* ---------------------------------------------------------------
	 * Stash the previous bootloader arguments r0 - r3 for later use.
	 * They are moved back into r0 - r3 before calling sp_min_setup()
	 * below.
	 * ---------------------------------------------------------------
	 */
	mov	r9, r0
	mov	r10, r1
	mov	r11, r2
	mov	r12, r3

#if !RESET_TO_SP_MIN
	/* ---------------------------------------------------------------------
	 * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
	 * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised the
	 * SCTLR, including the CPU endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table		\
		_pie_fixup_size=FIXUP_SIZE
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems which have a programmable reset address,
	 * sp_min_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table		\
		_pie_fixup_size=FIXUP_SIZE
#endif /* RESET_TO_SP_MIN */

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r4
#endif

	/* ---------------------------------------------------------------------
	 * Relay the previous bootloader's arguments to the platform layer
	 * ---------------------------------------------------------------------
	 */
	mov	r0, r9
	mov	r1, r10
	mov	r2, r11
	mov	r3, r12
	bl	sp_min_setup

	/* Jump to the main function */
	bl	sp_min_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	ldr	r0, =__DATA_START__
	ldr	r1, =__DATA_END__
	sub	r1, r1, r0		/* r1 = size of .data in bytes */
	bl	clean_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_END__
	sub	r1, r1, r0		/* r1 = size of .bss in bytes */
	bl	clean_dcache_range

	ldr	r0, =__PER_CPU_START__
	ldr	r1, =__PER_CPU_END__
	sub	r1, r1, r0		/* r1 = size of per-cpu region */
	bl	clean_dcache_range

	bl	smc_get_next_ctx

	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_entrypoint


/*
 * SMC handling function for SP_MIN.
 */
func sp_min_handle_smc
	/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it on top of the C runtime stack.
	 * The value will be saved to the per-cpu data once the C stack is
	 * available, as a valid stack is needed to call _cpu_data()
	 */
	strd	r0, r1, [sp, #SMC_CTX_GPREG_R0]	/* free r0/r1 as scratch */
	ldcopr16 r0, r1, CNTPCT_64		/* 64-bit counter timestamp */
	ldr	lr, [sp, #SMC_CTX_SP_MON]
	strd	r0, r1, [lr, #-8]!		/* push timestamp on C stack */
	str	lr, [sp, #SMC_CTX_SP_MON]
	ldrd	r0, r1, [sp, #SMC_CTX_GPREG_R0]	/* restore caller's r0/r1 */
#endif

	smccc_save_gp_mode_regs

	clrex_on_monitor_entry

	/*
	 * `sp` still points to `smc_ctx_t`. Save it to a register
	 * and restore the C runtime stack pointer to `sp`.
	 */
	mov	r2, sp				/* handle */
	ldr	sp, [r2, #SMC_CTX_SP_MON]

#if ENABLE_RUNTIME_INSTRUMENTATION
	/* Save handle to a callee saved register */
	mov	r6, r2

	/*
	 * Restore the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	ldrd	r4, r5, [sp], #8		/* pop timestamp pushed above */
	bl	_cpu_data
	strd	r4, r5, [r0, #CPU_DATA_CPU_DATA_PMF_TS]

	/* Restore handle */
	mov	r2, r6
#endif

	/* Derive the `flags` argument from the saved SCR.NS bit */
	ldr	r0, [r2, #SMC_CTX_SCR]
	and	r3, r0, #SCR_NS_BIT		/* flags */

	/* Switch to Secure Mode */
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
	/* Check whether an SMC64 is issued (CC bit set in function ID) */
	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
	beq	1f
	/* SMC32 is not detected. Return error back to caller */
	mov	r0, #SMC_UNK
	str	r0, [r2, #SMC_CTX_GPREG_R0]
	mov	r0, r2
	b	sp_min_exit
1:
	/* SMC32 is detected */
	mov	r1, #0				/* cookie */
	bl	handle_runtime_svc

	/* `r0` points to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_handle_smc

/*
 * Secure Interrupts handling function for SP_MIN.
 */
func sp_min_handle_fiq
#if !SP_MIN_WITH_SECURE_FIQ
	/* Secure FIQ handling not built in: any FIQ here is fatal */
	b	plat_panic_handler
#else
	/* FIQ has a +4 offset for lr compared to preferred return address */
	sub	lr, lr, #4
	/* On FIQ entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

	smccc_save_gp_mode_regs

	clrex_on_monitor_entry

	/* load run-time stack */
	mov	r2, sp				/* r2 = handle (smc_ctx_t) */
	ldr	sp, [r2, #SMC_CTX_SP_MON]

	/* Switch to Secure Mode */
	ldr	r0, [r2, #SMC_CTX_SCR]
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	/* Preserve the context handle across the C handler */
	push	{r2, r3}
	bl	sp_min_fiq
	pop	{r0, r3}			/* r0 = handle for sp_min_exit */

	b	sp_min_exit
#endif
endfunc sp_min_handle_fiq

/*
 * The Warm boot entrypoint for SP_MIN.
 */
func sp_min_warm_entrypoint
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * This timestamp update happens with cache off. The next
	 * timestamp collection will need to do cache maintenance prior
	 * to timestamp update.
	 */
	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_HW_LOW_PWR
	ldcopr16 r2, r3, CNTPCT_64		/* 64-bit counter timestamp */
	strd	r2, r3, [r0]
#endif
	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 * - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
	 *   programming the reset address do we need to initialise the SCTLR.
	 *   In other cases, we assume this has been taken care of by the
	 *   entrypoint code.
	 *
	 * - No need to determine the type of boot, we know it is a warm boot.
	 *
	 * - Do not try to distinguish between primary and secondary CPUs, this
	 *   notion only exists for a cold boot.
	 *
	 * - No need to initialise the memory or the C runtime environment,
	 *   it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=sp_min_vector_table		\
		_pie_fixup_size=0

	/*
	 * We're about to enable MMU and participate in PSCI state coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * others). Therefore we keep data caches disabled even after enabling
	 * the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	mov	r0, #0				/* enable data cache with MMU */
#else
	mov	r0, #DISABLE_DCACHE		/* MMU on, data cache off */
#endif
	bl	bl32_plat_enable_mmu

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r0
#endif

	bl	sp_min_warm_boot
	bl	smc_get_next_ctx
	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */

#if ENABLE_RUNTIME_INSTRUMENTATION
	/* Save smc_ctx_t */
	mov	r5, r0

	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_PSCI
	mov	r4, r0				/* r4 = timestamp address */

	/*
	 * Invalidate before updating timestamp to ensure previous timestamp
	 * updates on the same cache line with caches disabled are properly
	 * seen by the same core. Without the cache invalidate, the core might
	 * write into a stale cache line.
	 */
	mov	r1, #PMF_TS_SIZE
	bl	inv_dcache_range

	ldcopr16 r0, r1, CNTPCT_64		/* 64-bit counter timestamp */
	strd	r0, r1, [r4]

	/* Restore smc_ctx_t */
	mov	r0, r5
#endif

	b	sp_min_exit
endfunc sp_min_warm_entrypoint

/*
 * The function to restore the registers from SMC context and return
 * to the mode restored to SPSR.
 *
 * Arguments : r0 must point to the SMC context to restore from.
 */
func sp_min_exit
	monitor_exit
endfunc sp_min_exit