/*
 * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef EL3_COMMON_MACROS_S
#define EL3_COMMON_MACROS_S

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <context.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

	/*
	 * Helper macro to initialise EL3 registers we care about.
	 */
	.macro el3_arch_init_common
	/* ---------------------------------------------------------------------
	 * SCTLR_EL3 has already been initialised - read current value before
	 * modifying.
	 *
	 * SCTLR_EL3.I: Enable the instruction cache.
	 *
	 * SCTLR_EL3.SA: Enable Stack Alignment check. A SP alignment fault
	 * exception is generated if a load or store instruction executed at
	 * EL3 uses the SP as the base address and the SP is not aligned to a
	 * 16-byte boundary.
	 *
	 * SCTLR_EL3.A: Enable Alignment fault checking. All instructions that
	 * load or store one or more registers have an alignment check that the
	 * address being accessed is aligned to the size of the data element(s)
	 * being accessed.
	 * ---------------------------------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el3
	orr	x0, x0, x1
	msr	sctlr_el3, x0
	isb

#ifdef IMAGE_BL31
	/* ---------------------------------------------------------------------
	 * Initialise the per-cpu cache pointer to the CPU.
	 * This is done early to enable crash reporting to have access to crash
	 * stack. Since crash reporting depends on cpu_data to report the
	 * unhandled exception, not doing so can lead to recursive exceptions
	 * due to a NULL TPIDR_EL3.
	 * ---------------------------------------------------------------------
	 */
	bl	init_cpu_data_ptr
#endif /* IMAGE_BL31 */

	/* ---------------------------------------------------------------------
	 * Initialise SCR_EL3, setting all fields rather than relying on hw.
	 * All fields are architecturally UNKNOWN on reset. The following fields
	 * do not change during the TF lifetime. The remaining fields are set to
	 * zero here but are updated ahead of transitioning to a lower EL in the
	 * function cm_init_context_common().
	 *
	 * SCR_EL3.SIF: Set to one to disable instruction fetches from
	 * Non-secure memory.
	 *
	 * SCR_EL3.EA: Set to one to route External Aborts and SError Interrupts
	 * to EL3 when executing at any EL.
	 *
	 * SCR_EL3.EEL2: Set to one if S-EL2 is present and enabled.
	 *
	 * NOTE: Modifying EEL2 bit along with EA bit ensures that we mitigate
	 * against ERRATA_V2_3099206.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, (SCR_RESET_VAL | SCR_EA_BIT | SCR_SIF_BIT)
#if IMAGE_BL31 && defined(SPD_spmd) && SPMD_SPM_AT_SEL2
	mrs	x1, id_aa64pfr0_el1
	and	x1, x1, #(ID_AA64PFR0_SEL2_MASK << ID_AA64PFR0_SEL2_SHIFT)
	cbz	x1, 1f
	orr	x0, x0, #SCR_EEL2_BIT
#endif
1:
	msr	scr_el3, x0

	/* ---------------------------------------------------------------------
	 * Initialise MDCR_EL3, setting all fields rather than relying on hw.
	 * Some fields are architecturally UNKNOWN on reset.
	 *
	 * MDCR_EL3.SDD: Set to one to disable AArch64 Secure self-hosted debug.
	 * Debug exceptions, other than Breakpoint Instruction exceptions, are
	 * disabled from all ELs in Secure state.
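	 * All other fields are left at their MDCR_EL3_RESET_VAL settings.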
	 */
	mov_imm	x0, (MDCR_EL3_RESET_VAL | MDCR_SDD_BIT)
	msr	mdcr_el3, x0

	/* ---------------------------------------------------------------------
	 * Enable External Aborts and SError Interrupts now that the exception
	 * vectors have been setup.
	 * ---------------------------------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------------------------------
	 * Initialise CPTR_EL3, setting all fields rather than relying on hw.
	 * All fields are architecturally UNKNOWN on reset.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, CPTR_EL3_RESET_VAL
	msr	cptr_el3, x0

	/*
	 * If Data Independent Timing (DIT) functionality is implemented,
	 * always enable DIT in EL3.
	 * First assert that the FEAT_DIT build flag matches the feature id
	 * register value for DIT. If the build flag selects dynamic detection
	 * (ENABLE_FEAT_DIT > 1), DIT is simply left disabled when the feature
	 * is not implemented.
	 */
#if ENABLE_FEAT_DIT
#if ENABLE_ASSERTIONS || ENABLE_FEAT_DIT > 1
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #ID_AA64PFR0_DIT_SHIFT, #ID_AA64PFR0_DIT_LENGTH
#if ENABLE_FEAT_DIT > 1
	cbz	x0, 1f
#else
	cmp	x0, #DIT_IMPLEMENTED
	ASM_ASSERT(eq)
#endif

#endif /* ENABLE_ASSERTIONS || ENABLE_FEAT_DIT > 1 */
	mov	x0, #DIT_BIT
	msr	DIT, x0
1:
#endif /* ENABLE_FEAT_DIT */
	.endm

/* -----------------------------------------------------------------------------
 * This is the super set of actions that need to be performed during a cold boot
 * or a warm boot in EL3. This code is shared by BL1 and BL31.
 *
 * This macro will always perform reset handling, architectural initialisations
 * and stack setup. The rest of the actions are optional because they might not
 * be needed, depending on the context in which this macro is called. This is
 * why this macro is parameterised; each parameter allows some of the actions
 * to be enabled or disabled.
 *
 * _init_sctlr:
 *	Whether the macro needs to initialise SCTLR_EL3, including configuring
 *	the endianness of data accesses.
 *
 * _warm_boot_mailbox:
 *	Whether the macro needs to detect the type of boot (cold/warm). The
 *	detection is based on the platform entrypoint address: if it is zero
 *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
 *	this macro jumps to the platform entrypoint address.
 *
 * _secondary_cold_boot:
 *	Whether the macro needs to identify the CPU that is calling it: primary
 *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
 *	the platform initialisations, while the secondaries will be put in a
 *	platform-specific state in the meantime.
 *
 *	If the caller knows this macro will only be called by the primary CPU
 *	then this parameter can be defined to 0 to skip this step.
 *
 * _init_memory:
 *	Whether the macro needs to initialise the memory.
 *
 * _init_c_runtime:
 *	Whether the macro needs to initialise the C runtime environment.
 *
 * _exception_vectors:
 *	Address of the exception vectors to program in the VBAR_EL3 register.
 *
 * _pie_fixup_size:
 *	Size of memory region to fixup Global Descriptor Table (GDT).
 *
 *	A non-zero value is expected when the firmware needs the GDT to be
 *	fixed up.
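 *
 * A minimal usage sketch is shown below. The argument values and the vector
 * symbol are illustrative only; real invocations differ per image and build
 * configuration:
 *
 *	el3_entrypoint_common					\
 *		_init_sctlr=1					\
 *		_warm_boot_mailbox=1				\
 *		_secondary_cold_boot=1				\
 *		_init_memory=1					\
 *		_init_c_runtime=1				\
 *		_exception_vectors=my_exception_vectors		\
 *		_pie_fixup_size=0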
 *
 * -----------------------------------------------------------------------------
 */
	.macro el3_entrypoint_common					\
		_init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,	\
		_init_memory, _init_c_runtime, _exception_vectors,	\
		_pie_fixup_size

	.if \_init_sctlr
		/* -------------------------------------------------------------
		 * This is the initialisation of SCTLR_EL3 and so must ensure
		 * that all fields are explicitly set rather than relying on hw.
		 * Some fields reset to an IMPLEMENTATION DEFINED value and
		 * others are architecturally UNKNOWN on reset.
		 *
		 * SCTLR.EE: Set the CPU endianness before doing anything that
		 * might involve memory reads or writes. Set to zero to select
		 * Little Endian.
		 *
		 * SCTLR_EL3.WXN: For the EL3 translation regime, this field can
		 * force all memory regions that are writeable to be treated as
		 * XN (Execute-never). Set to zero so that this control has no
		 * effect on memory access permissions.
		 *
		 * SCTLR_EL3.SA: Set to zero to disable Stack Alignment check.
		 *
		 * SCTLR_EL3.A: Set to zero to disable Alignment fault checking.
		 *
		 * SCTLR.DSSBS: Set to zero to disable speculation store bypass
		 * safe behaviour upon exception entry to EL3.
		 * -------------------------------------------------------------
		 */
		mov_imm	x0, (SCTLR_RESET_VAL & ~(SCTLR_EE_BIT | SCTLR_WXN_BIT \
				| SCTLR_SA_BIT | SCTLR_A_BIT | SCTLR_DSSBS_BIT))
#if ENABLE_FEAT_RAS
		/* If FEAT_RAS is present assume FEAT_IESB is also present */
		orr	x0, x0, #SCTLR_IESB_BIT
#endif
		msr	sctlr_el3, x0
		isb
	.endif /* _init_sctlr */

	.if \_warm_boot_mailbox
		/* -------------------------------------------------------------
		 * This code will be executed for both warm and cold resets.
		 * Now is the time to distinguish between the two.
		 * Query the platform entrypoint address; if it is not zero,
		 * then it is a warm boot, so jump to that address.
		 * -------------------------------------------------------------
		 */
		bl	plat_get_my_entrypoint
		cbz	x0, do_cold_boot
		br	x0

	do_cold_boot:
	.endif /* _warm_boot_mailbox */

	.if \_pie_fixup_size
#if ENABLE_PIE
		/*
		 * ------------------------------------------------------------
		 * If PIE is enabled, fix up the Global Descriptor Table, only
		 * once, during the primary core's cold boot path.
		 *
		 * The compile-time base address required for the fixup is
		 * calculated using the "pie_fixup" label, which is located
		 * within the first page of the image.
		 * ------------------------------------------------------------
		 */
	pie_fixup:
		ldr	x0, =pie_fixup
		and	x0, x0, #~(PAGE_SIZE_MASK)
		mov_imm	x1, \_pie_fixup_size
		add	x1, x1, x0
		bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */
	.endif /* _pie_fixup_size */

	/* ---------------------------------------------------------------------
	 * Set the exception vectors.
	 * ---------------------------------------------------------------------
	 */
	adr	x0, \_exception_vectors
	msr	vbar_el3, x0
	isb

#if !(defined(IMAGE_BL2) && ENABLE_RME)
	/* ---------------------------------------------------------------------
	 * It is a cold boot.
	 * Perform any processor specific actions upon reset e.g. cache, TLB
	 * invalidations etc.
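	 * This is skipped for BL2 when RME is enabled: in that configuration
	 * BL2 runs at EL3 but is entered from BL1, which has already performed
	 * the reset handling.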
	 * ---------------------------------------------------------------------
	 */
	bl	reset_handler
#endif

	el3_arch_init_common

	.if \_secondary_cold_boot
		/* -------------------------------------------------------------
		 * Check if this is a primary or secondary CPU cold boot.
		 * The primary CPU will set up the platform while the
		 * secondaries are placed in a platform-specific state until the
		 * primary CPU performs the necessary actions to bring them out
		 * of that state and allows entry into the OS.
		 * -------------------------------------------------------------
		 */
		bl	plat_is_my_cpu_primary
		cbnz	w0, do_primary_cold_boot

		/* This is a cold boot on a secondary CPU */
		bl	plat_secondary_cold_boot_setup
		/* plat_secondary_cold_boot_setup() is not supposed to return */
		bl	el3_panic

	do_primary_cold_boot:
	.endif /* _secondary_cold_boot */

	/* ---------------------------------------------------------------------
	 * Initialise memory now. Secondary CPU initialisation won't get to this
	 * point.
	 * ---------------------------------------------------------------------
	 */

	.if \_init_memory
		bl	platform_mem_init
	.endif /* _init_memory */

	/* ---------------------------------------------------------------------
	 * Init C runtime environment:
	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
	 *       - the .bss section;
	 *       - the coherent memory section (if any).
	 *   - Relocate the data section from ROM to RAM, if required.
	 * ---------------------------------------------------------------------
	 */
	.if \_init_c_runtime
#if defined(IMAGE_BL31) || (defined(IMAGE_BL2) && \
	((RESET_TO_BL2 && BL2_INV_DCACHE) || ENABLE_RME))
		/* -------------------------------------------------------------
		 * Invalidate the RW memory used by the image. This includes
		 * the data and NOBITS sections. This is done to safeguard
		 * against possible corruption of this memory by dirty cache
		 * lines in a system cache as a result of use by an earlier
		 * boot loader stage. If PIE is enabled however, RO sections
		 * including the GOT may be modified during the pie fixup.
		 * Therefore, to be on the safe side, invalidate the entire
		 * image region if PIE is enabled.
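		 * The invalidation below therefore starts at __TEXT_START__ /
		 * __RO_START__ when PIE is enabled and at __RW_START__
		 * otherwise, and always extends up to __RW_END__.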
		 * -------------------------------------------------------------
		 */
#if ENABLE_PIE
#if SEPARATE_CODE_AND_RODATA
		adrp	x0, __TEXT_START__
		add	x0, x0, :lo12:__TEXT_START__
#else
		adrp	x0, __RO_START__
		add	x0, x0, :lo12:__RO_START__
#endif /* SEPARATE_CODE_AND_RODATA */
#else
		adrp	x0, __RW_START__
		add	x0, x0, :lo12:__RW_START__
#endif /* ENABLE_PIE */
		adrp	x1, __RW_END__
		add	x1, x1, :lo12:__RW_END__
		sub	x1, x1, x0
		bl	inv_dcache_range
#if defined(IMAGE_BL31) && SEPARATE_NOBITS_REGION
		adrp	x0, __NOBITS_START__
		add	x0, x0, :lo12:__NOBITS_START__
		adrp	x1, __NOBITS_END__
		add	x1, x1, :lo12:__NOBITS_END__
		sub	x1, x1, x0
		bl	inv_dcache_range
#endif
#if defined(IMAGE_BL2) && SEPARATE_BL2_NOLOAD_REGION
		adrp	x0, __BL2_NOLOAD_START__
		add	x0, x0, :lo12:__BL2_NOLOAD_START__
		adrp	x1, __BL2_NOLOAD_END__
		add	x1, x1, :lo12:__BL2_NOLOAD_END__
		sub	x1, x1, x0
		bl	inv_dcache_range
#endif
#endif
		adrp	x0, __BSS_START__
		add	x0, x0, :lo12:__BSS_START__

		adrp	x1, __BSS_END__
		add	x1, x1, :lo12:__BSS_END__
		sub	x1, x1, x0
		bl	zeromem

#if USE_COHERENT_MEM
		adrp	x0, __COHERENT_RAM_START__
		add	x0, x0, :lo12:__COHERENT_RAM_START__
		adrp	x1, __COHERENT_RAM_END_UNALIGNED__
		add	x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
		sub	x1, x1, x0
		bl	zeromem
#endif

#if defined(IMAGE_BL1) || \
	(defined(IMAGE_BL2) && RESET_TO_BL2 && BL2_IN_XIP_MEM)
		adrp	x0, __DATA_RAM_START__
		add	x0, x0, :lo12:__DATA_RAM_START__
		adrp	x1, __DATA_ROM_START__
		add	x1, x1, :lo12:__DATA_ROM_START__
		adrp	x2, __DATA_RAM_END__
		add	x2, x2, :lo12:__DATA_RAM_END__
		sub	x2, x2, x0
		bl	memcpy16
#endif
	.endif /* _init_c_runtime */

	/* ---------------------------------------------------------------------
	 * Use SP_EL0 for the C runtime stack.
	 * ---------------------------------------------------------------------
	 */
	msr	spsel, #0

	/* ---------------------------------------------------------------------
	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
	 * the MMU is enabled. There is no risk of reading stale stack memory
	 * after enabling the MMU as only the primary CPU is running at the
	 * moment.
	 * ---------------------------------------------------------------------
	 */
	bl	plat_set_my_stack

#if STACK_PROTECTOR_ENABLED
	.if \_init_c_runtime
	bl	update_stack_protector_canary
	.endif /* _init_c_runtime */
#endif
	.endm

	.macro apply_at_speculative_wa
#if ERRATA_SPECULATIVE_AT
	/*
	 * This macro expects that x30 has already been saved.
	 * Also save x29, which is used by the called function.
	 */
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	bl	save_and_update_ptw_el1_sys_regs
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif
	.endm

	.macro restore_ptw_el1_sys_regs
#if ERRATA_SPECULATIVE_AT
	/* -----------------------------------------------------------
	 * In case of ERRATA_SPECULATIVE_AT, the order below must be
	 * followed so that the page table walk is not enabled until
	 * all EL1 system registers have been restored. TCR_EL1 must
	 * be updated last, as it restores the previous stage 1 page
	 * table walk settings (the TCR_EL1.EPDx bits). ISBs ensure
	 * that the CPU performs the steps below in order.
	 *
	 * 1. Ensure all other system registers are written before
	 *    updating SCTLR_EL1, using an ISB.
	 * 2. Restore the SCTLR_EL1 register.
	 * 3. Ensure SCTLR_EL1 is written successfully, using an ISB.
	 * 4. Restore the TCR_EL1 register.
	 * -----------------------------------------------------------
	 */
	isb
	ldp	x28, x29, [sp, #CTX_ERRATA_SPEC_AT_OFFSET + CTX_ERRATA_SPEC_AT_SCTLR_EL1]
	msr	sctlr_el1, x28
	isb
	msr	tcr_el1, x29
#endif
	.endm

/* -----------------------------------------------------------------
 * The macro below inspects an SCR_EL3 value (read from the context
 * structure) to determine the security state that will be entered
 * upon ERET: \_ret is set to 0 for Secure or 1 for Non-secure based
 * on SCR_EL3.NS, and to 2 for Realm when SCR_EL3.NSE is set.
 * ------------------------------------------------------------------
 */
	.macro get_security_state _ret:req, _scr_reg:req
	ubfx	\_ret, \_scr_reg, #SCR_NSE_SHIFT, #1
	cmp	\_ret, #1
	beq	realm_state
	bfi	\_ret, \_scr_reg, #0, #1
	b	end
	realm_state:
	mov	\_ret, #2
	end:
	.endm

#endif /* EL3_COMMON_MACROS_S */