/*
 * Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef EL3_COMMON_MACROS_S
#define EL3_COMMON_MACROS_S

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <context.h>

	/*
	 * Helper macro to initialise EL3 registers we care about.
	 */
	.macro el3_arch_init_common
	/* ---------------------------------------------------------------------
	 * SCTLR_EL3 has already been initialised - read current value before
	 * modifying.
	 *
	 * SCTLR_EL3.I: Enable the instruction cache.
	 *
	 * SCTLR_EL3.SA: Enable Stack Alignment check. An SP alignment fault
	 *  exception is generated if a load or store instruction executed at
	 *  EL3 uses the SP as the base address and the SP is not aligned to a
	 *  16-byte boundary.
	 *
	 * SCTLR_EL3.A: Enable Alignment fault checking. All instructions that
	 *  load or store one or more registers have an alignment check that the
	 *  address being accessed is aligned to the size of the data element(s)
	 *  being accessed.
	 *
	 * SCTLR_EL3.BT: PAuth instructions are compatible with bti jc.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x1, (SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el3
#if ENABLE_BTI
	bic	x0, x0, #SCTLR_BT_BIT
#endif
	orr	x0, x0, x1
	msr	sctlr_el3, x0
	isb

#if ENABLE_FEAT_SCTLR2
#if ENABLE_FEAT_SCTLR2 > 1
	is_feat_sctlr2_present_asm x1
	beq	feat_sctlr2_not_supported\@
#endif
	mov	x1, #SCTLR2_RESET_VAL
	msr	SCTLR2_EL3, x1
feat_sctlr2_not_supported\@:
#endif

#ifdef IMAGE_BL31
	/* ---------------------------------------------------------------------
	 * Initialise the per-cpu data pointer for this CPU.
	 * This is done early so that crash reporting has access to the crash
	 * stack. Since crash reporting depends on cpu_data to report the
	 * unhandled exception, not doing so can lead to recursive exceptions
	 * due to a NULL TPIDR_EL3.
	 * ---------------------------------------------------------------------
	 */
	bl	plat_my_core_pos
	/* index into the cpu_data */
	mov_imm	x1, CPU_DATA_SIZE
	mul	x0, x0, x1
	adr_l	x1, percpu_data
	add	x0, x0, x1
	msr	tpidr_el3, x0
#endif /* IMAGE_BL31 */

	/* ---------------------------------------------------------------------
	 * Initialise SCR_EL3, setting all fields rather than relying on hw.
	 * All fields are architecturally UNKNOWN on reset. The following fields
	 * do not change during the TF lifetime. The remaining fields are set to
	 * zero here but are updated ahead of transitioning to a lower EL in the
	 * function cm_init_context_common().
	 *
	 * SCR_EL3.EEL2: Set to one if S-EL2 is present and enabled.
	 *
	 * NOTE: Modifying the EEL2 bit along with the EA bit ensures that we
	 * mitigate against ERRATA_V2_3099206.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, SCR_RESET_VAL
#if IMAGE_BL31 && defined(SPD_spmd) && SPMD_SPM_AT_SEL2
	mrs	x1, id_aa64pfr0_el1
	and	x1, x1, #(ID_AA64PFR0_SEL2_MASK << ID_AA64PFR0_SEL2_SHIFT)
	cbz	x1, 1f
	orr	x0, x0, #SCR_EEL2_BIT
#endif
1:
	msr	scr_el3, x0

	/* ---------------------------------------------------------------------
	 * Initialise MDCR_EL3, setting all fields rather than relying on hw.
	 * Some fields are architecturally UNKNOWN on reset.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, MDCR_EL3_RESET_VAL
	msr	mdcr_el3, x0

	/* ---------------------------------------------------------------------
	 * Initialise CPTR_EL3, setting all fields rather than relying on hw.
	 * All fields are architecturally UNKNOWN on reset.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, CPTR_EL3_RESET_VAL
	msr	cptr_el3, x0

	.endm
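/*
 * For reference, the per-cpu pointer initialisation above is roughly
 * equivalent to the following C-level sketch (illustrative only; the
 * authoritative cpu_data definitions live in lib/el3_runtime):
 *
 *	tpidr_el3 = (uintptr_t)&percpu_data[plat_my_core_pos()];
 */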
/* -----------------------------------------------------------------------------
 * This is the superset of actions that need to be performed during a cold boot
 * or a warm boot in EL3. This code is shared by BL1 and BL31.
 *
 * This macro will always perform reset handling, architectural initialisations
 * and stack setup. The rest of the actions are optional because they might not
 * be needed, depending on the context in which this macro is called. This is
 * why this macro is parameterised; each parameter allows enabling or disabling
 * some actions.
 *
 * _init_sctlr:
 *	Whether the macro needs to initialise SCTLR_EL3, including configuring
 *	the endianness of data accesses.
 *
 * _warm_boot_mailbox:
 *	Whether the macro needs to detect the type of boot (cold/warm). The
 *	detection is based on the platform entrypoint address: if it is zero
 *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
 *	this macro jumps to the platform entrypoint address.
 *
 * _secondary_cold_boot:
 *	Whether the macro needs to identify the CPU that is calling it: primary
 *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
 *	the platform initialisations, while the secondaries will be put in a
 *	platform-specific state in the meantime.
 *
 *	If the caller knows this macro will only be called by the primary CPU
 *	then this parameter can be defined to 0 to skip this step.
 *
 * _init_memory:
 *	Whether the macro needs to initialise the memory.
 *
 * _init_c_runtime:
 *	Whether the macro needs to initialise the C runtime environment.
 *
 * _exception_vectors:
 *	Address of the exception vectors to program in the VBAR_EL3 register.
 *
 * _pie_fixup_size:
 *	Size of the memory region in which to fix up the Global Descriptor
 *	Table (GDT).
 *
 *	A non-zero value is expected when the firmware needs the GDT to be
 *	fixed up.
 * -----------------------------------------------------------------------------
 */
	.macro el3_entrypoint_common					\
		_init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,	\
		_init_memory, _init_c_runtime, _exception_vectors,	\
		_pie_fixup_size

	.if \_init_sctlr
		/* -------------------------------------------------------------
		 * This is the initialisation of SCTLR_EL3 and so must ensure
		 * that all fields are explicitly set rather than relying on hw.
		 * Some fields reset to an IMPLEMENTATION DEFINED value and
		 * others are architecturally UNKNOWN on reset.
		 *
		 * SCTLR.EE: Set the CPU endianness before doing anything that
		 *  might involve memory reads or writes. Set to zero to select
		 *  Little Endian.
		 *
		 * SCTLR_EL3.WXN: For the EL3 translation regime, this field can
		 *  force all memory regions that are writeable to be treated as
		 *  XN (Execute-never). Set to zero so that this control has no
		 *  effect on memory access permissions.
		 *
		 * SCTLR_EL3.SA: Set to zero to disable Stack Alignment check.
		 *
		 * SCTLR_EL3.A: Set to zero to disable Alignment fault checking.
		 *
		 * SCTLR.DSSBS: Set to zero to disable speculation store bypass
		 *  safe behaviour upon exception entry to EL3.
		 * -------------------------------------------------------------
		 */
		mov_imm	x0, (SCTLR_RESET_VAL & ~(SCTLR_EE_BIT | SCTLR_WXN_BIT \
				| SCTLR_SA_BIT | SCTLR_A_BIT | SCTLR_DSSBS_BIT))
#if ENABLE_FEAT_RAS
		/* If FEAT_RAS is present, assume FEAT_IESB is also present */
		orr	x0, x0, #SCTLR_IESB_BIT
#endif
		msr	sctlr_el3, x0
		isb
	.endif /* _init_sctlr */

	.if \_warm_boot_mailbox
		/* -------------------------------------------------------------
		 * This code will be executed for both warm and cold resets.
		 * Now is the time to distinguish between the two.
		 * Query the platform entrypoint address and if it is not zero
		 * then it means it is a warm boot so jump to this address.
		 * -------------------------------------------------------------
		 */
		bl	plat_get_my_entrypoint
		cbz	x0, do_cold_boot
		br	x0

	do_cold_boot:
	.endif /* _warm_boot_mailbox */

	.if \_pie_fixup_size
#if ENABLE_PIE
		/*
		 * ------------------------------------------------------------
		 * If PIE is enabled, fix up the Global Descriptor Table only
		 * once, during the primary core cold boot path.
		 *
		 * The compile-time base address, required for the fixup, is
		 * calculated using the "pie_fixup" label present within the
		 * first page.
		 * ------------------------------------------------------------
		 */
	pie_fixup:
		ldr	x0, =pie_fixup
		and	x0, x0, #~(PAGE_SIZE_MASK)
		mov_imm	x1, \_pie_fixup_size
		add	x1, x1, x0
		bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */
	.endif /* _pie_fixup_size */

	/* ---------------------------------------------------------------------
	 * Set the exception vectors.
	 * ---------------------------------------------------------------------
	 */
	adr	x0, \_exception_vectors
	msr	vbar_el3, x0
	isb

	call_reset_handler

	el3_arch_init_common

	/* ---------------------------------------------------------------------
	 * Set up the EL3 execution context (i.e. root_context).
	 * ---------------------------------------------------------------------
	 */
	setup_el3_execution_context

	.if \_secondary_cold_boot
		/* -------------------------------------------------------------
		 * Check if this is a primary or secondary CPU cold boot.
		 * The primary CPU will set up the platform while the
		 * secondaries are placed in a platform-specific state until the
		 * primary CPU performs the necessary actions to bring them out
		 * of that state and allows entry into the OS.
		 * -------------------------------------------------------------
		 */
		bl	plat_is_my_cpu_primary
		cbnz	w0, do_primary_cold_boot

		/* This is a cold boot on a secondary CPU */
		bl	plat_secondary_cold_boot_setup
		/* plat_secondary_cold_boot_setup() is not supposed to return */
		bl	el3_panic

	do_primary_cold_boot:
	.endif /* _secondary_cold_boot */

	/* ---------------------------------------------------------------------
	 * Initialize memory now. Secondary CPU initialization won't get to this
	 * point.
	 * ---------------------------------------------------------------------
	 */

	.if \_init_memory
		bl	platform_mem_init
	.endif /* _init_memory */

	/* ---------------------------------------------------------------------
	 * Init C runtime environment:
	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
	 *       - the .bss section;
	 *       - the coherent memory section (if any).
	 *   - Relocate the data section from ROM to RAM, if required.
	 * ---------------------------------------------------------------------
	 */
	.if \_init_c_runtime
#if defined(IMAGE_BL31) || (defined(IMAGE_BL2) && \
	((RESET_TO_BL2 && BL2_INV_DCACHE) || ENABLE_RME))
		/* -------------------------------------------------------------
		 * Invalidate the RW memory used by the BL31 image. This
		 * includes the data and NOBITS sections. This is done to
		 * safeguard against possible corruption of this memory by
		 * dirty cache lines in a system cache as a result of use by
		 * an earlier boot loader stage. If PIE is enabled, however,
		 * RO sections including the GOT may be modified during
		 * pie fixup. Therefore, to be on the safe side, invalidate
		 * the entire image region if PIE is enabled.
		 * -------------------------------------------------------------
		 */
#if ENABLE_PIE
#if SEPARATE_CODE_AND_RODATA
		adrp	x0, __TEXT_START__
		add	x0, x0, :lo12:__TEXT_START__
#else
		adrp	x0, __RO_START__
		add	x0, x0, :lo12:__RO_START__
#endif /* SEPARATE_CODE_AND_RODATA */
#else
		adrp	x0, __RW_START__
		add	x0, x0, :lo12:__RW_START__
#endif /* ENABLE_PIE */
		adrp	x1, __RW_END__
		add	x1, x1, :lo12:__RW_END__
		sub	x1, x1, x0
		bl	inv_dcache_range
#if defined(IMAGE_BL31) && SEPARATE_NOBITS_REGION
		adrp	x0, __NOBITS_START__
		add	x0, x0, :lo12:__NOBITS_START__
		adrp	x1, __NOBITS_END__
		add	x1, x1, :lo12:__NOBITS_END__
		sub	x1, x1, x0
		bl	inv_dcache_range
#endif
#if defined(IMAGE_BL2) && SEPARATE_BL2_NOLOAD_REGION
		adrp	x0, __BL2_NOLOAD_START__
		add	x0, x0, :lo12:__BL2_NOLOAD_START__
		adrp	x1, __BL2_NOLOAD_END__
		add	x1, x1, :lo12:__BL2_NOLOAD_END__
		sub	x1, x1, x0
		bl	inv_dcache_range
#endif
#endif
		adrp	x0, __BSS_START__
		add	x0, x0, :lo12:__BSS_START__

		adrp	x1, __BSS_END__
		add	x1, x1, :lo12:__BSS_END__
		sub	x1, x1, x0
		bl	zeromem

#if USE_COHERENT_MEM
		adrp	x0, __COHERENT_RAM_START__
		add	x0, x0, :lo12:__COHERENT_RAM_START__
		adrp	x1, __COHERENT_RAM_END_UNALIGNED__
		add	x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
		sub	x1, x1, x0
		bl	zeromem
#endif

#if defined(IMAGE_BL1) || \
	(defined(IMAGE_BL2) && RESET_TO_BL2 && BL2_IN_XIP_MEM) || \
	(defined(IMAGE_BL31) && SEPARATE_RWDATA_REGION)

		adrp	x0, __DATA_RAM_START__
		add	x0, x0, :lo12:__DATA_RAM_START__
		adrp	x1, __DATA_ROM_START__
		add	x1, x1, :lo12:__DATA_ROM_START__
		adrp	x2, __DATA_RAM_END__
		add	x2, x2, :lo12:__DATA_RAM_END__
		sub	x2, x2, x0
		bl	memcpy16
#endif
	.endif /* _init_c_runtime */

	/* ---------------------------------------------------------------------
	 * Use SP_EL0 for the C runtime stack.
	 * ---------------------------------------------------------------------
	 */
	msr	spsel, #0

	/* ---------------------------------------------------------------------
	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
	 * the MMU is enabled. There is no risk of reading stale stack memory
	 * after enabling the MMU as only the primary CPU is running at the
	 * moment.
	 * ---------------------------------------------------------------------
	 */
	bl	plat_set_my_stack

#if STACK_PROTECTOR_ENABLED
	.if \_init_c_runtime
	bl	update_stack_protector_canary
	.endif /* _init_c_runtime */
#endif
	.endm
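/*
 * Illustrative use only: a typical BL31 entrypoint invokes the macro above
 * along these lines (parameter values are an example modelled on
 * bl31_entrypoint.S; consult the actual image entrypoints in the tree):
 *
 *	el3_entrypoint_common					\
 *		_init_sctlr=1					\
 *		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
 *		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
 *		_init_memory=1					\
 *		_init_c_runtime=1				\
 *		_exception_vectors=runtime_exceptions		\
 *		_pie_fixup_size=BL31_LIMIT - BL31_BASE
 */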
	.macro apply_at_speculative_wa
#if ERRATA_SPECULATIVE_AT
	/*
	 * This macro expects that x30 has already been saved.
	 * Also, save x29 which will be used in the called function.
	 */
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	bl	save_and_update_ptw_el1_sys_regs
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif
	.endm
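/*
 * Illustrative pairing (a sketch, not the exact TF-A call sites): the
 * workaround is applied on entry to EL3 once the link register has been
 * saved to the context, and unwound by restore_ptw_el1_sys_regs (defined
 * below) just before exiting EL3:
 *
 *	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
 *	apply_at_speculative_wa
 *	...
 *	restore_ptw_el1_sys_regs
 *	exception_return
 */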
	.macro restore_ptw_el1_sys_regs
#if ERRATA_SPECULATIVE_AT
	/* -----------------------------------------------------------
	 * In case of ERRATA_SPECULATIVE_AT, the following order must
	 * be observed to ensure that the page table walk is not
	 * enabled until all EL1 system registers have been restored.
	 * The TCR_EL1 register must be updated last, as it restores
	 * the previous stage 1 page table walk setting, i.e. the
	 * TCR_EL1.EPDx bits. The ISBs ensure that the CPU performs
	 * the steps below in order:
	 *
	 * 1. Ensure all other system registers are written before
	 *    updating SCTLR_EL1, using an ISB.
	 * 2. Restore the SCTLR_EL1 register.
	 * 3. Ensure SCTLR_EL1 has been written, using an ISB.
	 * 4. Restore the TCR_EL1 register.
	 * -----------------------------------------------------------
	 */
	isb
	ldp	x28, x29, [sp, #CTX_ERRATA_SPEC_AT_OFFSET + CTX_ERRATA_SPEC_AT_SCTLR_EL1]
	msr	sctlr_el1, x28
	isb
	msr	tcr_el1, x29
#endif
	.endm

/* -----------------------------------------------------------------
 * The below macro inspects the saved SCR_EL3 value (passed in
 * \_scr_reg) to determine the security state of the context upon
 * ERET. The \@ suffix makes the internal labels unique per macro
 * expansion.
 * ------------------------------------------------------------------
 */
	.macro get_security_state _ret:req, _scr_reg:req
	ubfx	\_ret, \_scr_reg, #SCR_NSE_SHIFT, #1
	cmp	\_ret, #1
	beq	realm_state\@
	bfi	\_ret, \_scr_reg, #0, #1
	b	end\@
	realm_state\@:
	mov	\_ret, #2
	end\@:
	.endm
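/*
 * Illustrative use only (the register choices are an example, not a fixed
 * API): given the saved SCR_EL3 value of the outgoing context, classify it
 * before ERET. The result follows TF-A's security state encoding:
 * 0 = Secure, 1 = Non-secure, 2 = Realm.
 *
 *	ldr	x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
 *	get_security_state x17, x18
 */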
/*-----------------------------------------------------------------------------
 * Helper macro to configure EL3 registers we care about, while executing
 * at EL3/Root world. Root world has its own execution environment and
 * needs to have its settings configured to be independent of other worlds.
 * -----------------------------------------------------------------------------
 */
	.macro setup_el3_execution_context

	/* ---------------------------------------------------------------------
	 * The following registers need to be part of the separate root context
	 * as their values are of importance during EL3 execution.
	 * Hence these registers are overwritten to their initial values,
	 * irrespective of whichever world they return from, to ensure EL3 has a
	 * consistent execution context throughout the lifetime of TF-A.
	 *
	 * DAIF.A: Enable External Aborts and SError interrupts at EL3.
	 *
	 * MDCR_EL3.SDD: Set to one to disable AArch64 Secure self-hosted debug.
	 *  Debug exceptions, other than Breakpoint Instruction exceptions, are
	 *  disabled from all ELs in Secure state.
	 *
	 * SCR_EL3.EA: Set to one to enable SError interrupts at EL3.
	 *
	 * SCR_EL3.SIF: Set to one to disable instruction fetches from
	 *  Non-secure memory.
	 *
	 * PMCR_EL0.DP: Set to one so that the cycle counter, PMCCNTR_EL0, does
	 *  not count when event counting is prohibited. Necessary on PMUv3
	 *  <= p7, where MDCR_EL3.{SCCD,MCCD} are not available.
	 *
	 * CPTR_EL3.EZ: Set to one so that accesses to ZCR_EL3 do not trap.
	 * CPTR_EL3.TFP: Set to zero so that Advanced SIMD operations do not
	 *  trap.
	 * CPTR_EL3.ESM: Set to one so that accesses to SME-related registers
	 *  do not trap.
	 *
	 * PSTATE.DIT: Set to one to enable the Data Independent Timing (DIT)
	 *  functionality, if implemented in EL3.
	 * ---------------------------------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	mrs	x15, mdcr_el3
	orr	x15, x15, #MDCR_SDD_BIT
	msr	mdcr_el3, x15

	mrs	x15, scr_el3
	orr	x15, x15, #SCR_EA_BIT
	orr	x15, x15, #SCR_SIF_BIT
	msr	scr_el3, x15

	mrs	x15, pmcr_el0
	orr	x15, x15, #PMCR_EL0_DP_BIT
	msr	pmcr_el0, x15

	mrs	x15, cptr_el3
	orr	x15, x15, #CPTR_EZ_BIT
	orr	x15, x15, #ESM_BIT
	bic	x15, x15, #TFP_BIT
	msr	cptr_el3, x15

#if ENABLE_FEAT_DIT
#if ENABLE_FEAT_DIT > 1
	mrs	x15, id_aa64pfr0_el1
	ubfx	x15, x15, #ID_AA64PFR0_DIT_SHIFT, #ID_AA64PFR0_DIT_LENGTH
	cbz	x15, 1f
#endif
	mov	x15, #DIT_BIT
	msr	DIT, x15
1:
#endif

	isb
	.endm
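/*
 * For reference, the ENABLE_FEAT_DIT > 1 path above mirrors this C-level
 * sketch (illustrative only; is_feat_dit_supported() is TF-A's feature
 * check from arch_features.h, while write_dit() stands in for whatever
 * PSTATE.DIT accessor the build provides):
 *
 *	if (is_feat_dit_supported())
 *		write_dit(DIT_BIT);
 */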
#endif /* EL3_COMMON_MACROS_S */