/*
 * Copyright (c) 2015-2021, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef EL3_COMMON_MACROS_S
#define EL3_COMMON_MACROS_S

#include <arch.h>
#include <asm_macros.S>
#include <context.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

	/*
	 * Helper macro to initialise EL3 registers we care about.
	 */
	.macro el3_arch_init_common
	/* ---------------------------------------------------------------------
	 * SCTLR_EL3 has already been initialised - read current value before
	 * modifying.
	 *
	 * SCTLR_EL3.I: Enable the instruction cache.
	 *
	 * SCTLR_EL3.SA: Enable Stack Alignment check. An SP alignment fault
	 * exception is generated if a load or store instruction executed at
	 * EL3 uses the SP as the base address and the SP is not aligned to a
	 * 16-byte boundary.
	 *
	 * SCTLR_EL3.A: Enable Alignment fault checking. All instructions that
	 * load or store one or more registers have an alignment check that the
	 * address being accessed is aligned to the size of the data element(s)
	 * being accessed.
	 * ---------------------------------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el3
	orr	x0, x0, x1
	msr	sctlr_el3, x0
	isb

#ifdef IMAGE_BL31
	/* ---------------------------------------------------------------------
	 * Initialise the per-cpu cache pointer to the CPU.
	 * This is done early to enable crash reporting to have access to the
	 * crash stack. Since crash reporting depends on cpu_data to report the
	 * unhandled exception, not doing so can lead to recursive exceptions
	 * due to a NULL TPIDR_EL3.
	 * ---------------------------------------------------------------------
	 */
	bl	init_cpu_data_ptr
#endif /* IMAGE_BL31 */

	/* ---------------------------------------------------------------------
	 * Initialise SCR_EL3, setting all fields rather than relying on hw.
	 * All fields are architecturally UNKNOWN on reset. The following fields
	 * do not change during the TF lifetime. The remaining fields are set to
	 * zero here but are updated ahead of transitioning to a lower EL in the
	 * function cm_init_context_common().
	 *
	 * SCR_EL3.TWE: Set to zero so that execution of WFE instructions at
	 * EL2, EL1 and EL0 is not trapped to EL3.
	 *
	 * SCR_EL3.TWI: Set to zero so that execution of WFI instructions at
	 * EL2, EL1 and EL0 is not trapped to EL3.
	 *
	 * SCR_EL3.SIF: Set to one to disable instruction fetches from
	 * Non-secure memory.
	 *
	 * SCR_EL3.SMD: Set to zero to enable SMC calls at EL1 and above, from
	 * both Security states and both Execution states.
	 *
	 * SCR_EL3.EA: Set to one to route External Aborts and SError Interrupts
	 * to EL3 when executing at any EL.
	 *
	 * SCR_EL3.{API,APK}: For the Armv8.3 pointer authentication feature,
	 * disable traps to EL3 when accessing key registers or using pointer
	 * authentication instructions from lower ELs.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, ((SCR_RESET_VAL | SCR_EA_BIT | SCR_SIF_BIT) \
			& ~(SCR_TWE_BIT | SCR_TWI_BIT | SCR_SMD_BIT))
#if CTX_INCLUDE_PAUTH_REGS
	/*
	 * If the pointer authentication registers are saved during world
	 * switches, enable pointer authentication everywhere, as it is safe to
	 * do so.
	 */
	orr	x0, x0, #(SCR_API_BIT | SCR_APK_BIT)
#endif
#if ENABLE_RME
	/*
	 * TODO: Setting the EEL2 bit to allow EL3 access to secure-only
	 * registers in context management. This will need to be refactored.
	 */
	orr	x0, x0, #SCR_EEL2_BIT
#endif
	msr	scr_el3, x0

	/* ---------------------------------------------------------------------
	 * Initialise MDCR_EL3, setting all fields rather than relying on hw.
	 * Some fields are architecturally UNKNOWN on reset.
	 *
	 * MDCR_EL3.SDD: Set to one to disable AArch64 Secure self-hosted debug.
	 * Debug exceptions, other than Breakpoint Instruction exceptions, are
	 * disabled from all ELs in Secure state.
	 *
	 * MDCR_EL3.SPD32: Set to 0b10 to disable AArch32 Secure self-hosted
	 * privileged debug from S-EL1.
	 *
	 * MDCR_EL3.TDOSA: Set to zero so that EL2 and EL1 System register
	 * accesses to the powerdown debug registers do not trap to EL3.
	 *
	 * MDCR_EL3.TDA: Set to zero to allow EL0, EL1 and EL2 access to the
	 * debug registers, other than those registers that are controlled by
	 * MDCR_EL3.TDOSA.
	 *
	 * MDCR_EL3.TPM: Set to zero so that EL0, EL1, and EL2 System register
	 * accesses to all Performance Monitors registers do not trap to EL3.
	 *
	 * MDCR_EL3.SCCD: Set to one so that cycle counting by PMCCNTR_EL0 is
	 * prohibited in Secure state. This bit is RES0 in versions of the
	 * architecture where FEAT_PMUv3p5 is not implemented, so setting it
	 * to 1 has no effect on them.
	 *
	 * MDCR_EL3.MCCD: Set to one so that cycle counting by PMCCNTR_EL0 is
	 * prohibited in EL3. This bit is RES0 in versions of the architecture
	 * where FEAT_PMUv3p7 is not implemented, so setting it to 1 has no
	 * effect on them.
	 *
	 * MDCR_EL3.SPME: Set to zero so that event counting by the programmable
	 * counters PMEVCNTR<n>_EL0 is prohibited in Secure state. If ARMv8.2
	 * Debug is not implemented, this bit does not have any effect on the
	 * counters unless there is support for the implementation defined
	 * authentication interface ExternalSecureNoninvasiveDebugEnabled().
	 *
	 * MDCR_EL3.NSTB, MDCR_EL3.NSTBE: Set to zero so that the Trace Buffer
	 * owning security state is Secure state. If FEAT_TRBE is implemented,
	 * accesses to Trace Buffer control registers at EL2 and EL1 in any
	 * security state generate trap exceptions to EL3.
	 * If FEAT_TRBE is not implemented, these bits are RES0.
	 *
	 * MDCR_EL3.TTRF: Set to one so that accesses to trace filter control
	 * registers from non-monitor mode generate a trap exception to EL3,
	 * unless the access generates a higher priority exception, when trace
	 * filter control (FEAT_TRF) is implemented.
	 * When FEAT_TRF is not implemented, this bit is RES0.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, ((MDCR_EL3_RESET_VAL | MDCR_SDD_BIT | \
		      MDCR_SPD32(MDCR_SPD32_DISABLE) | MDCR_SCCD_BIT | \
		      MDCR_MCCD_BIT) & ~(MDCR_SPME_BIT | MDCR_TDOSA_BIT | \
		      MDCR_TDA_BIT | MDCR_TPM_BIT | MDCR_NSTB(MDCR_NSTB_EL1) | \
		      MDCR_NSTBE | MDCR_TTRF_BIT))

	/* Set MDCR_EL3.TTRF only if FEAT_TRF (trace filter control) is implemented */
	mrs	x1, id_aa64dfr0_el1
	ubfx	x1, x1, #ID_AA64DFR0_TRACEFILT_SHIFT, #ID_AA64DFR0_TRACEFILT_LENGTH
	cbz	x1, 1f
	orr	x0, x0, #MDCR_TTRF_BIT
1:
	msr	mdcr_el3, x0

	/* ---------------------------------------------------------------------
	 * Initialise PMCR_EL0, setting all fields rather than relying
	 * on hw. Some fields are architecturally UNKNOWN on reset.
	 *
	 * PMCR_EL0.LP: Set to one so that event counter overflow, which
	 * is recorded in PMOVSCLR_EL0[0-30], occurs on the increment
	 * that changes PMEVCNTR<n>_EL0[63] from 1 to 0, when ARMv8.5-PMU
	 * is implemented. This bit is RES0 in versions of the architecture
	 * earlier than ARMv8.5, so setting it to 1 has no effect on them.
	 *
	 * PMCR_EL0.LC: Set to one so that cycle counter overflow, which
	 * is recorded in PMOVSCLR_EL0[31], occurs on the increment
	 * that changes PMCCNTR_EL0[63] from 1 to 0.
	 *
	 * PMCR_EL0.DP: Set to one so that the cycle counter,
	 * PMCCNTR_EL0, does not count when event counting is prohibited.
	 *
	 * PMCR_EL0.X: Set to zero to disable export of events.
	 *
	 * PMCR_EL0.D: Set to zero so that, when enabled, PMCCNTR_EL0
	 * counts on every clock cycle.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, ((PMCR_EL0_RESET_VAL | PMCR_EL0_LP_BIT | \
		      PMCR_EL0_LC_BIT | PMCR_EL0_DP_BIT) & \
		      ~(PMCR_EL0_X_BIT | PMCR_EL0_D_BIT))

	msr	pmcr_el0, x0

	/* ---------------------------------------------------------------------
	 * Enable External Aborts and SError Interrupts now that the exception
	 * vectors have been set up.
	 * ---------------------------------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------------------------------
	 * Initialise CPTR_EL3, setting all fields rather than relying on hw.
	 * All fields are architecturally UNKNOWN on reset.
	 *
	 * CPTR_EL3.TCPAC: Set to zero so that any accesses to CPACR_EL1,
	 * CPTR_EL2, CPACR, or HCPTR do not trap to EL3.
	 *
	 * CPTR_EL3.TTA: Set to one so that accesses to the trace system
	 * registers trap to EL3 from all exception levels and security
	 * states when system register trace is implemented.
	 * When system register trace is not implemented, this bit is RES0 and
	 * hence set to zero.
	 *
	 * CPTR_EL3.TFP: Set to zero so that accesses to the V- or Z- registers
	 * by Advanced SIMD, floating-point or SVE instructions (if implemented)
	 * do not trap to EL3.
	 *
	 * CPTR_EL3.TAM: Set to one so that Activity Monitor access is
	 * trapped to EL3 by default.
	 *
	 * CPTR_EL3.EZ: Set to zero so that all SVE functionality is trapped
	 * to EL3 by default.
	 * ---------------------------------------------------------------------
	 */

	mov_imm	x0, (CPTR_EL3_RESET_VAL & ~(TCPAC_BIT | TTA_BIT | TFP_BIT))

	/* Set CPTR_EL3.TTA only if system register trace is implemented */
	mrs	x1, id_aa64dfr0_el1
	ubfx	x1, x1, #ID_AA64DFR0_TRACEVER_SHIFT, #ID_AA64DFR0_TRACEVER_LENGTH
	cbz	x1, 1f
	orr	x0, x0, #TTA_BIT
1:
	msr	cptr_el3, x0

	/*
	 * If Data Independent Timing (DIT) functionality is implemented,
	 * always enable DIT in EL3.
	 */
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #ID_AA64PFR0_DIT_SHIFT, #ID_AA64PFR0_DIT_LENGTH
	cmp	x0, #ID_AA64PFR0_DIT_SUPPORTED
	bne	1f
	mov	x0, #DIT_BIT
	msr	DIT, x0
1:
	.endm

/* -----------------------------------------------------------------------------
 * This is the superset of actions that need to be performed during a cold boot
 * or a warm boot in EL3. This code is shared by BL1 and BL31.
 *
 * This macro will always perform reset handling, architectural initialisations
 * and stack setup.
 * The rest of the actions are optional because they might not be needed,
 * depending on the context in which this macro is called. This is why this
 * macro is parameterised; each parameter allows some of these actions to be
 * enabled or disabled.
 *
 * _init_sctlr:
 *	Whether the macro needs to initialise SCTLR_EL3, including configuring
 *	the endianness of data accesses.
 *
 * _warm_boot_mailbox:
 *	Whether the macro needs to detect the type of boot (cold/warm). The
 *	detection is based on the platform entrypoint address: if it is zero
 *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
 *	this macro jumps to the platform entrypoint address.
 *
 * _secondary_cold_boot:
 *	Whether the macro needs to identify the CPU that is calling it: primary
 *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
 *	the platform initialisations, while the secondaries will be put in a
 *	platform-specific state in the meantime.
 *
 *	If the caller knows this macro will only be called by the primary CPU
 *	then this parameter can be defined to 0 to skip this step.
 *
 * _init_memory:
 *	Whether the macro needs to initialise the memory.
 *
 * _init_c_runtime:
 *	Whether the macro needs to initialise the C runtime environment.
 *
 * _exception_vectors:
 *	Address of the exception vectors to program in the VBAR_EL3 register.
 *
 * _pie_fixup_size:
 *	Size of the memory region in which to fix up the Global Descriptor
 *	Table (GDT).
 *
 *	A non-zero value is expected when the firmware needs the GDT to be
 *	fixed up.
 *
 * -----------------------------------------------------------------------------
 */
	.macro el3_entrypoint_common					\
		_init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,	\
		_init_memory, _init_c_runtime, _exception_vectors,	\
		_pie_fixup_size

	.if \_init_sctlr
		/* -------------------------------------------------------------
		 * This is the initialisation of SCTLR_EL3 and so must ensure
		 * that all fields are explicitly set rather than relying on hw.
		 * Some fields reset to an IMPLEMENTATION DEFINED value and
		 * others are architecturally UNKNOWN on reset.
		 *
		 * SCTLR_EL3.EE: Set the CPU endianness before doing anything
		 * that might involve memory reads or writes. Set to zero to
		 * select Little Endian.
		 *
		 * SCTLR_EL3.WXN: For the EL3 translation regime, this field can
		 * force all memory regions that are writeable to be treated as
		 * XN (Execute-never). Set to zero so that this control has no
		 * effect on memory access permissions.
		 *
		 * SCTLR_EL3.SA: Set to zero to disable Stack Alignment check.
		 *
		 * SCTLR_EL3.A: Set to zero to disable Alignment fault checking.
		 *
		 * SCTLR_EL3.DSSBS: Set to zero to disable speculation store
		 * bypass safe behaviour upon exception entry to EL3.
		 * -------------------------------------------------------------
		 */
		mov_imm	x0, (SCTLR_RESET_VAL & ~(SCTLR_EE_BIT | SCTLR_WXN_BIT \
				| SCTLR_SA_BIT | SCTLR_A_BIT | SCTLR_DSSBS_BIT))
		msr	sctlr_el3, x0
		isb
	.endif /* _init_sctlr */

#if DISABLE_MTPMU
	bl	mtpmu_disable
#endif

	.if \_warm_boot_mailbox
		/* -------------------------------------------------------------
		 * This code will be executed for both warm and cold resets.
		 * Now is the time to distinguish between the two.
		 * Query the platform entrypoint address; if it is not zero
		 * then it is a warm boot, so jump to this address.
		 * -------------------------------------------------------------
		 */
		bl	plat_get_my_entrypoint
		cbz	x0, do_cold_boot
		br	x0

	do_cold_boot:
	.endif /* _warm_boot_mailbox */

	.if \_pie_fixup_size
#if ENABLE_PIE
		/*
		 * ------------------------------------------------------------
		 * If PIE is enabled, fix up the Global Descriptor Table only
		 * once, during the primary core cold boot path.
		 *
		 * The compile-time base address, required for the fixup, is
		 * calculated using the "pie_fixup" label present within the
		 * first page.
		 * ------------------------------------------------------------
		 */
	pie_fixup:
		ldr	x0, =pie_fixup
		and	x0, x0, #~(PAGE_SIZE_MASK)
		mov_imm	x1, \_pie_fixup_size
		add	x1, x1, x0
		bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */
	.endif /* _pie_fixup_size */

	/* ---------------------------------------------------------------------
	 * Set the exception vectors.
	 * ---------------------------------------------------------------------
	 */
	adr	x0, \_exception_vectors
	msr	vbar_el3, x0
	isb

#if !(defined(IMAGE_BL2) && ENABLE_RME)
	/* ---------------------------------------------------------------------
	 * It is a cold boot.
	 * Perform any processor-specific actions upon reset, e.g. cache and
	 * TLB invalidations etc.
	 * ---------------------------------------------------------------------
	 */
	bl	reset_handler
#endif

	el3_arch_init_common

	.if \_secondary_cold_boot
		/* -------------------------------------------------------------
		 * Check if this is a primary or secondary CPU cold boot.
		 * The primary CPU will set up the platform while the
		 * secondaries are placed in a platform-specific state until the
		 * primary CPU performs the necessary actions to bring them out
		 * of that state and allows entry into the OS.
		 * -------------------------------------------------------------
		 */
		bl	plat_is_my_cpu_primary
		cbnz	w0, do_primary_cold_boot

		/* This is a cold boot on a secondary CPU */
		bl	plat_secondary_cold_boot_setup
		/* plat_secondary_cold_boot_setup() is not supposed to return */
		bl	el3_panic

	do_primary_cold_boot:
	.endif /* _secondary_cold_boot */

	/* ---------------------------------------------------------------------
	 * Initialise memory now. Secondary CPU initialisation won't get to this
	 * point.
	 * ---------------------------------------------------------------------
	 */

	.if \_init_memory
		bl	platform_mem_init
	.endif /* _init_memory */

	/* ---------------------------------------------------------------------
	 * Init C runtime environment:
	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
	 *       - the .bss section;
	 *       - the coherent memory section (if any).
	 *   - Relocate the data section from ROM to RAM, if required.
	 * ---------------------------------------------------------------------
	 */
	.if \_init_c_runtime
#if defined(IMAGE_BL31) || (defined(IMAGE_BL2) && \
	((BL2_AT_EL3 && BL2_INV_DCACHE) || ENABLE_RME))
		/* -------------------------------------------------------------
		 * Invalidate the RW memory used by the BL31 image. This
		 * includes the data and NOBITS sections.
		 * This is done to safeguard against possible corruption of
		 * this memory by dirty cache lines in a system cache as a
		 * result of use by an earlier boot loader stage. If PIE is
		 * enabled, however, RO sections including the GOT may be
		 * modified during the PIE fixup. Therefore, to be on the safe
		 * side, invalidate the entire image region if PIE is enabled.
		 * -------------------------------------------------------------
		 */
#if ENABLE_PIE
#if SEPARATE_CODE_AND_RODATA
		adrp	x0, __TEXT_START__
		add	x0, x0, :lo12:__TEXT_START__
#else
		adrp	x0, __RO_START__
		add	x0, x0, :lo12:__RO_START__
#endif /* SEPARATE_CODE_AND_RODATA */
#else
		adrp	x0, __RW_START__
		add	x0, x0, :lo12:__RW_START__
#endif /* ENABLE_PIE */
		adrp	x1, __RW_END__
		add	x1, x1, :lo12:__RW_END__
		sub	x1, x1, x0
		bl	inv_dcache_range
#if defined(IMAGE_BL31) && SEPARATE_NOBITS_REGION
		adrp	x0, __NOBITS_START__
		add	x0, x0, :lo12:__NOBITS_START__
		adrp	x1, __NOBITS_END__
		add	x1, x1, :lo12:__NOBITS_END__
		sub	x1, x1, x0
		bl	inv_dcache_range
#endif
#endif
		adrp	x0, __BSS_START__
		add	x0, x0, :lo12:__BSS_START__

		adrp	x1, __BSS_END__
		add	x1, x1, :lo12:__BSS_END__
		sub	x1, x1, x0
		bl	zeromem

#if USE_COHERENT_MEM
		adrp	x0, __COHERENT_RAM_START__
		add	x0, x0, :lo12:__COHERENT_RAM_START__
		adrp	x1, __COHERENT_RAM_END_UNALIGNED__
		add	x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
		sub	x1, x1, x0
		bl	zeromem
#endif

#if defined(IMAGE_BL1) || (defined(IMAGE_BL2) && BL2_AT_EL3 && BL2_IN_XIP_MEM)
		adrp	x0, __DATA_RAM_START__
		add	x0, x0, :lo12:__DATA_RAM_START__
		adrp	x1, __DATA_ROM_START__
		add	x1, x1, :lo12:__DATA_ROM_START__
		adrp	x2, __DATA_RAM_END__
		add	x2, x2, :lo12:__DATA_RAM_END__
		sub	x2, x2, x0
		bl	memcpy16
#endif
	.endif /* _init_c_runtime */

	/* ---------------------------------------------------------------------
	 * Use SP_EL0 for the C runtime stack.
	 * ---------------------------------------------------------------------
	 */
	msr	spsel, #0

	/* ---------------------------------------------------------------------
	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
	 * the MMU is enabled. There is no risk of reading stale stack memory
	 * after enabling the MMU as only the primary CPU is running at the
	 * moment.
	 * ---------------------------------------------------------------------
	 */
	bl	plat_set_my_stack

#if STACK_PROTECTOR_ENABLED
	.if \_init_c_runtime
	bl	update_stack_protector_canary
	.endif /* _init_c_runtime */
#endif
	.endm

	.macro	apply_at_speculative_wa
#if ERRATA_SPECULATIVE_AT
	/*
	 * Explicitly save x30 so as to free up a register and to enable
	 * branching, and also save x29, which will be used in the called
	 * function.
	 */
	stp	x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	bl	save_and_update_ptw_el1_sys_regs
	ldp	x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif
	.endm

	.macro	restore_ptw_el1_sys_regs
#if ERRATA_SPECULATIVE_AT
	/* -----------------------------------------------------------
	 * In case of ERRATA_SPECULATIVE_AT, the order below must be
	 * followed to ensure that the page table walk is not enabled
	 * until all EL1 system registers have been restored.
	 * The TCR_EL1 register should be updated at the end, which
	 * restores the previous stage 1 page table walk setting, i.e.
	 * the TCR_EL1.EPDx bits. ISBs ensure that the CPU performs the
	 * steps below in order:
	 *
	 * 1. Ensure all other system registers are written before
	 *    updating SCTLR_EL1, using an ISB.
	 * 2. Restore the SCTLR_EL1 register.
	 * 3. Ensure SCTLR_EL1 is written successfully, using an ISB.
	 * 4. Restore the TCR_EL1 register.
	 * -----------------------------------------------------------
	 */
	isb
	ldp	x28, x29, [sp, #CTX_EL1_SYSREGS_OFFSET + CTX_SCTLR_EL1]
	msr	sctlr_el1, x28
	isb
	msr	tcr_el1, x29
#endif
	.endm

#endif /* EL3_COMMON_MACROS_S */
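/*
 * Illustrative usage (a minimal sketch, not taken verbatim from any BL image):
 * a BL31-style entrypoint would typically invoke el3_entrypoint_common along
 * the lines shown below. The parameter values and the runtime_exceptions
 * vector symbol are examples only; the authoritative invocations live in the
 * BL1/BL31 entrypoint files of each image.
 *
 *	el3_entrypoint_common					\
 *		_init_sctlr=1					\
 *		_warm_boot_mailbox=1				\
 *		_secondary_cold_boot=1				\
 *		_init_memory=1					\
 *		_init_c_runtime=1				\
 *		_exception_vectors=runtime_exceptions		\
 *		_pie_fixup_size=BL31_LIMIT - BL31_BASE
 */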