/*
 * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/mpam.h>
#include <lib/extensions/spe.h>
#include <lib/extensions/sve.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/extensions/trbe.h>
#include <lib/extensions/trf.h>
#include <lib/extensions/twed.h>
#include <lib/utils.h>

static void enable_extensions_secure(cpu_context_t *ctx);

/*******************************************************************************
 * Context management library initialisation routine. This library is used by
 * runtime services to share pointers to 'cpu_context' structures for the secure
 * and non-secure states. Management of the structures and their associated
 * memory is not done by the context management library e.g. the PSCI service
 * manages the cpu context used for entry from and exit to the non-secure state.
 * The Secure payload dispatcher service manages the context(s) corresponding to
 * the secure state. It also uses this library to get access to the non-secure
 * state cpu context pointers.
 * Lastly, this library provides the API to make SP_EL3 point to the cpu context
 * which will be used for programming an entry into a lower EL. The same context
 * will be used to save state upon exception entry from that EL.
 ******************************************************************************/
void __init cm_init(void)
{
        /*
         * The context management library has only global data to initialise,
         * but that will be done when the BSS is zeroed out.
         */
}

/*******************************************************************************
 * The following function initializes the cpu_context 'ctx' for
 * first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 *
 * The security state to initialize is determined by the SECURE attribute
 * of the entry_point_info.
 *
 * The EE and ST attributes are used to configure the endianness and secure
 * timer availability for the new execution context.
 *
 * To prepare the register state for entry, call cm_prepare_el3_exit() and
 * el3_exit(). For Secure-EL1, cm_prepare_el3_exit() is equivalent to
 * cm_el1_sysregs_context_restore().
 ******************************************************************************/
void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
{
        unsigned int security_state;
        u_register_t scr_el3;
        el3_state_t *state;
        gp_regs_t *gp_regs;
        u_register_t sctlr_elx, actlr_elx;

        assert(ctx != NULL);

        security_state = GET_SECURITY_STATE(ep->h.attr);

        /* Clear any residual register values from the context */
        zeromem(ctx, sizeof(*ctx));

        /*
         * SCR_EL3 was initialised during reset sequence in macro
         * el3_arch_init_common. This code modifies the SCR_EL3 fields that
         * affect the next EL.
         *
         * The following fields are initially set to zero and then updated to
         * the required value depending on the SPSR_EL3 value, the security
         * state and the entrypoint attributes of the next EL.
         */
        scr_el3 = read_scr();
        scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT |
                        SCR_ST_BIT | SCR_HCE_BIT);
        /*
         * SCR_NS: Set the security state of the next EL.
         */
        if (security_state != SECURE)
                scr_el3 |= SCR_NS_BIT;
        /*
         * SCR_EL3.RW: Set the execution state, AArch32 or AArch64, for the
         * next Exception level as specified by SPSR.
         */
        if (GET_RW(ep->spsr) == MODE_RW_64)
                scr_el3 |= SCR_RW_BIT;
        /*
         * SCR_EL3.ST: When set, Secure EL1 can access the Counter-timer
         * Physical Secure timer registers from AArch64 state; when clear,
         * such accesses are trapped to EL3. Set it if requested by the
         * entrypoint attributes.
         */
        if (EP_GET_ST(ep->h.attr) != 0U)
                scr_el3 |= SCR_ST_BIT;

        /*
         * If FEAT_HCX is enabled, enable access to HCRX_EL2 by setting
         * SCR_EL3.HXEn.
         */
#if ENABLE_FEAT_HCX
        scr_el3 |= SCR_HXEn_BIT;
#endif

#if RAS_TRAP_LOWER_EL_ERR_ACCESS
        /*
         * SCR_EL3.TERR: Trap Error record accesses. Accesses to the RAS ERR
         * and RAS ERX registers from EL1 and EL2 are trapped to EL3.
         */
        scr_el3 |= SCR_TERR_BIT;
#endif

#if !HANDLE_EA_EL3_FIRST
        /*
         * SCR_EL3.EA: Do not route External Aborts and SError interrupts to
         * EL3 when executing at a lower EL. When executing at EL3, External
         * Aborts are taken to EL3.
         */
        scr_el3 &= ~SCR_EA_BIT;
#endif

#if FAULT_INJECTION_SUPPORT
        /* Enable fault injection from lower ELs */
        scr_el3 |= SCR_FIEN_BIT;
#endif

#if !CTX_INCLUDE_PAUTH_REGS
        /*
         * If the pointer authentication registers aren't saved during world
         * switches, the value of the registers can be leaked from the Secure
         * to the Non-secure world. To prevent this, rather than enabling
         * pointer authentication everywhere, we only enable it in the
         * Non-secure world.
         *
         * If the Secure world wants to use pointer authentication,
         * CTX_INCLUDE_PAUTH_REGS must be set to 1.
         */
        if (security_state == NON_SECURE)
                scr_el3 |= SCR_API_BIT | SCR_APK_BIT;
#endif /* !CTX_INCLUDE_PAUTH_REGS */

#if !CTX_INCLUDE_MTE_REGS || ENABLE_ASSERTIONS
        /* Get Memory Tagging Extension support level */
        unsigned int mte = get_armv8_5_mte_support();
#endif
        /*
         * Enable MTE support. Support is enabled unconditionally for the
         * normal world, and only for the secure world when
         * CTX_INCLUDE_MTE_REGS is set.
         */
#if CTX_INCLUDE_MTE_REGS
        assert((mte == MTE_IMPLEMENTED_ELX) || (mte == MTE_IMPLEMENTED_ASY));
        scr_el3 |= SCR_ATA_BIT;
#else
        /*
         * When MTE is only implemented at EL0, it can be enabled
         * across both worlds as no MTE registers are used.
         */
        if ((mte == MTE_IMPLEMENTED_EL0) ||
        /*
         * When MTE is implemented at all ELs, it can only be enabled
         * in the Non-secure world without register saving.
         */
           (((mte == MTE_IMPLEMENTED_ELX) || (mte == MTE_IMPLEMENTED_ASY)) &&
             (security_state == NON_SECURE))) {
                scr_el3 |= SCR_ATA_BIT;
        }
#endif /* CTX_INCLUDE_MTE_REGS */

#ifdef IMAGE_BL31
        /*
         * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
         * indicated by the interrupt routing model for BL31.
         */
        scr_el3 |= get_scr_el3_from_routing_model(security_state);
#endif

        /* Save the initialized value of CPTR_EL3 register */
        write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, read_cptr_el3());
        if (security_state == SECURE) {
                enable_extensions_secure(ctx);
        }

        /*
         * SCR_EL3.HCE: Enable HVC instructions if next execution state is
         * AArch64 and next EL is EL2, or if next execution state is AArch32
         * and next mode is Hyp.
         * SCR_EL3.FGTEn: Enable Fine Grained Virtualization Traps under the
         * same conditions as HVC instructions and when the processor supports
         * ARMv8.6-FGT.
         * SCR_EL3.ECVEn: Enable access to the Enhanced Counter Virtualization
         * (ECV) CNTPOFF_EL2 register under the same conditions as HVC
         * instructions and when the processor supports ECV.
         */
        if (((GET_RW(ep->spsr) == MODE_RW_64) && (GET_EL(ep->spsr) == MODE_EL2))
            || ((GET_RW(ep->spsr) != MODE_RW_64)
                && (GET_M32(ep->spsr) == MODE32_hyp))) {
                scr_el3 |= SCR_HCE_BIT;

                if (is_armv8_6_fgt_present()) {
                        scr_el3 |= SCR_FGTEN_BIT;
                }

                if (get_armv8_6_ecv_support()
                    == ID_AA64MMFR0_EL1_ECV_SELF_SYNCH) {
                        scr_el3 |= SCR_ECVEN_BIT;
                }
        }

        /* Enable S-EL2 if the next EL is EL2 and the security state is secure */
        if ((security_state == SECURE) && (GET_EL(ep->spsr) == MODE_EL2)) {
                if (GET_RW(ep->spsr) != MODE_RW_64) {
                        ERROR("S-EL2 cannot be used in AArch32.\n");
                        panic();
                }

                scr_el3 |= SCR_EEL2_BIT;
        }

        /*
         * FEAT_AMUv1p1 virtual offset registers are only accessible from EL3
         * and EL2. When this bit is clear, accesses from EL2 are trapped to
         * EL3, so set it to 1 when EL2 is present.
         */
        if (is_armv8_6_feat_amuv1p1_present() &&
                (el_implemented(2) != EL_IMPL_NONE)) {
                scr_el3 |= SCR_AMVOFFEN_BIT;
        }

        /*
         * Initialise SCTLR_EL1 to the reset value corresponding to the target
         * execution state, setting all fields rather than relying on the hw.
         * Some fields have architecturally UNKNOWN reset values and these are
         * set to zero.
         *
         * SCTLR.EE: Endianness is taken from the entrypoint attributes.
         *
         * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
         * required by the PSCI specification).
         */
        sctlr_elx = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0U;
        if (GET_RW(ep->spsr) == MODE_RW_64)
                sctlr_elx |= SCTLR_EL1_RES1;
        else {
                /*
                 * If the target execution state is AArch32 then the following
                 * fields need to be set.
                 *
                 * SCTLR_EL1.nTWE: Set to one so that EL0 execution of WFE
                 *  instructions is not trapped to EL1.
                 *
                 * SCTLR_EL1.nTWI: Set to one so that EL0 execution of WFI
                 *  instructions is not trapped to EL1.
                 *
                 * SCTLR_EL1.CP15BEN: Set to one to enable EL0 execution of the
                 *  CP15DMB, CP15DSB, and CP15ISB instructions.
                 */
                sctlr_elx |= SCTLR_AARCH32_EL1_RES1 | SCTLR_CP15BEN_BIT
                                        | SCTLR_NTWI_BIT | SCTLR_NTWE_BIT;
        }

#if ERRATA_A75_764081
        /*
         * If the workaround for Cortex-A75 erratum 764081 is used then set
         * SCTLR_EL1.IESB to enable the Implicit Error Synchronization Barrier.
         */
        sctlr_elx |= SCTLR_IESB_BIT;
#endif

        /* Enable WFE trap delay in SCR_EL3 if supported and configured */
        if (is_armv8_6_twed_present()) {
                uint32_t delay = plat_arm_set_twedel_scr_el3();

                if (delay != TWED_DISABLED) {
                        /* Make sure delay value fits */
                        assert((delay & ~SCR_TWEDEL_MASK) == 0U);

                        /* Set delay in SCR_EL3 */
                        scr_el3 &= ~(SCR_TWEDEL_MASK << SCR_TWEDEL_SHIFT);
                        scr_el3 |= ((delay & SCR_TWEDEL_MASK)
                                    << SCR_TWEDEL_SHIFT);

                        /* Enable WFE delay */
                        scr_el3 |= SCR_TWEDEn_BIT;
                }
        }

        /*
         * Store the initialised SCTLR_EL1 value in the cpu_context - SCTLR_EL2
         * and other EL2 registers are set up by cm_prepare_el3_exit() as they
         * are not part of the stored cpu_context.
         */
        write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);

        /*
         * Base the context ACTLR_EL1 on the current value, as it is
         * implementation defined. The context restore process will write
         * the value from the context to the actual register and can cause
         * problems for processor cores that don't expect certain bits to
         * be zero.
         */
        actlr_elx = read_actlr_el1();
        write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));

        /*
         * Populate EL3 state so that we have the right context
         * before doing ERET.
         */
        state = get_el3state_ctx(ctx);
        write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
        write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
        write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr);

        /*
         * Store the X0-X7 values from the entrypoint into the context.
         * Use memcpy as we are in control of the layout of the structures.
         */
        gp_regs = get_gpregs_ctx(ctx);
        memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
}
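
/*
 * Usage sketch (illustrative only, not part of this library): a runtime
 * service typically describes the next image with an entry_point_info_t and
 * hands it to cm_setup_context(). `ns_image_base` is a hypothetical load
 * address; the SPSR shown requests AArch64 EL2 with exceptions masked.
 *
 *	entry_point_info_t ep;
 *
 *	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, EP_NON_SECURE);
 *	ep.pc = ns_image_base;
 *	ep.spsr = SPSR_64(MODE_EL2, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
 *	zeromem(&ep.args, sizeof(ep.args));
 *
 *	cm_setup_context(cm_get_context(NON_SECURE), &ep);
 */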

/*******************************************************************************
 * Enable architecture extensions on first entry to Non-secure world.
 * When EL2 is implemented but unused, `el2_unused` is true; otherwise it is
 * false.
 ******************************************************************************/
static void enable_extensions_nonsecure(bool el2_unused, cpu_context_t *ctx)
{
#if IMAGE_BL31
#if ENABLE_SPE_FOR_LOWER_ELS
        spe_enable(el2_unused);
#endif

#if ENABLE_AMU
        amu_enable(el2_unused, ctx);
#endif

#if ENABLE_SVE_FOR_NS
        sve_enable(ctx);
#endif

#if ENABLE_MPAM_FOR_LOWER_ELS
        mpam_enable(el2_unused);
#endif

#if ENABLE_TRBE_FOR_NS
        trbe_enable();
#endif /* ENABLE_TRBE_FOR_NS */

#if ENABLE_SYS_REG_TRACE_FOR_NS
        sys_reg_trace_enable(ctx);
#endif /* ENABLE_SYS_REG_TRACE_FOR_NS */

#if ENABLE_TRF_FOR_NS
        trf_enable();
#endif /* ENABLE_TRF_FOR_NS */

#endif
}

/*******************************************************************************
 * Enable architecture extensions on first entry to Secure world.
 ******************************************************************************/
static void enable_extensions_secure(cpu_context_t *ctx)
{
#if IMAGE_BL31
#if ENABLE_SVE_FOR_SWD
        sve_enable(ctx);
#endif
#endif
}

/*******************************************************************************
 * The following function initializes the cpu_context for a CPU specified by
 * its `cpu_idx` for first use, and sets the initial entrypoint state as
 * specified by the entry_point_info structure.
 ******************************************************************************/
void cm_init_context_by_index(unsigned int cpu_idx,
                              const entry_point_info_t *ep)
{
        cpu_context_t *ctx;
        ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
        cm_setup_context(ctx, ep);
}

/*******************************************************************************
 * The following function initializes the cpu_context for the current CPU
 * for first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 ******************************************************************************/
void cm_init_my_context(const entry_point_info_t *ep)
{
        cpu_context_t *ctx;
        ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
        cm_setup_context(ctx, ep);
}
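
/*
 * Usage sketch (illustrative only): PSCI-style code initialises the context
 * of another CPU by index when powering it on, and the current CPU's own
 * context on the boot path. `target_idx` and `ep` are hypothetical.
 *
 *	cm_init_context_by_index(target_idx, &ep);	// e.g. during CPU_ON
 *	cm_init_my_context(&ep);			// on the booting CPU
 */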

/*******************************************************************************
 * Prepare the CPU system registers for first entry into secure or normal
 * world.
 *
 * If execution is requested to EL2 or Hyp mode, SCTLR_EL2 is initialized.
 * If execution is requested to non-secure EL1 or SVC mode, and the CPU
 * supports EL2, then EL2 is disabled by configuring all necessary EL2
 * registers.
 * For all entries, the EL1 registers are initialized from the cpu_context.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
        u_register_t sctlr_elx, scr_el3, mdcr_el2;
        cpu_context_t *ctx = cm_get_context(security_state);
        bool el2_unused = false;
        uint64_t hcr_el2 = 0U;

        assert(ctx != NULL);

        if (security_state == NON_SECURE) {
                scr_el3 = read_ctx_reg(get_el3state_ctx(ctx),
                                       CTX_SCR_EL3);
                if ((scr_el3 & SCR_HCE_BIT) != 0U) {
                        /* Use SCTLR_EL1.EE value to initialise sctlr_el2 */
                        sctlr_elx = read_ctx_reg(get_el1_sysregs_ctx(ctx),
                                                 CTX_SCTLR_EL1);
                        sctlr_elx &= SCTLR_EE_BIT;
                        sctlr_elx |= SCTLR_EL2_RES1;
#if ERRATA_A75_764081
                        /*
                         * If the workaround for Cortex-A75 erratum 764081 is
                         * used then set SCTLR_EL2.IESB to enable the Implicit
                         * Error Synchronization Barrier.
                         */
                        sctlr_elx |= SCTLR_IESB_BIT;
#endif
                        write_sctlr_el2(sctlr_elx);
                } else if (el_implemented(2) != EL_IMPL_NONE) {
                        el2_unused = true;

                        /*
                         * EL2 present but unused, need to disable safely.
                         * SCTLR_EL2 can be ignored in this case.
                         *
                         * Set EL2 register width appropriately: set the
                         * HCR_EL2.RW field to match SCR_EL3.RW.
                         */
                        if ((scr_el3 & SCR_RW_BIT) != 0U)
                                hcr_el2 |= HCR_RW_BIT;

                        /*
                         * For the Armv8.3 pointer authentication feature,
                         * disable traps to EL2 when accessing key registers
                         * or using pointer authentication instructions from
                         * lower ELs.
                         */
                        hcr_el2 |= (HCR_API_BIT | HCR_APK_BIT);

                        write_hcr_el2(hcr_el2);

                        /*
                         * Initialise CPTR_EL2 setting all fields rather than
                         * relying on the hw. All fields have architecturally
                         * UNKNOWN reset values.
                         *
                         * CPTR_EL2.TCPAC: Set to zero so that Non-secure EL1
                         *  accesses to the CPACR_EL1 or CPACR from both
                         *  Execution states do not trap to EL2.
                         *
                         * CPTR_EL2.TTA: Set to zero so that Non-secure System
                         *  register accesses to the trace registers from both
                         *  Execution states do not trap to EL2.
                         *  If PE trace unit System registers are not
                         *  implemented then this bit is reserved, and must be
                         *  set to zero.
                         *
                         * CPTR_EL2.TFP: Set to zero so that Non-secure
                         *  accesses to SIMD and floating-point functionality
                         *  from both Execution states do not trap to EL2.
                         */
                        write_cptr_el2(CPTR_EL2_RESET_VAL &
                                        ~(CPTR_EL2_TCPAC_BIT | CPTR_EL2_TTA_BIT
                                        | CPTR_EL2_TFP_BIT));

                        /*
                         * Initialise CNTHCTL_EL2. All fields are
                         * architecturally UNKNOWN on reset and are set to zero
                         * except for field(s) listed below.
                         *
                         * CNTHCTL_EL2.EL1PCEN: Set to one to disable traps to
                         *  Hyp mode of Non-secure EL0 and EL1 accesses to the
                         *  physical timer registers.
                         *
                         * CNTHCTL_EL2.EL1PCTEN: Set to one to disable traps to
                         *  Hyp mode of Non-secure EL0 and EL1 accesses to the
                         *  physical counter registers.
                         */
                        write_cnthctl_el2(CNTHCTL_RESET_VAL |
                                                EL1PCEN_BIT | EL1PCTEN_BIT);

                        /*
                         * Initialise CNTVOFF_EL2 to zero as it resets to an
                         * architecturally UNKNOWN value.
                         */
                        write_cntvoff_el2(0);

                        /*
                         * Set VPIDR_EL2 and VMPIDR_EL2 to match MIDR_EL1 and
                         * MPIDR_EL1 respectively.
                         */
                        write_vpidr_el2(read_midr_el1());
                        write_vmpidr_el2(read_mpidr_el1());

                        /*
                         * Initialise VTTBR_EL2. All fields are architecturally
                         * UNKNOWN on reset.
                         *
                         * VTTBR_EL2.VMID: Set to zero. Even though EL1&0 stage
                         *  2 address translation is disabled, cache
                         *  maintenance operations depend on the VMID.
                         *
                         * VTTBR_EL2.BADDR: Set to zero as EL1&0 stage 2
                         *  address translation is disabled.
                         */
                        write_vttbr_el2(VTTBR_RESET_VAL &
                                ~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT)
                                | (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT)));

                        /*
                         * Initialise MDCR_EL2, setting all fields rather than
                         * relying on hw. Some fields are architecturally
                         * UNKNOWN on reset.
                         *
                         * MDCR_EL2.HLP: Set to one so that event counter
                         *  overflow, that is recorded in PMOVSCLR_EL0[0-30],
                         *  occurs on the increment that changes
                         *  PMEVCNTR<n>_EL0[63] from 1 to 0, when ARMv8.5-PMU
                         *  is implemented. This bit is RES0 in versions of the
                         *  architecture earlier than ARMv8.5; setting it to 1
                         *  doesn't have any effect on them.
                         *
                         * MDCR_EL2.TTRF: Set to zero so that access to the
                         *  Trace Filter Control register TRFCR_EL1 at EL1 is
                         *  not trapped to EL2. This bit is RES0 in versions of
                         *  the architecture earlier than ARMv8.4.
                         *
                         * MDCR_EL2.HPMD: Set to one so that event counting is
                         *  prohibited at EL2. This bit is RES0 in versions of
                         *  the architecture earlier than ARMv8.1; setting it
                         *  to 1 doesn't have any effect on them.
                         *
                         * MDCR_EL2.TPMS: Set to zero so that accesses to
                         *  Statistical Profiling control registers from EL1
                         *  do not trap to EL2. This bit is RES0 when SPE is
                         *  not implemented.
                         *
                         * MDCR_EL2.TDRA: Set to zero so that Non-secure EL0
                         *  and EL1 System register accesses to the Debug ROM
                         *  registers are not trapped to EL2.
                         *
                         * MDCR_EL2.TDOSA: Set to zero so that Non-secure EL1
                         *  System register accesses to the powerdown debug
                         *  registers are not trapped to EL2.
                         *
                         * MDCR_EL2.TDA: Set to zero so that System register
                         *  accesses to the debug registers do not trap to EL2.
                         *
                         * MDCR_EL2.TDE: Set to zero so that debug exceptions
                         *  are not routed to EL2.
                         *
                         * MDCR_EL2.HPME: Set to zero to disable EL2
                         *  Performance Monitors.
                         *
                         * MDCR_EL2.TPM: Set to zero so that Non-secure EL0
                         *  and EL1 accesses to all Performance Monitors
                         *  registers are not trapped to EL2.
                         *
                         * MDCR_EL2.TPMCR: Set to zero so that Non-secure EL0
                         *  and EL1 accesses to the PMCR_EL0 or PMCR are not
                         *  trapped to EL2.
                         *
                         * MDCR_EL2.HPMN: Set to the value of PMCR_EL0.N, which
                         *  is the architecturally-defined reset value.
                         *
                         * MDCR_EL2.E2TB: Set to zero so that the Trace Buffer
                         *  owning exception level is NS-EL1 and tracing is
                         *  prohibited at NS-EL2. These bits are RES0 when
                         *  FEAT_TRBE is not implemented.
                         */
                        mdcr_el2 = ((MDCR_EL2_RESET_VAL | MDCR_EL2_HLP |
                                     MDCR_EL2_HPMD) |
                                   ((read_pmcr_el0() & PMCR_EL0_N_BITS)
                                   >> PMCR_EL0_N_SHIFT)) &
                                   ~(MDCR_EL2_TTRF | MDCR_EL2_TPMS |
                                     MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT |
                                     MDCR_EL2_TDA_BIT | MDCR_EL2_TDE_BIT |
                                     MDCR_EL2_HPME_BIT | MDCR_EL2_TPM_BIT |
                                     MDCR_EL2_TPMCR_BIT |
                                     MDCR_EL2_E2TB(MDCR_EL2_E2TB_EL1));

                        write_mdcr_el2(mdcr_el2);

                        /*
                         * Initialise HSTR_EL2. All fields are architecturally
                         * UNKNOWN on reset.
                         *
                         * HSTR_EL2.T<n>: Set all these fields to zero so that
                         *  Non-secure EL0 or EL1 accesses to System registers
                         *  do not trap to EL2.
                         */
                        write_hstr_el2(HSTR_EL2_RESET_VAL & ~(HSTR_EL2_T_MASK));
                        /*
                         * Initialise CNTHP_CTL_EL2. All fields are
                         * architecturally UNKNOWN on reset.
                         *
                         * CNTHP_CTL_EL2.ENABLE: Set to zero to disable the EL2
                         *  physical timer and prevent timer interrupts.
                         */
                        write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL &
                                                ~(CNTHP_CTL_ENABLE_BIT));
                }
                enable_extensions_nonsecure(el2_unused, ctx);
        }

        cm_el1_sysregs_context_restore(security_state);
        cm_set_next_eret_context(security_state);
}
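
/*
 * Usage sketch (illustrative only): a first entry into the normal world
 * pairs the context initialisation above with cm_prepare_el3_exit(), after
 * which the generic el3_exit path ERETs using the context programmed into
 * SP_EL3. `next_image_ep` is a hypothetical pointer from platform code.
 *
 *	cm_init_my_context(next_image_ep);
 *	cm_prepare_el3_exit(NON_SECURE);
 */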

#if CTX_INCLUDE_EL2_REGS
/*******************************************************************************
 * Save EL2 sysreg context
 ******************************************************************************/
void cm_el2_sysregs_context_save(uint32_t security_state)
{
        u_register_t scr_el3 = read_scr();

        /*
         * Always save the non-secure EL2 context, only save the
         * S-EL2 context if S-EL2 is enabled.
         */
        if ((security_state == NON_SECURE) ||
            ((security_state == SECURE) && ((scr_el3 & SCR_EEL2_BIT) != 0U))) {
                cpu_context_t *ctx;

                ctx = cm_get_context(security_state);
                assert(ctx != NULL);

                el2_sysregs_context_save(get_el2_sysregs_ctx(ctx));
        }
}

/*******************************************************************************
 * Restore EL2 sysreg context
 ******************************************************************************/
void cm_el2_sysregs_context_restore(uint32_t security_state)
{
        u_register_t scr_el3 = read_scr();

        /*
         * Always restore the non-secure EL2 context, only restore the
         * S-EL2 context if S-EL2 is enabled.
         */
        if ((security_state == NON_SECURE) ||
            ((security_state == SECURE) && ((scr_el3 & SCR_EEL2_BIT) != 0U))) {
                cpu_context_t *ctx;

                ctx = cm_get_context(security_state);
                assert(ctx != NULL);

                el2_sysregs_context_restore(get_el2_sysregs_ctx(ctx));
        }
}
#endif /* CTX_INCLUDE_EL2_REGS */
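
/*
 * Usage sketch (illustrative only, requires CTX_INCLUDE_EL2_REGS): a
 * dispatcher hosting a secure partition at S-EL2 would swap EL2 state
 * around a world switch, for example when entering the secure world:
 *
 *	cm_el2_sysregs_context_save(NON_SECURE);
 *	cm_el2_sysregs_context_restore(SECURE);
 */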

/*******************************************************************************
 * The next two functions are used by runtime services to save and restore
 * EL1 context on the 'cpu_context' structure for the specified security
 * state.
 ******************************************************************************/
void cm_el1_sysregs_context_save(uint32_t security_state)
{
        cpu_context_t *ctx;

        ctx = cm_get_context(security_state);
        assert(ctx != NULL);

        el1_sysregs_context_save(get_el1_sysregs_ctx(ctx));

#if IMAGE_BL31
        if (security_state == SECURE)
                PUBLISH_EVENT(cm_exited_secure_world);
        else
                PUBLISH_EVENT(cm_exited_normal_world);
#endif
}

void cm_el1_sysregs_context_restore(uint32_t security_state)
{
        cpu_context_t *ctx;

        ctx = cm_get_context(security_state);
        assert(ctx != NULL);

        el1_sysregs_context_restore(get_el1_sysregs_ctx(ctx));

#if IMAGE_BL31
        if (security_state == SECURE)
                PUBLISH_EVENT(cm_entering_secure_world);
        else
                PUBLISH_EVENT(cm_entering_normal_world);
#endif
}
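
/*
 * Usage sketch (illustrative only): a secure payload dispatcher switching
 * from the normal world to the secure world typically combines the EL1
 * save/restore pair with cm_set_next_eret_context():
 *
 *	cm_el1_sysregs_context_save(NON_SECURE);
 *	cm_el1_sysregs_context_restore(SECURE);
 *	cm_set_next_eret_context(SECURE);
 */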

/*******************************************************************************
 * This function populates the ELR_EL3 member of the 'cpu_context' pertaining
 * to the given security state with the given entrypoint.
 ******************************************************************************/
void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint)
{
        cpu_context_t *ctx;
        el3_state_t *state;

        ctx = cm_get_context(security_state);
        assert(ctx != NULL);

        /* Populate EL3 state so that ERET jumps to the correct entry */
        state = get_el3state_ctx(ctx);
        write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
}

/*******************************************************************************
 * This function populates the ELR_EL3 and SPSR_EL3 members of the
 * 'cpu_context' pertaining to the given security state.
 ******************************************************************************/
void cm_set_elr_spsr_el3(uint32_t security_state,
                        uintptr_t entrypoint, uint32_t spsr)
{
        cpu_context_t *ctx;
        el3_state_t *state;

        ctx = cm_get_context(security_state);
        assert(ctx != NULL);

        /* Populate EL3 state so that ERET jumps to the correct entry */
        state = get_el3state_ctx(ctx);
        write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
        write_ctx_reg(state, CTX_SPSR_EL3, spsr);
}
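
/*
 * Usage sketch (illustrative only): redirect the next normal-world ERET to a
 * hypothetical AArch64 EL1 handler with exceptions masked:
 *
 *	cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t)ns_handler_pc,
 *			    SPSR_64(MODE_EL1, MODE_SP_ELX,
 *				    DISABLE_ALL_EXCEPTIONS));
 */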

/*******************************************************************************
 * This function updates a single bit in the SCR_EL3 member of the
 * 'cpu_context' pertaining to the given security state using the value and
 * bit position specified in the parameters. It preserves all other bits.
 ******************************************************************************/
void cm_write_scr_el3_bit(uint32_t security_state,
                          uint32_t bit_pos,
                          uint32_t value)
{
        cpu_context_t *ctx;
        el3_state_t *state;
        u_register_t scr_el3;

        ctx = cm_get_context(security_state);
        assert(ctx != NULL);

        /* Ensure that the bit position is a valid one */
        assert(((1UL << bit_pos) & SCR_VALID_BIT_MASK) != 0U);

        /* Ensure that the 'value' is only a bit wide */
        assert(value <= 1U);

        /*
         * Get the SCR_EL3 value from the cpu context, clear the desired bit
         * and set it to its new value.
         */
        state = get_el3state_ctx(ctx);
        scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
        scr_el3 &= ~(1UL << bit_pos);
        scr_el3 |= (u_register_t)value << bit_pos;
        write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
}

/*******************************************************************************
 * This function retrieves the SCR_EL3 member of the 'cpu_context' pertaining
 * to the given security state.
 ******************************************************************************/
u_register_t cm_get_scr_el3(uint32_t security_state)
{
        cpu_context_t *ctx;
        el3_state_t *state;

        ctx = cm_get_context(security_state);
        assert(ctx != NULL);

        /* Read the SCR_EL3 value from the cpu context */
        state = get_el3state_ctx(ctx);
        return read_ctx_reg(state, CTX_SCR_EL3);
}

/*******************************************************************************
 * This function is used to program the context that's used for exception
 * return. This initializes SP_EL3 to point to the 'cpu_context' set up for
 * the required security state.
 ******************************************************************************/
void cm_set_next_eret_context(uint32_t security_state)
{
        cpu_context_t *ctx;

        ctx = cm_get_context(security_state);
        assert(ctx != NULL);

        cm_set_next_context(ctx);
}
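
/*
 * Usage sketch (illustrative only): the interrupt management framework
 * updates the routing bits in a similar way when an interrupt type is
 * configured to be routed to EL3; the bit position comes from the platform:
 *
 *	uint32_t bit_pos = plat_interrupt_type_to_line(INTR_TYPE_NS, SECURE);
 *
 *	cm_write_scr_el3_bit(SECURE, bit_pos, 1U);
 *	assert((cm_get_scr_el3(SECURE) & (1UL << bit_pos)) != 0U);
 */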