/*
 * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/mpam.h>
#include <lib/extensions/sme.h>
#include <lib/extensions/spe.h>
#include <lib/extensions/sve.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/extensions/trbe.h>
#include <lib/extensions/trf.h>
#include <lib/extensions/twed.h>
#include <lib/utils.h>

static void manage_extensions_secure(cpu_context_t *ctx);

/******************************************************************************
 * This function performs initializations that are specific to SECURE state
 * and updates the cpu context specified by 'ctx'.
 *****************************************************************************/
static void setup_secure_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

#if defined(IMAGE_BL31) && !defined(SPD_spmd)
	/*
	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
	 * indicated by the interrupt routing model for BL31.
	 */
	scr_el3 |= get_scr_el3_from_routing_model(SECURE);
#endif

#if !CTX_INCLUDE_MTE_REGS || ENABLE_ASSERTIONS
	/* Get Memory Tagging Extension support level */
	unsigned int mte = get_armv8_5_mte_support();
#endif
	/*
	 * Allow access to Allocation Tags when CTX_INCLUDE_MTE_REGS
	 * is set, or when MTE is only implemented at EL0.
	 */
#if CTX_INCLUDE_MTE_REGS
	assert((mte == MTE_IMPLEMENTED_ELX) || (mte == MTE_IMPLEMENTED_ASY));
	scr_el3 |= SCR_ATA_BIT;
#else
	if (mte == MTE_IMPLEMENTED_EL0) {
		scr_el3 |= SCR_ATA_BIT;
	}
#endif /* CTX_INCLUDE_MTE_REGS */

	/* Enable S-EL2 if the next EL is EL2 and S-EL2 is present */
	if ((GET_EL(ep->spsr) == MODE_EL2) && is_armv8_4_sel2_present()) {
		if (GET_RW(ep->spsr) != MODE_RW_64) {
			ERROR("S-EL2 cannot be used in AArch32.\n");
			panic();
		}

		scr_el3 |= SCR_EEL2_BIT;
	}

	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);

	manage_extensions_secure(ctx);
}

#if ENABLE_RME
/******************************************************************************
 * This function performs initializations that are specific to REALM state
 * and updates the cpu context specified by 'ctx'.
 *****************************************************************************/
static void setup_realm_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

	scr_el3 |= SCR_NS_BIT | SCR_NSE_BIT | SCR_EnSCXT_BIT;

	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
}
#endif /* ENABLE_RME */
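
/*
 * Illustrative sketch (not part of the build): the world-specific setup
 * functions above all follow the same read-modify-write pattern on the
 * copy of SCR_EL3 held in the context, rather than on the live register.
 * A hypothetical setup routine for some world would look like:
 *
 *	el3_state_t *state = get_el3state_ctx(ctx);
 *	u_register_t scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
 *
 *	scr_el3 |= SCR_NS_BIT;			// world-specific bits
 *	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
 *
 * The hardware SCR_EL3 register is only updated from this stored value
 * when the context is restored prior to ERET.
 */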
/******************************************************************************
 * This function performs initializations that are specific to NON-SECURE state
 * and updates the cpu context specified by 'ctx'.
 *****************************************************************************/
static void setup_ns_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

	/* SCR_NS: Set the NS bit */
	scr_el3 |= SCR_NS_BIT;

#if !CTX_INCLUDE_PAUTH_REGS
	/*
	 * If the pointer authentication registers aren't saved during world
	 * switches, the value of the registers can be leaked from the Secure
	 * to the Non-secure world. To prevent this, rather than enabling
	 * pointer authentication everywhere, we only enable it in the
	 * Non-secure world.
	 *
	 * If the Secure world wants to use pointer authentication,
	 * CTX_INCLUDE_PAUTH_REGS must be set to 1.
	 */
	scr_el3 |= SCR_API_BIT | SCR_APK_BIT;
#endif /* !CTX_INCLUDE_PAUTH_REGS */

	/* Allow access to Allocation Tags when MTE is implemented. */
	scr_el3 |= SCR_ATA_BIT;

#ifdef IMAGE_BL31
	/*
	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
	 * indicated by the interrupt routing model for BL31.
	 */
	scr_el3 |= get_scr_el3_from_routing_model(NON_SECURE);
#endif
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
}

/*******************************************************************************
 * The following function performs initialization of the cpu_context 'ctx'
 * for first use that is common to all security states, and sets the
 * initial entrypoint state as specified by the entry_point_info structure.
 *
 * The EE and ST attributes are used to configure the endianness and secure
 * timer availability for the new execution context.
 ******************************************************************************/
static void setup_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;
	gp_regs_t *gp_regs;
	u_register_t sctlr_elx, actlr_elx;

	/* Clear any residual register values from the context */
	zeromem(ctx, sizeof(*ctx));

	/*
	 * SCR_EL3 was initialised during the reset sequence in macro
	 * el3_arch_init_common. This code modifies the SCR_EL3 fields that
	 * affect the next EL.
	 *
	 * The following fields are initially set to zero and then updated to
	 * the required value depending on the state of the SPSR_EL3 and the
	 * Security state and entrypoint attributes of the next EL.
	 */
	scr_el3 = read_scr();
	scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT |
			SCR_ST_BIT | SCR_HCE_BIT | SCR_NSE_BIT);

	/*
	 * SCR_EL3.RW: Set the execution state, AArch32 or AArch64, for the
	 * next Exception level as specified by SPSR.
	 */
	if (GET_RW(ep->spsr) == MODE_RW_64) {
		scr_el3 |= SCR_RW_BIT;
	}

	/*
	 * SCR_EL3.ST: Traps Secure EL1 accesses to the Counter-timer Physical
	 * Secure timer registers to EL3, from AArch64 state only, if specified
	 * by the entrypoint attributes.
	 */
	if (EP_GET_ST(ep->h.attr) != 0U) {
		scr_el3 |= SCR_ST_BIT;
	}

	/*
	 * If FEAT_HCX is enabled, enable access to HCRX_EL2 by setting
	 * SCR_EL3.HXEn.
	 */
#if ENABLE_FEAT_HCX
	scr_el3 |= SCR_HXEn_BIT;
#endif
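
	/*
	 * Illustrative sketch (not part of the build): the SPSR and attribute
	 * checks in this function consume values that the caller placed in
	 * the entry_point_info beforehand, typically along these lines (the
	 * exact flags depend on the image being set up):
	 *
	 *	ep->spsr = SPSR_64(MODE_EL2, MODE_SP_ELX,
	 *			   DISABLE_ALL_EXCEPTIONS);
	 *	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1,
	 *		       SECURE | EP_ST_ENABLE);
	 *
	 * GET_RW(), GET_EL() and EP_GET_ST() in this function decode those
	 * fields.
	 */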
#if RAS_TRAP_LOWER_EL_ERR_ACCESS
	/*
	 * SCR_EL3.TERR: Trap Error record accesses. Accesses to the RAS ERR
	 * and RAS ERX registers from EL1 and EL2 are trapped to EL3.
	 */
	scr_el3 |= SCR_TERR_BIT;
#endif

#if !HANDLE_EA_EL3_FIRST
	/*
	 * SCR_EL3.EA: Do not route External Abort and SError Interrupt
	 * exceptions to EL3 when executing at a lower EL. When executing at
	 * EL3, External Aborts are taken to EL3.
	 */
	scr_el3 &= ~SCR_EA_BIT;
#endif

#if FAULT_INJECTION_SUPPORT
	/* Enable fault injection from lower ELs */
	scr_el3 |= SCR_FIEN_BIT;
#endif

	/*
	 * CPTR_EL3 was initialized out of reset, copy that value to the
	 * context register.
	 */
	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, read_cptr_el3());

	/*
	 * SCR_EL3.HCE: Enable HVC instructions if the next execution state is
	 * AArch64 and the next EL is EL2, or if the next execution state is
	 * AArch32 and the next mode is Hyp.
	 * SCR_EL3.FGTEn: Enable Fine Grained Virtualization Traps under the
	 * same conditions as HVC instructions and when the processor supports
	 * ARMv8.6-FGT.
	 * SCR_EL3.ECVEn: Enable the Enhanced Counter Virtualization (ECV)
	 * CNTPOFF_EL2 register under the same conditions as HVC instructions
	 * and when the processor supports ECV.
	 */
	if (((GET_RW(ep->spsr) == MODE_RW_64) && (GET_EL(ep->spsr) == MODE_EL2))
	    || ((GET_RW(ep->spsr) != MODE_RW_64)
		&& (GET_M32(ep->spsr) == MODE32_hyp))) {
		scr_el3 |= SCR_HCE_BIT;

		if (is_armv8_6_fgt_present()) {
			scr_el3 |= SCR_FGTEN_BIT;
		}

		if (get_armv8_6_ecv_support()
		    == ID_AA64MMFR0_EL1_ECV_SELF_SYNCH) {
			scr_el3 |= SCR_ECVEN_BIT;
		}
	}

	/*
	 * FEAT_AMUv1p1 virtual offset registers are only accessible from EL3
	 * and EL2. When clear, this bit traps accesses from EL2, so we set it
	 * to 1 when EL2 is present.
	 */
	if (is_armv8_6_feat_amuv1p1_present() &&
		(el_implemented(2) != EL_IMPL_NONE)) {
		scr_el3 |= SCR_AMVOFFEN_BIT;
	}

	/*
	 * Initialise SCTLR_EL1 to the reset value corresponding to the target
	 * execution state, setting all fields rather than relying on the hw.
	 * Some fields have architecturally UNKNOWN reset values and these are
	 * set to zero.
	 *
	 * SCTLR.EE: Endianness is taken from the entrypoint attributes.
	 *
	 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
	 * required by the PSCI specification).
	 */
	sctlr_elx = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0U;
	if (GET_RW(ep->spsr) == MODE_RW_64) {
		sctlr_elx |= SCTLR_EL1_RES1;
	} else {
		/*
		 * If the target execution state is AArch32 then the following
		 * fields need to be set.
		 *
		 * SCTLR_EL1.nTWE: Set to one so that EL0 execution of WFE
		 * instructions is not trapped to EL1.
		 *
		 * SCTLR_EL1.nTWI: Set to one so that EL0 execution of WFI
		 * instructions is not trapped to EL1.
		 *
		 * SCTLR_EL1.CP15BEN: Set to one to enable EL0 execution of the
		 * CP15DMB, CP15DSB, and CP15ISB instructions.
		 */
		sctlr_elx |= SCTLR_AARCH32_EL1_RES1 | SCTLR_CP15BEN_BIT
					| SCTLR_NTWI_BIT | SCTLR_NTWE_BIT;
	}
#if ERRATA_A75_764081
	/*
	 * If the workaround for Cortex-A75 erratum 764081 is used then set
	 * SCTLR_EL1.IESB to enable Implicit Error Synchronization Barrier.
	 */
	sctlr_elx |= SCTLR_IESB_BIT;
#endif

	/* Enable WFE trap delay in SCR_EL3 if supported and configured */
	if (is_armv8_6_twed_present()) {
		uint32_t delay = plat_arm_set_twedel_scr_el3();

		if (delay != TWED_DISABLED) {
			/* Make sure delay value fits */
			assert((delay & ~SCR_TWEDEL_MASK) == 0U);

			/* Set delay in SCR_EL3 */
			scr_el3 &= ~(SCR_TWEDEL_MASK << SCR_TWEDEL_SHIFT);
			scr_el3 |= ((delay & SCR_TWEDEL_MASK)
					<< SCR_TWEDEL_SHIFT);

			/* Enable WFE delay */
			scr_el3 |= SCR_TWEDEn_BIT;
		}
	}

	/*
	 * Store the initialised SCTLR_EL1 value in the cpu_context - SCTLR_EL2
	 * and other EL2 registers are set up by cm_prepare_el3_exit() as they
	 * are not part of the stored cpu_context.
	 */
	write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);

	/*
	 * Base the context ACTLR_EL1 on the current value, as it is
	 * implementation defined. The context restore process will write
	 * the value from the context to the actual register and can cause
	 * problems for processor cores that don't expect certain bits to
	 * be zero.
	 */
	actlr_elx = read_actlr_el1();
	write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));

	/*
	 * Populate EL3 state so that we have the right context
	 * before doing ERET.
	 */
	state = get_el3state_ctx(ctx);
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
	write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
	write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr);

	/*
	 * Store the X0-X7 values from the entrypoint into the context.
	 * Use memcpy as we are in control of the layout of the structures.
	 */
	gp_regs = get_gpregs_ctx(ctx);
	memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
}

/*******************************************************************************
 * Context management library initialization routine. This library is used by
 * runtime services to share pointers to 'cpu_context' structures for the
 * secure, non-secure and realm states. Management of the structures and their
 * associated memory is not done by the context management library e.g. the
 * PSCI service manages the cpu context used for entry from and exit to the
 * non-secure state. The Secure payload dispatcher service manages the
 * context(s) corresponding to the secure state. It also uses this library to
 * get access to the non-secure state cpu context pointers.
 * Lastly, this library provides the API to make SP_EL3 point to the cpu
 * context which will be used for programming an entry into a lower EL. The
 * same context will be used to save state upon exception entry from that EL.
 ******************************************************************************/
void __init cm_init(void)
{
	/*
	 * The context management library has only global data to initialize,
	 * but that will be done when the BSS is zeroed out.
	 */
}
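
/*
 * Illustrative sketch (not part of the build): a typical caller obtains an
 * entry_point_info for the next image and hands it to this library; for
 * example, BL31 setup code does something along these lines (using the
 * standard platform API):
 *
 *	entry_point_info_t *ep;
 *
 *	ep = bl31_plat_get_next_image_ep_info(NON_SECURE);
 *	assert(ep != NULL);
 *	cm_init_my_context(ep);
 *
 * cm_setup_context() below is the common entry point that both
 * cm_init_my_context() and cm_init_context_by_index() funnel into.
 */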
/*******************************************************************************
 * This is the high-level function used to initialize the cpu_context 'ctx' for
 * first use. It performs initializations that are common to all security
 * states and initializations specific to the security state specified in 'ep'.
 ******************************************************************************/
void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	unsigned int security_state;

	assert(ctx != NULL);

	/*
	 * Perform initializations that are common
	 * to all security states.
	 */
	setup_context_common(ctx, ep);

	security_state = GET_SECURITY_STATE(ep->h.attr);

	/* Perform security state specific initializations */
	switch (security_state) {
	case SECURE:
		setup_secure_context(ctx, ep);
		break;
#if ENABLE_RME
	case REALM:
		setup_realm_context(ctx, ep);
		break;
#endif
	case NON_SECURE:
		setup_ns_context(ctx, ep);
		break;
	default:
		ERROR("Invalid security state\n");
		panic();
		break;
	}
}

/*******************************************************************************
 * Enable architecture extensions on first entry to the Non-secure world.
 * `el2_unused` is true when EL2 is implemented but unused, and false
 * otherwise.
 ******************************************************************************/
static void manage_extensions_nonsecure(bool el2_unused, cpu_context_t *ctx)
{
#if IMAGE_BL31
#if ENABLE_SPE_FOR_LOWER_ELS
	spe_enable(el2_unused);
#endif

#if ENABLE_AMU
	amu_enable(el2_unused, ctx);
#endif

#if ENABLE_SME_FOR_NS
	/* Enable SME, SVE, and FPU/SIMD for the non-secure world. */
	sme_enable(ctx);
#elif ENABLE_SVE_FOR_NS
	/* Enable SVE and FPU/SIMD for the non-secure world. */
	sve_enable(ctx);
#endif

#if ENABLE_MPAM_FOR_LOWER_ELS
	mpam_enable(el2_unused);
#endif

#if ENABLE_TRBE_FOR_NS
	trbe_enable();
#endif /* ENABLE_TRBE_FOR_NS */

#if ENABLE_SYS_REG_TRACE_FOR_NS
	sys_reg_trace_enable(ctx);
#endif /* ENABLE_SYS_REG_TRACE_FOR_NS */

#if ENABLE_TRF_FOR_NS
	trf_enable();
#endif /* ENABLE_TRF_FOR_NS */
#endif /* IMAGE_BL31 */
}
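
/*
 * Illustrative sketch (not part of the build): the extension enable/disable
 * helpers called above generally operate on the stored context rather than
 * the live registers. For instance, an SVE enable helper along the lines of
 * the one in lib/extensions/sve flips the relevant CPTR_EL3 bits in the
 * context copy:
 *
 *	u_register_t cptr_el3;
 *
 *	cptr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);
 *	cptr_el3 = (cptr_el3 | CPTR_EZ_BIT) & ~TFP_BIT;	// untrap SVE and FP
 *	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, cptr_el3);
 *
 * The change only takes effect when the context is restored on exit to the
 * lower EL.
 */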
/*******************************************************************************
 * Enable architecture extensions on first entry to the Secure world.
 ******************************************************************************/
static void manage_extensions_secure(cpu_context_t *ctx)
{
#if IMAGE_BL31
#if ENABLE_SME_FOR_NS
#if ENABLE_SME_FOR_SWD
	/*
	 * Enable SME, SVE, FPU/SIMD in the secure context; the secure manager
	 * must ensure that the SME, SVE, and FPU/SIMD context is properly
	 * managed.
	 */
	sme_enable(ctx);
#else /* ENABLE_SME_FOR_SWD */
	/*
	 * Disable SME, SVE, FPU/SIMD in the secure context so the non-secure
	 * world can safely use the associated registers.
	 */
	sme_disable(ctx);
#endif /* ENABLE_SME_FOR_SWD */
#elif ENABLE_SVE_FOR_NS
#if ENABLE_SVE_FOR_SWD
	/*
	 * Enable SVE and FPU in the secure context; the secure manager must
	 * ensure that the SVE and FPU register contexts are properly managed.
	 */
	sve_enable(ctx);
#else /* ENABLE_SVE_FOR_SWD */
	/*
	 * Disable SVE and FPU in the secure context so the non-secure world
	 * can safely use them.
	 */
	sve_disable(ctx);
#endif /* ENABLE_SVE_FOR_SWD */
#endif /* ENABLE_SVE_FOR_NS */
#endif /* IMAGE_BL31 */
}

/*******************************************************************************
 * The following function initializes the cpu_context for a CPU specified by
 * its `cpu_idx` for first use, and sets the initial entrypoint state as
 * specified by the entry_point_info structure.
 ******************************************************************************/
void cm_init_context_by_index(unsigned int cpu_idx,
			      const entry_point_info_t *ep)
{
	cpu_context_t *ctx;

	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}

/*******************************************************************************
 * The following function initializes the cpu_context for the current CPU
 * for first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 ******************************************************************************/
void cm_init_my_context(const entry_point_info_t *ep)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}
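
/*
 * Illustrative sketch (not part of the build): a runtime service that wants
 * to enter a freshly initialised world typically chains the APIs above and
 * below, roughly:
 *
 *	cm_init_my_context(ep);			// populate the context
 *	cm_prepare_el3_exit(NON_SECURE);	// program EL1/EL2 registers
 *
 * and then leaves EL3 via the el3_exit() path, which ERETs using the
 * context selected by cm_set_next_eret_context().
 */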
/*******************************************************************************
 * Prepare the CPU system registers for first entry into realm, secure, or
 * normal world.
 *
 * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized.
 * If execution is requested to non-secure EL1 or svc mode, and the CPU
 * supports EL2 then EL2 is disabled by configuring all necessary EL2
 * registers.
 * For all entries, the EL1 registers are initialized from the cpu_context.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	u_register_t sctlr_elx, scr_el3, mdcr_el2;
	cpu_context_t *ctx = cm_get_context(security_state);
	bool el2_unused = false;
	uint64_t hcr_el2 = 0U;

	assert(ctx != NULL);

	if (security_state == NON_SECURE) {
		scr_el3 = read_ctx_reg(get_el3state_ctx(ctx),
				       CTX_SCR_EL3);
		if ((scr_el3 & SCR_HCE_BIT) != 0U) {
			/* Use SCTLR_EL1.EE value to initialise sctlr_el2 */
			sctlr_elx = read_ctx_reg(get_el1_sysregs_ctx(ctx),
						 CTX_SCTLR_EL1);
			sctlr_elx &= SCTLR_EE_BIT;
			sctlr_elx |= SCTLR_EL2_RES1;
#if ERRATA_A75_764081
			/*
			 * If the workaround for Cortex-A75 erratum 764081 is
			 * used then set SCTLR_EL2.IESB to enable Implicit
			 * Error Synchronization Barrier.
			 */
			sctlr_elx |= SCTLR_IESB_BIT;
#endif
			write_sctlr_el2(sctlr_elx);
		} else if (el_implemented(2) != EL_IMPL_NONE) {
			el2_unused = true;

			/*
			 * EL2 present but unused, need to disable safely.
			 * SCTLR_EL2 can be ignored in this case.
			 *
			 * Set EL2 register width appropriately: set the
			 * HCR_EL2 field to match SCR_EL3.RW.
			 */
			if ((scr_el3 & SCR_RW_BIT) != 0U)
				hcr_el2 |= HCR_RW_BIT;

			/*
			 * For the Armv8.3 pointer authentication feature,
			 * disable traps to EL2 when accessing key registers
			 * or using pointer authentication instructions from
			 * lower ELs.
			 */
			hcr_el2 |= (HCR_API_BIT | HCR_APK_BIT);

			write_hcr_el2(hcr_el2);

			/*
			 * Initialise CPTR_EL2 setting all fields rather than
			 * relying on the hw. All fields have architecturally
			 * UNKNOWN reset values.
			 *
			 * CPTR_EL2.TCPAC: Set to zero so that Non-secure EL1
			 * accesses to the CPACR_EL1 or CPACR from both
			 * Execution states do not trap to EL2.
			 *
			 * CPTR_EL2.TTA: Set to zero so that Non-secure System
			 * register accesses to the trace registers from both
			 * Execution states do not trap to EL2.
			 * If PE trace unit System registers are not
			 * implemented then this bit is reserved, and must be
			 * set to zero.
			 *
			 * CPTR_EL2.TFP: Set to zero so that Non-secure
			 * accesses to SIMD and floating-point functionality
			 * from both Execution states do not trap to EL2.
			 */
			write_cptr_el2(CPTR_EL2_RESET_VAL &
					~(CPTR_EL2_TCPAC_BIT | CPTR_EL2_TTA_BIT
					| CPTR_EL2_TFP_BIT));

			/*
			 * Initialise CNTHCTL_EL2. All fields are
			 * architecturally UNKNOWN on reset and are set to zero
			 * except for field(s) listed below.
			 *
			 * CNTHCTL_EL2.EL1PCEN: Set to one to disable traps to
			 * Hyp mode of Non-secure EL0 and EL1 accesses to the
			 * physical timer registers.
			 *
			 * CNTHCTL_EL2.EL1PCTEN: Set to one to disable traps to
			 * Hyp mode of Non-secure EL0 and EL1 accesses to the
			 * physical counter registers.
			 */
			write_cnthctl_el2(CNTHCTL_RESET_VAL |
						EL1PCEN_BIT | EL1PCTEN_BIT);

			/*
			 * Initialise CNTVOFF_EL2 to zero as it resets to an
			 * architecturally UNKNOWN value.
			 */
			write_cntvoff_el2(0);

			/*
			 * Set VPIDR_EL2 and VMPIDR_EL2 to match MIDR_EL1 and
			 * MPIDR_EL1 respectively.
			 */
			write_vpidr_el2(read_midr_el1());
			write_vmpidr_el2(read_mpidr_el1());

			/*
			 * Initialise VTTBR_EL2. All fields are architecturally
			 * UNKNOWN on reset.
			 *
			 * VTTBR_EL2.VMID: Set to zero. Even though EL1&0 stage
			 * 2 address translation is disabled, cache maintenance
			 * operations depend on the VMID.
			 *
			 * VTTBR_EL2.BADDR: Set to zero as EL1&0 stage 2
			 * address translation is disabled.
			 */
			write_vttbr_el2(VTTBR_RESET_VAL &
				~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT)
				| (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT)));

			/*
			 * Initialise MDCR_EL2, setting all fields rather than
			 * relying on hw. Some fields are architecturally
			 * UNKNOWN on reset.
			 *
			 * MDCR_EL2.HLP: Set to one so that event counter
			 * overflow, that is recorded in PMOVSCLR_EL0[0-30],
			 * occurs on the increment that changes
			 * PMEVCNTR<n>_EL0[63] from 1 to 0, when ARMv8.5-PMU is
			 * implemented. This bit is RES0 in versions of the
			 * architecture earlier than ARMv8.5; setting it to 1
			 * doesn't have any effect on them.
			 *
			 * MDCR_EL2.TTRF: Set to zero so that access to the
			 * Trace Filter Control register TRFCR_EL1 at EL1 is
			 * not trapped to EL2. This bit is RES0 in versions of
			 * the architecture earlier than ARMv8.4.
			 *
			 * MDCR_EL2.HPMD: Set to one so that event counting is
			 * prohibited at EL2. This bit is RES0 in versions of
			 * the architecture earlier than ARMv8.1; setting it
			 * to 1 doesn't have any effect on them.
			 *
			 * MDCR_EL2.TPMS: Set to zero so that accesses to
			 * Statistical Profiling control registers from EL1
			 * do not trap to EL2. This bit is RES0 when SPE is
			 * not implemented.
			 *
			 * MDCR_EL2.TDRA: Set to zero so that Non-secure EL0
			 * and EL1 System register accesses to the Debug ROM
			 * registers are not trapped to EL2.
			 *
			 * MDCR_EL2.TDOSA: Set to zero so that Non-secure EL1
			 * System register accesses to the powerdown debug
			 * registers are not trapped to EL2.
			 *
			 * MDCR_EL2.TDA: Set to zero so that System register
			 * accesses to the debug registers do not trap to EL2.
			 *
			 * MDCR_EL2.TDE: Set to zero so that debug exceptions
			 * are not routed to EL2.
			 *
			 * MDCR_EL2.HPME: Set to zero to disable EL2
			 * Performance Monitors.
			 *
			 * MDCR_EL2.TPM: Set to zero so that Non-secure EL0
			 * and EL1 accesses to all Performance Monitors
			 * registers are not trapped to EL2.
			 *
			 * MDCR_EL2.TPMCR: Set to zero so that Non-secure EL0
			 * and EL1 accesses to the PMCR_EL0 or PMCR are not
			 * trapped to EL2.
			 *
			 * MDCR_EL2.HPMN: Set to the value of PMCR_EL0.N,
			 * which is the architecturally-defined reset value.
			 *
			 * MDCR_EL2.E2TB: Set to zero so that the trace Buffer
			 * owning exception level is NS-EL1 and tracing is
			 * prohibited at NS-EL2. These bits are RES0 when
			 * FEAT_TRBE is not implemented.
			 */
			mdcr_el2 = ((MDCR_EL2_RESET_VAL | MDCR_EL2_HLP |
					MDCR_EL2_HPMD) |
					((read_pmcr_el0() & PMCR_EL0_N_BITS)
					>> PMCR_EL0_N_SHIFT)) &
					~(MDCR_EL2_TTRF | MDCR_EL2_TPMS |
					MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT |
					MDCR_EL2_TDA_BIT | MDCR_EL2_TDE_BIT |
					MDCR_EL2_HPME_BIT | MDCR_EL2_TPM_BIT |
					MDCR_EL2_TPMCR_BIT |
					MDCR_EL2_E2TB(MDCR_EL2_E2TB_EL1));

			write_mdcr_el2(mdcr_el2);

			/*
			 * Initialise HSTR_EL2. All fields are architecturally
			 * UNKNOWN on reset.
			 *
			 * HSTR_EL2.T<n>: Set all these fields to zero so that
			 * Non-secure EL0 or EL1 accesses to System registers
			 * do not trap to EL2.
			 */
			write_hstr_el2(HSTR_EL2_RESET_VAL & ~(HSTR_EL2_T_MASK));

			/*
			 * Initialise CNTHP_CTL_EL2. All fields are
			 * architecturally UNKNOWN on reset.
			 *
			 * CNTHP_CTL_EL2.ENABLE: Set to zero to disable the EL2
			 * physical timer and prevent timer interrupts.
			 */
			write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL &
						~(CNTHP_CTL_ENABLE_BIT));
		}
		manage_extensions_nonsecure(el2_unused, ctx);
	}

	cm_el1_sysregs_context_restore(security_state);
	cm_set_next_eret_context(security_state);
}

#if CTX_INCLUDE_EL2_REGS
/*******************************************************************************
 * Save EL2 sysreg context
 ******************************************************************************/
void cm_el2_sysregs_context_save(uint32_t security_state)
{
	u_register_t scr_el3 = read_scr();

	/*
	 * Always save the non-secure and realm EL2 context; only save the
	 * S-EL2 context if S-EL2 is enabled.
	 */
	if ((security_state != SECURE) ||
	    ((security_state == SECURE) && ((scr_el3 & SCR_EEL2_BIT) != 0U))) {
		cpu_context_t *ctx;

		ctx = cm_get_context(security_state);
		assert(ctx != NULL);

		el2_sysregs_context_save(get_el2_sysregs_ctx(ctx));
	}
}
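
/*
 * Illustrative sketch (not part of the build): on a world switch, a
 * dispatcher pairs the save and restore calls so that the outgoing world's
 * EL2 state is preserved before the incoming world's is installed, roughly:
 *
 *	cm_el2_sysregs_context_save(NON_SECURE);
 *	cm_el2_sysregs_context_restore(SECURE);
 *	cm_set_next_eret_context(SECURE);
 */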
/*******************************************************************************
 * Restore EL2 sysreg context
 ******************************************************************************/
void cm_el2_sysregs_context_restore(uint32_t security_state)
{
	u_register_t scr_el3 = read_scr();

	/*
	 * Always restore the non-secure and realm EL2 context; only restore
	 * the S-EL2 context if S-EL2 is enabled.
	 */
	if ((security_state != SECURE) ||
	    ((security_state == SECURE) && ((scr_el3 & SCR_EEL2_BIT) != 0U))) {
		cpu_context_t *ctx;

		ctx = cm_get_context(security_state);
		assert(ctx != NULL);

		el2_sysregs_context_restore(get_el2_sysregs_ctx(ctx));
	}
}
#endif /* CTX_INCLUDE_EL2_REGS */

/*******************************************************************************
 * The next four functions are used by runtime services to save and restore
 * EL1 context on the 'cpu_context' structure for the specified security
 * state.
 ******************************************************************************/
void cm_el1_sysregs_context_save(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	el1_sysregs_context_save(get_el1_sysregs_ctx(ctx));

#if IMAGE_BL31
	if (security_state == SECURE)
		PUBLISH_EVENT(cm_exited_secure_world);
	else
		PUBLISH_EVENT(cm_exited_normal_world);
#endif
}

void cm_el1_sysregs_context_restore(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	el1_sysregs_context_restore(get_el1_sysregs_ctx(ctx));

#if IMAGE_BL31
	if (security_state == SECURE)
		PUBLISH_EVENT(cm_entering_secure_world);
	else
		PUBLISH_EVENT(cm_entering_normal_world);
#endif
}

/*******************************************************************************
 * This function populates the ELR_EL3 member of the 'cpu_context' pertaining
 * to the given security state with the given entrypoint.
 ******************************************************************************/
void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint)
{
	cpu_context_t *ctx;
	el3_state_t *state;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Populate EL3 state so that ERET jumps to the correct entry */
	state = get_el3state_ctx(ctx);
	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
}

/*******************************************************************************
 * This function populates the ELR_EL3 and SPSR_EL3 members of the
 * 'cpu_context' pertaining to the given security state.
 ******************************************************************************/
void cm_set_elr_spsr_el3(uint32_t security_state,
			 uintptr_t entrypoint, uint32_t spsr)
{
	cpu_context_t *ctx;
	el3_state_t *state;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Populate EL3 state so that ERET jumps to the correct entry */
	state = get_el3state_ctx(ctx);
	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
	write_ctx_reg(state, CTX_SPSR_EL3, spsr);
}
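
/*
 * Illustrative sketch (not part of the build): a dispatcher that wants a
 * lower EL to resume at a new entrypoint can combine cm_set_elr_spsr_el3()
 * with an SPSR built via the SPSR_64() macro, e.g. to return to NS-EL1
 * (ns_entrypoint is a hypothetical address):
 *
 *	cm_set_elr_spsr_el3(NON_SECURE, ns_entrypoint,
 *			    SPSR_64(MODE_EL1, MODE_SP_ELX,
 *				    DISABLE_ALL_EXCEPTIONS));
 */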
/*******************************************************************************
 * This function updates a single bit in the SCR_EL3 member of the
 * 'cpu_context' pertaining to the given security state using the value and
 * bit position specified in the parameters. It preserves all other bits.
 ******************************************************************************/
void cm_write_scr_el3_bit(uint32_t security_state,
			  uint32_t bit_pos,
			  uint32_t value)
{
	cpu_context_t *ctx;
	el3_state_t *state;
	u_register_t scr_el3;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Ensure that the bit position is a valid one */
	assert(((1UL << bit_pos) & SCR_VALID_BIT_MASK) != 0U);

	/* Ensure that the 'value' is only a bit wide */
	assert(value <= 1U);

	/*
	 * Get the SCR_EL3 value from the cpu context, clear the desired bit
	 * and set it to its new value.
	 */
	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
	scr_el3 &= ~(1UL << bit_pos);
	scr_el3 |= (u_register_t)value << bit_pos;
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
}

/*******************************************************************************
 * This function retrieves the SCR_EL3 member of the 'cpu_context' pertaining
 * to the given security state.
 ******************************************************************************/
u_register_t cm_get_scr_el3(uint32_t security_state)
{
	cpu_context_t *ctx;
	el3_state_t *state;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Return the stored SCR_EL3 value for this security state */
	state = get_el3state_ctx(ctx);
	return read_ctx_reg(state, CTX_SCR_EL3);
}

/*******************************************************************************
 * This function is used to program the context that's used for exception
 * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
 * the required security state.
 ******************************************************************************/
void cm_set_next_eret_context(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	cm_set_next_context(ctx);
}
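
/*
 * Illustrative sketch (not part of the build): the interrupt management
 * framework uses cm_write_scr_el3_bit() to flip the SCR_EL3.IRQ/FIQ routing
 * bits for a security state without disturbing the rest of the register.
 * For example, routing Non-secure IRQs to EL3 (SCR_EL3.IRQ is bit 1):
 *
 *	cm_write_scr_el3_bit(NON_SECURE, 1U, 1U);
 *
 * The updated value takes effect the next time the Non-secure context is
 * used for an ERET.
 */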