/*
 * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2022, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <drivers/arm/gicv3.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/brbe.h>
#include <lib/extensions/mpam.h>
#include <lib/extensions/pmuv3.h>
#include <lib/extensions/sme.h>
#include <lib/extensions/spe.h>
#include <lib/extensions/sve.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/extensions/trbe.h>
#include <lib/extensions/trf.h>
#include <lib/utils.h>

#if ENABLE_FEAT_TWED
/* Make sure the delay value fits within the range (0-15) */
CASSERT(((TWED_DELAY & ~SCR_TWEDEL_MASK) == 0U), assert_twed_delay_value_check);
#endif /* ENABLE_FEAT_TWED */

per_world_context_t per_world_context[CPU_DATA_CONTEXT_NUM];
static bool has_secure_perworld_init;

static void manage_extensions_nonsecure(cpu_context_t *ctx);
static void manage_extensions_secure(cpu_context_t *ctx);
static void manage_extensions_secure_per_world(void);

static void setup_el1_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t sctlr_elx, actlr_elx;

	/*
	 * Initialise SCTLR_EL1 to the reset value corresponding to the target
	 * execution state, setting all fields rather than relying on the hw.
	 * Some fields have architecturally UNKNOWN reset values and these are
	 * set to zero.
	 *
	 * SCTLR.EE: Endianness is taken from the entrypoint attributes.
	 *
	 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
	 * required by the PSCI specification).
	 */
	sctlr_elx = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0UL;
	if (GET_RW(ep->spsr) == MODE_RW_64) {
		sctlr_elx |= SCTLR_EL1_RES1;
	} else {
		/*
		 * If the target execution state is AArch32 then the following
		 * fields need to be set.
		 *
		 * SCTLR_EL1.nTWE: Set to one so that EL0 execution of WFE
		 * instructions is not trapped to EL1.
		 *
		 * SCTLR_EL1.nTWI: Set to one so that EL0 execution of WFI
		 * instructions is not trapped to EL1.
		 *
		 * SCTLR_EL1.CP15BEN: Set to one to enable EL0 execution of the
		 * CP15DMB, CP15DSB, and CP15ISB instructions.
		 */
		sctlr_elx |= SCTLR_AARCH32_EL1_RES1 | SCTLR_CP15BEN_BIT
					| SCTLR_NTWI_BIT | SCTLR_NTWE_BIT;
	}

#if ERRATA_A75_764081
	/*
	 * If the workaround for Cortex-A75 erratum 764081 is used then set
	 * SCTLR_EL1.IESB to enable the Implicit Error Synchronization Barrier.
	 */
	sctlr_elx |= SCTLR_IESB_BIT;
#endif
	/* Store the initialised SCTLR_EL1 value in the cpu_context */
	write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);

	/*
	 * Base the context ACTLR_EL1 on the current value, as it is
	 * implementation defined. The context restore process will write
	 * the value from the context to the actual register and can cause
	 * problems for processor cores that don't expect certain bits to
	 * be zero.
	 */
	actlr_elx = read_actlr_el1();
	write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));
}

/******************************************************************************
 * This function performs initializations that are specific to SECURE state
 * and updates the cpu context specified by 'ctx'.
 *****************************************************************************/
static void setup_secure_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

#if defined(IMAGE_BL31) && !defined(SPD_spmd)
	/*
	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
	 * indicated by the interrupt routing model for BL31.
	 */
	scr_el3 |= get_scr_el3_from_routing_model(SECURE);
#endif

	/* Allow access to Allocation Tags when MTE is implemented. */
	if (is_feat_mte_supported()) {
		scr_el3 |= SCR_ATA_BIT;
	}

	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);

	/*
	 * Initialize EL1 context registers unless SPMC is running
	 * at S-EL2.
	 */
#if !SPMD_SPM_AT_SEL2
	setup_el1_context(ctx, ep);
#endif

	manage_extensions_secure(ctx);

	/*
	 * manage_extensions_secure_per_world() has to be executed once, as the
	 * registers it initialises hold a constant value across all the CPUs
	 * for the secure world. This check ensures that they are initialised
	 * only once and avoids re-initialization from multiple cores.
	 */
	if (!has_secure_perworld_init) {
		manage_extensions_secure_per_world();
	}
}

#if ENABLE_RME
/******************************************************************************
 * This function performs initializations that are specific to REALM state
 * and updates the cpu context specified by 'ctx'.
 *****************************************************************************/
static void setup_realm_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

	scr_el3 |= SCR_NS_BIT | SCR_NSE_BIT;

	if (is_feat_csv2_2_supported()) {
		/* Enable access to the SCXTNUM_ELx registers. */
		scr_el3 |= SCR_EnSCXT_BIT;
	}

	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
}
#endif /* ENABLE_RME */

/******************************************************************************
 * This function performs initializations that are specific to NON-SECURE state
 * and updates the cpu context specified by 'ctx'.
 *****************************************************************************/
static void setup_ns_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

	/* SCR_NS: Set the NS bit */
	scr_el3 |= SCR_NS_BIT;

	/* Allow access to Allocation Tags when MTE is implemented. */
	scr_el3 |= SCR_ATA_BIT;

#if !CTX_INCLUDE_PAUTH_REGS
	/*
	 * Pointer Authentication, if present, is always enabled by default
	 * for the Non-secure lower exception levels; there is no explicit
	 * build flag to control this.
	 * The CTX_INCLUDE_PAUTH_REGS flag is explicitly used to enable it for
	 * the lower exception levels of the Secure and Realm worlds.
	 *
	 * To prevent leakage between the worlds during a world switch,
	 * we enable it only for the non-secure world.
	 *
	 * If the Secure/Realm world wants to use pointer authentication,
	 * CTX_INCLUDE_PAUTH_REGS must be explicitly set to 1, in which case
	 * it will be enabled globally for all the contexts.
	 *
	 * SCR_EL3.API: Set to one to not trap any PAuth instructions at ELs
	 * other than EL3
	 *
	 * SCR_EL3.APK: Set to one to not trap any PAuth key values at ELs
	 * other than EL3
	 */
	scr_el3 |= SCR_API_BIT | SCR_APK_BIT;

#endif /* CTX_INCLUDE_PAUTH_REGS */

#if HANDLE_EA_EL3_FIRST_NS
	/* SCR_EL3.EA: Route External Abort and SError Interrupt to EL3. */
	scr_el3 |= SCR_EA_BIT;
#endif

#if RAS_TRAP_NS_ERR_REC_ACCESS
	/*
	 * SCR_EL3.TERR: Trap Error record accesses. Accesses to the RAS ERR
	 * and RAS ERX registers from EL1 and EL2 (from any security state)
	 * are trapped to EL3. Set here to trap only for NS EL1/EL2.
	 */
	scr_el3 |= SCR_TERR_BIT;
#endif

	if (is_feat_csv2_2_supported()) {
		/* Enable access to the SCXTNUM_ELx registers. */
		scr_el3 |= SCR_EnSCXT_BIT;
	}

#ifdef IMAGE_BL31
	/*
	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
	 * indicated by the interrupt routing model for BL31.
	 */
	scr_el3 |= get_scr_el3_from_routing_model(NON_SECURE);
#endif
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);

	/* Initialize EL1 context registers */
	setup_el1_context(ctx, ep);

	/* Initialize EL2 context registers */
#if CTX_INCLUDE_EL2_REGS

	/*
	 * Initialize the SCTLR_EL2 context register using the endianness value
	 * taken from the entrypoint attribute.
	 */
	u_register_t sctlr_el2 = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0UL;
	sctlr_el2 |= SCTLR_EL2_RES1;
	write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_SCTLR_EL2,
			sctlr_el2);

	if (is_feat_hcx_supported()) {
		/*
		 * Initialize register HCRX_EL2 with its init value.
		 * As the value of HCRX_EL2 is UNKNOWN on reset, there is a
		 * chance that this can lead to unexpected behavior in lower
		 * ELs that have not been updated since the introduction of
		 * this feature if it is not properly initialized, especially
		 * when it comes to those bits that enable/disable traps.
		 */
		write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_HCRX_EL2,
			HCRX_EL2_INIT_VAL);
	}

	if (is_feat_fgt_supported()) {
		/*
		 * Initialize HFG*_EL2 registers with a default value so legacy
		 * systems unaware of FEAT_FGT do not get trapped due to their
		 * lack of initialization for this feature.
		 */
		write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_HFGITR_EL2,
			HFGITR_EL2_INIT_VAL);
		write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_HFGRTR_EL2,
			HFGRTR_EL2_INIT_VAL);
		write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_HFGWTR_EL2,
			HFGWTR_EL2_INIT_VAL);
	}
#endif /* CTX_INCLUDE_EL2_REGS */

	manage_extensions_nonsecure(ctx);
}

/*******************************************************************************
 * The following function performs initialization of the cpu_context 'ctx'
 * for first use that is common to all security states, and sets the
 * initial entrypoint state as specified by the entry_point_info structure.
 *
 * The EE and ST attributes are used to configure the endianness and secure
 * timer availability for the new execution context.
 ******************************************************************************/
static void setup_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;
	gp_regs_t *gp_regs;

	state = get_el3state_ctx(ctx);

	/* Clear any residual register values from the context */
	zeromem(ctx, sizeof(*ctx));

	/*
	 * The lower-EL context is zeroed so that no stale values leak to a world.
	 * It is assumed that an all-zero lower-EL context is good enough for it
	 * to boot correctly. However, there are very few registers where this
	 * is not true and some values need to be recreated.
	 */
#if CTX_INCLUDE_EL2_REGS
	el2_sysregs_t *el2_ctx = get_el2_sysregs_ctx(ctx);

	/*
	 * These bits are set in the gicv3 driver. Losing them (especially the
	 * SRE bit) is problematic for all worlds. Hence, recreate them.
	 */
	u_register_t icc_sre_el2 = ICC_SRE_DIB_BIT | ICC_SRE_DFB_BIT |
				   ICC_SRE_EN_BIT | ICC_SRE_SRE_BIT;
	write_ctx_reg(el2_ctx, CTX_ICC_SRE_EL2, icc_sre_el2);
#endif /* CTX_INCLUDE_EL2_REGS */

	/* Start with a clean SCR_EL3 copy as all relevant values are set */
	scr_el3 = SCR_RESET_VAL;

	/*
	 * SCR_EL3.TWE: Set to zero so that execution of WFE instructions at
	 * EL2, EL1 and EL0 is not trapped to EL3.
	 *
	 * SCR_EL3.TWI: Set to zero so that execution of WFI instructions at
	 * EL2, EL1 and EL0 is not trapped to EL3.
	 *
	 * SCR_EL3.SMD: Set to zero to enable SMC calls at EL1 and above, from
	 * both Security states and both Execution states.
	 *
	 * SCR_EL3.SIF: Set to one to disable secure instruction execution from
	 * Non-secure memory.
	 */
	scr_el3 &= ~(SCR_TWE_BIT | SCR_TWI_BIT | SCR_SMD_BIT);

	scr_el3 |= SCR_SIF_BIT;

	/*
	 * SCR_EL3.RW: Set the execution state, AArch32 or AArch64, for the next
	 * exception level as specified by the SPSR.
	 */
	if (GET_RW(ep->spsr) == MODE_RW_64) {
		scr_el3 |= SCR_RW_BIT;
	}

	/*
	 * SCR_EL3.ST: Traps Secure EL1 accesses to the Counter-timer Physical
	 * Secure timer registers to EL3, from AArch64 state only, if specified
	 * by the entrypoint attributes. If SEL2 is present and enabled, the ST
	 * bit always behaves as 1 (i.e. secure physical timer register access
	 * is not trapped).
	 */
	if (EP_GET_ST(ep->h.attr) != 0U) {
		scr_el3 |= SCR_ST_BIT;
	}

	/*
	 * If FEAT_HCX is enabled, enable access to HCRX_EL2 by setting
	 * SCR_EL3.HXEn.
	 */
	if (is_feat_hcx_supported()) {
		scr_el3 |= SCR_HXEn_BIT;
	}

	/*
	 * If FEAT_RNG_TRAP is enabled, all reads of the RNDR and RNDRRS
	 * registers are trapped to EL3.
	 */
#if ENABLE_FEAT_RNG_TRAP
	scr_el3 |= SCR_TRNDR_BIT;
#endif

#if FAULT_INJECTION_SUPPORT
	/* Enable fault injection from lower ELs */
	scr_el3 |= SCR_FIEN_BIT;
#endif

#if CTX_INCLUDE_PAUTH_REGS
	/*
	 * Enable Pointer Authentication globally for all the worlds.
	 *
	 * SCR_EL3.API: Set to one to not trap any PAuth instructions at ELs
	 * other than EL3
	 *
	 * SCR_EL3.APK: Set to one to not trap any PAuth key values at ELs
	 * other than EL3
	 */
	scr_el3 |= SCR_API_BIT | SCR_APK_BIT;
#endif /* CTX_INCLUDE_PAUTH_REGS */

	/*
	 * SCR_EL3.TCR2EN: Enable access to TCR2_ELx for AArch64 if present.
	 */
	if (is_feat_tcr2_supported() && (GET_RW(ep->spsr) == MODE_RW_64)) {
		scr_el3 |= SCR_TCR2EN_BIT;
	}

	/*
	 * SCR_EL3.PIEN: Enable permission indirection and overlay
	 * registers for AArch64 if present.
	 */
	if (is_feat_sxpie_supported() || is_feat_sxpoe_supported()) {
		scr_el3 |= SCR_PIEN_BIT;
	}

	/*
	 * SCR_EL3.GCSEn: Enable GCS registers for AArch64 if present.
	 */
	if ((is_feat_gcs_supported()) && (GET_RW(ep->spsr) == MODE_RW_64)) {
		scr_el3 |= SCR_GCSEn_BIT;
	}

	/*
	 * SCR_EL3.HCE: Enable HVC instructions if the next execution state is
	 * AArch64 and the next EL is EL2, or if the next execution state is
	 * AArch32 and the next mode is Hyp.
	 * SCR_EL3.FGTEn: Enable Fine Grained Virtualization Traps under the
	 * same conditions as HVC instructions and when the processor supports
	 * ARMv8.6-FGT.
	 * SCR_EL3.ECVEn: Enable the Enhanced Counter Virtualization (ECV)
	 * CNTPOFF_EL2 register under the same conditions as HVC instructions
	 * and when the processor supports ECV.
	 */
	if (((GET_RW(ep->spsr) == MODE_RW_64) && (GET_EL(ep->spsr) == MODE_EL2))
	    || ((GET_RW(ep->spsr) != MODE_RW_64)
		&& (GET_M32(ep->spsr) == MODE32_hyp))) {
		scr_el3 |= SCR_HCE_BIT;

		if (is_feat_fgt_supported()) {
			scr_el3 |= SCR_FGTEN_BIT;
		}

		if (is_feat_ecv_supported()) {
			scr_el3 |= SCR_ECVEN_BIT;
		}
	}

	/* Enable WFE trap delay in SCR_EL3 if supported and configured */
	if (is_feat_twed_supported()) {
		/* Set delay in SCR_EL3 */
		scr_el3 &= ~(SCR_TWEDEL_MASK << SCR_TWEDEL_SHIFT);
		scr_el3 |= ((TWED_DELAY & SCR_TWEDEL_MASK)
				<< SCR_TWEDEL_SHIFT);

		/* Enable WFE delay */
		scr_el3 |= SCR_TWEDEn_BIT;
	}

#if IMAGE_BL31 && defined(SPD_spmd) && SPMD_SPM_AT_SEL2
	/* Enable S-EL2 if FEAT_SEL2 is implemented for all the contexts. */
	if (is_feat_sel2_supported()) {
		scr_el3 |= SCR_EEL2_BIT;
	}
#endif /* (IMAGE_BL31 && defined(SPD_spmd) && SPMD_SPM_AT_SEL2) */

	/*
	 * Populate EL3 state so that we have the right context
	 * before doing ERET.
	 */
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
	write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
	write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr);

	/*
	 * Store the X0-X7 values from the entrypoint into the context.
	 * Use memcpy as we are in control of the layout of the structures.
	 */
	gp_regs = get_gpregs_ctx(ctx);
	memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
}

/*******************************************************************************
 * Context management library initialization routine. This library is used by
 * runtime services to share pointers to 'cpu_context' structures for the
 * secure, non-secure and realm states. Management of the structures and their
 * associated memory is not done by the context management library, e.g. the
 * PSCI service manages the cpu context used for entry from and exit to the
 * non-secure state. The Secure payload dispatcher service manages the
 * context(s) corresponding to the secure state.
 * It also uses this library to get access to the non-secure
 * state cpu context pointers.
 * Lastly, this library provides the API to make SP_EL3 point to the cpu context
 * which will be used for programming an entry into a lower EL. The same context
 * will be used to save state upon exception entry from that EL.
 ******************************************************************************/
void __init cm_init(void)
{
	/*
	 * The context management library has only global data to initialize, but
	 * that will be done when the BSS is zeroed out.
	 */
}

/*******************************************************************************
 * This is the high-level function used to initialize the cpu_context 'ctx' for
 * first use. It performs initializations that are common to all security states
 * and initializations specific to the security state specified in 'ep'.
 ******************************************************************************/
void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	unsigned int security_state;

	assert(ctx != NULL);

	/*
	 * Perform initializations that are common
	 * to all security states.
	 */
	setup_context_common(ctx, ep);

	security_state = GET_SECURITY_STATE(ep->h.attr);

	/* Perform security state specific initializations */
	switch (security_state) {
	case SECURE:
		setup_secure_context(ctx, ep);
		break;
#if ENABLE_RME
	case REALM:
		setup_realm_context(ctx, ep);
		break;
#endif
	case NON_SECURE:
		setup_ns_context(ctx, ep);
		break;
	default:
		ERROR("Invalid security state\n");
		panic();
		break;
	}
}

/*******************************************************************************
 * Enable architecture extensions for EL3 execution. This function only updates
 * registers in-place which are expected to either never change or be
 * overwritten by el3_exit.
 ******************************************************************************/
#if IMAGE_BL31
void cm_manage_extensions_el3(void)
{
	if (is_feat_spe_supported()) {
		spe_init_el3();
	}

	if (is_feat_amu_supported()) {
		amu_init_el3();
	}

	if (is_feat_sme_supported()) {
		sme_init_el3();
	}

	if (is_feat_trbe_supported()) {
		trbe_init_el3();
	}

	if (is_feat_brbe_supported()) {
		brbe_init_el3();
	}

	if (is_feat_trf_supported()) {
		trf_init_el3();
	}

	pmuv3_init_el3();
}
#endif /* IMAGE_BL31 */

/******************************************************************************
 * Function to initialise the registers with the RESET values in the context
 * memory, which are maintained per world.
 ******************************************************************************/
#if IMAGE_BL31
void cm_el3_arch_init_per_world(per_world_context_t *per_world_ctx)
{
	/*
	 * Initialise CPTR_EL3, setting all fields rather than relying on hw.
	 *
	 * CPTR_EL3.TFP: Set to zero so that accesses to the V- or Z- registers
	 * by Advanced SIMD, floating-point or SVE instructions (if
	 * implemented) do not trap to EL3.
	 *
	 * CPTR_EL3.TCPAC: Set to zero so that accesses to CPACR_EL1,
	 * CPTR_EL2, CPACR, or HCPTR do not trap to EL3.
	 */
	uint64_t cptr_el3 = CPTR_EL3_RESET_VAL & ~(TCPAC_BIT | TFP_BIT);

	per_world_ctx->ctx_cptr_el3 = cptr_el3;

	/*
	 * Initialize MPAM3_EL3 to its default reset value.
	 *
	 * MPAM3_EL3_RESET_VAL sets the MPAM3_EL3.TRAPLOWER bit, which forces
	 * all lower ELn MPAM register accesses to trap to EL3.
	 */
	per_world_ctx->ctx_mpam3_el3 = MPAM3_EL3_RESET_VAL;
}
#endif /* IMAGE_BL31 */

/*******************************************************************************
 * Initialise per_world_context for the Non-Secure world.
 * This function enables the architecture extensions which have the same value
 * across the cores for the non-secure world.
 ******************************************************************************/
#if IMAGE_BL31
void manage_extensions_nonsecure_per_world(void)
{
	cm_el3_arch_init_per_world(&per_world_context[CPU_CONTEXT_NS]);

	if (is_feat_sme_supported()) {
		sme_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}

	if (is_feat_sve_supported()) {
		sve_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}

	if (is_feat_amu_supported()) {
		amu_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}

	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}

	if (is_feat_mpam_supported()) {
		mpam_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}
}
#endif /* IMAGE_BL31 */

/*******************************************************************************
 * Initialise per_world_context for the Secure world.
 * This function enables the architecture extensions which have the same value
 * across the cores for the secure world.
 ******************************************************************************/
static void manage_extensions_secure_per_world(void)
{
#if IMAGE_BL31
	cm_el3_arch_init_per_world(&per_world_context[CPU_CONTEXT_SECURE]);

	if (is_feat_sme_supported()) {

		if (ENABLE_SME_FOR_SWD) {
			/*
			 * Enable SME, SVE, FPU/SIMD in the secure context; the
			 * SPM must ensure that the SME, SVE and FPU/SIMD
			 * contexts are properly managed.
			 */
			sme_enable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
		} else {
			/*
			 * Disable SME, SVE, FPU/SIMD in the secure context so
			 * the non-secure world can safely use the associated
			 * registers.
			 */
			sme_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
		}
	}
	if (is_feat_sve_supported()) {
		if (ENABLE_SVE_FOR_SWD) {
			/*
			 * Enable SVE and FPU in the secure context; the SPM
			 * must ensure that the SVE and FPU register contexts
			 * are properly managed.
			 */
			sve_enable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
		} else {
			/*
			 * Disable SVE and FPU in the secure context so the
			 * non-secure world can safely use them.
			 */
			sve_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
		}
	}

	/* NS can access this but Secure shouldn't */
	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
	}

	has_secure_perworld_init = true;
#endif /* IMAGE_BL31 */
}

/*******************************************************************************
 * Enable architecture extensions on first entry to the Non-secure world.
 ******************************************************************************/
static void manage_extensions_nonsecure(cpu_context_t *ctx)
{
#if IMAGE_BL31
	if (is_feat_amu_supported()) {
		amu_enable(ctx);
	}

	if (is_feat_sme_supported()) {
		sme_enable(ctx);
	}

	pmuv3_enable(ctx);
#endif /* IMAGE_BL31 */
}

/* TODO: move to lib/extensions/pauth when it has been ported to FEAT_STATE */
static __unused void enable_pauth_el2(void)
{
	u_register_t hcr_el2 = read_hcr_el2();
	/*
	 * For the Armv8.3 pointer authentication feature, disable traps to EL2
	 * when accessing key registers or using pointer authentication
	 * instructions from lower ELs.
	 */
	hcr_el2 |= (HCR_API_BIT | HCR_APK_BIT);

	write_hcr_el2(hcr_el2);
}

#if INIT_UNUSED_NS_EL2
/*******************************************************************************
 * Enable architecture extensions in-place at EL2 on first entry to the
 * Non-secure world when EL2 is empty and unused.
 ******************************************************************************/
static void manage_extensions_nonsecure_el2_unused(void)
{
#if IMAGE_BL31
	if (is_feat_spe_supported()) {
		spe_init_el2_unused();
	}

	if (is_feat_amu_supported()) {
		amu_init_el2_unused();
	}

	if (is_feat_mpam_supported()) {
		mpam_init_el2_unused();
	}

	if (is_feat_trbe_supported()) {
		trbe_init_el2_unused();
	}

	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_init_el2_unused();
	}

	if (is_feat_trf_supported()) {
		trf_init_el2_unused();
	}

	pmuv3_init_el2_unused();

	if (is_feat_sve_supported()) {
		sve_init_el2_unused();
	}

	if (is_feat_sme_supported()) {
		sme_init_el2_unused();
	}

#if ENABLE_PAUTH
	enable_pauth_el2();
#endif /* ENABLE_PAUTH */
#endif /* IMAGE_BL31 */
}
#endif /* INIT_UNUSED_NS_EL2 */

/*******************************************************************************
 * Enable architecture extensions on first entry to the Secure world.
 ******************************************************************************/
static void manage_extensions_secure(cpu_context_t *ctx)
{
#if IMAGE_BL31
	if (is_feat_sme_supported()) {
		if (ENABLE_SME_FOR_SWD) {
			/*
			 * Enable SME, SVE, FPU/SIMD in the secure context; the
			 * secure manager must ensure that the SME, SVE and
			 * FPU/SIMD contexts are properly managed.
			 */
			sme_init_el3();
			sme_enable(ctx);
		} else {
			/*
			 * Disable SME, SVE, FPU/SIMD in the secure context so
			 * the non-secure world can safely use the associated
			 * registers.
			 */
			sme_disable(ctx);
		}
	}
#endif /* IMAGE_BL31 */
}

/*******************************************************************************
 * The following function initializes the cpu_context for a CPU specified by
 * its `cpu_idx` for first use, and sets the initial entrypoint state as
 * specified by the entry_point_info structure.
 ******************************************************************************/
void cm_init_context_by_index(unsigned int cpu_idx,
			      const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}

/*******************************************************************************
 * The following function initializes the cpu_context for the current CPU
 * for first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 ******************************************************************************/
void cm_init_my_context(const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}

/* EL2 present but unused, need to disable safely. SCTLR_EL2 can be ignored */
static void init_nonsecure_el2_unused(cpu_context_t *ctx)
{
#if INIT_UNUSED_NS_EL2
	u_register_t hcr_el2 = HCR_RESET_VAL;
	u_register_t mdcr_el2;
	u_register_t scr_el3;

	scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);

	/* Set EL2 register width: Set HCR_EL2.RW to match SCR_EL3.RW */
	if ((scr_el3 & SCR_RW_BIT) != 0U) {
		hcr_el2 |= HCR_RW_BIT;
	}

	write_hcr_el2(hcr_el2);

	/*
	 * Initialise CPTR_EL2, setting all fields rather than relying on the hw.
	 * All fields have architecturally UNKNOWN reset values.
	 */
	write_cptr_el2(CPTR_EL2_RESET_VAL);

	/*
	 * Initialise CNTHCTL_EL2. All fields are architecturally UNKNOWN on
	 * reset and are set to zero except for the field(s) listed below.
	 *
	 * CNTHCTL_EL2.EL1PCEN: Set to one to disable traps to Hyp mode of
	 * Non-secure EL0 and EL1 accesses to the physical timer registers.
	 *
	 * CNTHCTL_EL2.EL1PCTEN: Set to one to disable traps to Hyp mode of
	 * Non-secure EL0 and EL1 accesses to the physical counter registers.
	 */
	write_cnthctl_el2(CNTHCTL_RESET_VAL | EL1PCEN_BIT | EL1PCTEN_BIT);

	/*
	 * Initialise CNTVOFF_EL2 to zero as it resets to an architecturally
	 * UNKNOWN value.
	 */
	write_cntvoff_el2(0);

	/*
	 * Set VPIDR_EL2 and VMPIDR_EL2 to match MIDR_EL1 and MPIDR_EL1
	 * respectively.
	 */
	write_vpidr_el2(read_midr_el1());
	write_vmpidr_el2(read_mpidr_el1());

	/*
	 * Initialise VTTBR_EL2. All fields are architecturally UNKNOWN on reset.
	 *
	 * VTTBR_EL2.VMID: Set to zero. Even though EL1&0 stage 2 address
	 * translation is disabled, cache maintenance operations depend on the
	 * VMID.
	 *
	 * VTTBR_EL2.BADDR: Set to zero as EL1&0 stage 2 address translation is
	 * disabled.
	 */
	write_vttbr_el2(VTTBR_RESET_VAL &
		~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT) |
		  (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT)));

	/*
	 * Initialise MDCR_EL2, setting all fields rather than relying on hw.
	 * Some fields are architecturally UNKNOWN on reset.
	 *
	 * MDCR_EL2.TDRA: Set to zero so that Non-secure EL0 and EL1 System
	 * register accesses to the Debug ROM registers are not trapped to EL2.
	 *
	 * MDCR_EL2.TDOSA: Set to zero so that Non-secure EL1 System register
	 * accesses to the powerdown debug registers are not trapped to EL2.
	 *
	 * MDCR_EL2.TDA: Set to zero so that System register accesses to the
	 * debug registers do not trap to EL2.
	 *
	 * MDCR_EL2.TDE: Set to zero so that debug exceptions are not routed to
	 * EL2.
	 */
	mdcr_el2 = MDCR_EL2_RESET_VAL &
		~(MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT | MDCR_EL2_TDA_BIT |
		  MDCR_EL2_TDE_BIT);

	write_mdcr_el2(mdcr_el2);

	/*
	 * Initialise HSTR_EL2. All fields are architecturally UNKNOWN on reset.
	 *
	 * HSTR_EL2.T<n>: Set all these fields to zero so that Non-secure EL0 or
	 * EL1 accesses to System registers do not trap to EL2.
	 */
	write_hstr_el2(HSTR_EL2_RESET_VAL & ~(HSTR_EL2_T_MASK));

	/*
	 * Initialise CNTHP_CTL_EL2. All fields are architecturally UNKNOWN on
	 * reset.
	 *
	 * CNTHP_CTL_EL2.ENABLE: Set to zero to disable the EL2 physical timer
	 * and prevent timer interrupts.
	 */
	write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL & ~(CNTHP_CTL_ENABLE_BIT));

	manage_extensions_nonsecure_el2_unused();
#endif /* INIT_UNUSED_NS_EL2 */
}

/*******************************************************************************
 * Prepare the CPU system registers for first entry into the realm, secure, or
 * normal world.
 *
 * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized.
 * If execution is requested to non-secure EL1 or svc mode, and the CPU supports
 * EL2, then EL2 is disabled by configuring all necessary EL2 registers.
 * For all entries, the EL1 registers are initialized from the cpu_context.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	u_register_t sctlr_elx, scr_el3;
	cpu_context_t *ctx = cm_get_context(security_state);

	assert(ctx != NULL);

	if (security_state == NON_SECURE) {
		uint64_t el2_implemented = el_implemented(2);

		scr_el3 = read_ctx_reg(get_el3state_ctx(ctx),
				       CTX_SCR_EL3);

		if (((scr_el3 & SCR_HCE_BIT) != 0U)
		    || (el2_implemented != EL_IMPL_NONE)) {
			/*
			 * If the context is not being used for EL2, initialize
			 * HCRX_EL2 with its init value here.
			 */
			if (is_feat_hcx_supported()) {
				write_hcrx_el2(HCRX_EL2_INIT_VAL);
			}

			/*
			 * Initialize the Fine-grained trap registers introduced
			 * by FEAT_FGT so all traps are initially disabled when
			 * switching to EL2 or a lower EL, preventing undesired
			 * behavior.
			 */
			if (is_feat_fgt_supported()) {
				/*
				 * Initialize HFG*_EL2 registers with a default
				 * value so legacy systems unaware of FEAT_FGT
				 * do not get trapped due to their lack of
				 * initialization for this feature.
				 */
				write_hfgitr_el2(HFGITR_EL2_INIT_VAL);
				write_hfgrtr_el2(HFGRTR_EL2_INIT_VAL);
				write_hfgwtr_el2(HFGWTR_EL2_INIT_VAL);
			}
		}

		if ((scr_el3 & SCR_HCE_BIT) != 0U) {
			/* Use SCTLR_EL1.EE value to initialise sctlr_el2 */
			sctlr_elx = read_ctx_reg(get_el1_sysregs_ctx(ctx),
						 CTX_SCTLR_EL1);
			sctlr_elx &= SCTLR_EE_BIT;
			sctlr_elx |= SCTLR_EL2_RES1;
#if ERRATA_A75_764081
			/*
			 * If the workaround for Cortex-A75 erratum 764081 is
			 * used then set SCTLR_EL2.IESB to enable the Implicit
			 * Error Synchronization Barrier.
			 */
			sctlr_elx |= SCTLR_IESB_BIT;
#endif
			write_sctlr_el2(sctlr_elx);
		} else if (el2_implemented != EL_IMPL_NONE) {
			init_nonsecure_el2_unused(ctx);
		}
	}

	cm_el1_sysregs_context_restore(security_state);
	cm_set_next_eret_context(security_state);
}

#if CTX_INCLUDE_EL2_REGS

static void el2_sysregs_context_save_fgt(el2_sysregs_t *ctx)
{
	write_ctx_reg(ctx, CTX_HDFGRTR_EL2, read_hdfgrtr_el2());
	if (is_feat_amu_supported()) {
		write_ctx_reg(ctx, CTX_HAFGRTR_EL2, read_hafgrtr_el2());
	}
	write_ctx_reg(ctx, CTX_HDFGWTR_EL2, read_hdfgwtr_el2());
	write_ctx_reg(ctx, CTX_HFGITR_EL2, read_hfgitr_el2());
	write_ctx_reg(ctx, CTX_HFGRTR_EL2, read_hfgrtr_el2());
	write_ctx_reg(ctx, CTX_HFGWTR_EL2, read_hfgwtr_el2());
}

static void el2_sysregs_context_restore_fgt(el2_sysregs_t *ctx)
{
	write_hdfgrtr_el2(read_ctx_reg(ctx, CTX_HDFGRTR_EL2));
	if (is_feat_amu_supported()) {
		write_hafgrtr_el2(read_ctx_reg(ctx, CTX_HAFGRTR_EL2));
	}
	write_hdfgwtr_el2(read_ctx_reg(ctx, CTX_HDFGWTR_EL2));
	write_hfgitr_el2(read_ctx_reg(ctx, CTX_HFGITR_EL2));
	write_hfgrtr_el2(read_ctx_reg(ctx, CTX_HFGRTR_EL2));
	write_hfgwtr_el2(read_ctx_reg(ctx, CTX_HFGWTR_EL2));
}

#if CTX_INCLUDE_MPAM_REGS

static void el2_sysregs_context_save_mpam(mpam_t *ctx)
{
	u_register_t mpam_idr = read_mpamidr_el1();

	write_ctx_reg(ctx, CTX_MPAM2_EL2, read_mpam2_el2());

	/*
	 * The context registers that we intend to save would be part of the
	 * PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1.
	 */
	if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) {
		return;
	}

	/*
	 * MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 are always present if
	 * MPAMIDR_HAS_HCR_BIT == 1.
	 */
	write_ctx_reg(ctx, CTX_MPAMHCR_EL2, read_mpamhcr_el2());
	write_ctx_reg(ctx, CTX_MPAMVPM0_EL2, read_mpamvpm0_el2());
	write_ctx_reg(ctx, CTX_MPAMVPMV_EL2, read_mpamvpmv_el2());

	/*
	 * The number of MPAMVPM registers is implementation defined, their
	 * number is stored in the MPAMIDR_EL1 register.
	 */
	switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) {
	case 7:
		write_ctx_reg(ctx, CTX_MPAMVPM7_EL2, read_mpamvpm7_el2());
		__fallthrough;
	case 6:
		write_ctx_reg(ctx, CTX_MPAMVPM6_EL2, read_mpamvpm6_el2());
		__fallthrough;
	case 5:
		write_ctx_reg(ctx, CTX_MPAMVPM5_EL2, read_mpamvpm5_el2());
		__fallthrough;
	case 4:
		write_ctx_reg(ctx, CTX_MPAMVPM4_EL2, read_mpamvpm4_el2());
		__fallthrough;
	case 3:
		write_ctx_reg(ctx, CTX_MPAMVPM3_EL2, read_mpamvpm3_el2());
		__fallthrough;
	case 2:
		write_ctx_reg(ctx, CTX_MPAMVPM2_EL2, read_mpamvpm2_el2());
		__fallthrough;
	case 1:
		write_ctx_reg(ctx, CTX_MPAMVPM1_EL2, read_mpamvpm1_el2());
		break;
	}
}

#endif /* CTX_INCLUDE_MPAM_REGS */

#if CTX_INCLUDE_MPAM_REGS
static void el2_sysregs_context_restore_mpam(mpam_t *ctx)
{
	u_register_t mpam_idr = read_mpamidr_el1();

	write_mpam2_el2(read_ctx_reg(ctx, CTX_MPAM2_EL2));

	if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) {
		return;
	}

	write_mpamhcr_el2(read_ctx_reg(ctx, CTX_MPAMHCR_EL2));
	write_mpamvpm0_el2(read_ctx_reg(ctx, CTX_MPAMVPM0_EL2));
	write_mpamvpmv_el2(read_ctx_reg(ctx, CTX_MPAMVPMV_EL2));

	switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) {
	case 7:
		write_mpamvpm7_el2(read_ctx_reg(ctx, CTX_MPAMVPM7_EL2));
		__fallthrough;
	case 6:
		write_mpamvpm6_el2(read_ctx_reg(ctx, CTX_MPAMVPM6_EL2));
		__fallthrough;
	case 5:
		write_mpamvpm5_el2(read_ctx_reg(ctx, CTX_MPAMVPM5_EL2));
		__fallthrough;
	case 4:
		write_mpamvpm4_el2(read_ctx_reg(ctx, CTX_MPAMVPM4_EL2));
		__fallthrough;
	case 3:
		write_mpamvpm3_el2(read_ctx_reg(ctx, CTX_MPAMVPM3_EL2));
		__fallthrough;
	case 2:
		write_mpamvpm2_el2(read_ctx_reg(ctx, CTX_MPAMVPM2_EL2));
		__fallthrough;
	case 1:
		write_mpamvpm1_el2(read_ctx_reg(ctx, CTX_MPAMVPM1_EL2));
		break;
	}
}
#endif /* CTX_INCLUDE_MPAM_REGS */

/* -----------------------------------------------------
 * The following registers are not added:
 * AMEVCNTVOFF0<n>_EL2
 * AMEVCNTVOFF1<n>_EL2
 * ICH_AP0R<n>_EL2
 * ICH_AP1R<n>_EL2
 * ICH_LR<n>_EL2
 * -----------------------------------------------------
 */
static void el2_sysregs_context_save_common(el2_sysregs_t *ctx)
{
	write_ctx_reg(ctx, CTX_ACTLR_EL2, read_actlr_el2());
	write_ctx_reg(ctx, CTX_AFSR0_EL2, read_afsr0_el2());
	write_ctx_reg(ctx, CTX_AFSR1_EL2, read_afsr1_el2());
	write_ctx_reg(ctx, CTX_AMAIR_EL2, read_amair_el2());
	write_ctx_reg(ctx, CTX_CNTHCTL_EL2, read_cnthctl_el2());
	write_ctx_reg(ctx, CTX_CNTVOFF_EL2, read_cntvoff_el2());
	write_ctx_reg(ctx, CTX_CPTR_EL2, read_cptr_el2());
	if (CTX_INCLUDE_AARCH32_REGS) {
		write_ctx_reg(ctx, CTX_DBGVCR32_EL2, read_dbgvcr32_el2());
	}
	write_ctx_reg(ctx, CTX_ELR_EL2, read_elr_el2());
	write_ctx_reg(ctx, CTX_ESR_EL2, read_esr_el2());
	write_ctx_reg(ctx, CTX_FAR_EL2, read_far_el2());
	write_ctx_reg(ctx, CTX_HACR_EL2, read_hacr_el2());
	write_ctx_reg(ctx, CTX_HCR_EL2, read_hcr_el2());
	write_ctx_reg(ctx, CTX_HPFAR_EL2, read_hpfar_el2());
	write_ctx_reg(ctx, CTX_HSTR_EL2, read_hstr_el2());

	/*
	 * Set the NS bit to be able to access the ICC_SRE_EL2 register.
	 * TODO: remove with root context
	 */
	u_register_t scr_el3 = read_scr_el3();

	write_scr_el3(scr_el3 | SCR_NS_BIT);
	isb();
	write_ctx_reg(ctx, CTX_ICC_SRE_EL2, read_icc_sre_el2());

	write_scr_el3(scr_el3);
	isb();

	write_ctx_reg(ctx, CTX_ICH_HCR_EL2, read_ich_hcr_el2());
	write_ctx_reg(ctx, CTX_ICH_VMCR_EL2, read_ich_vmcr_el2());
	write_ctx_reg(ctx, CTX_MAIR_EL2, read_mair_el2());
	write_ctx_reg(ctx, CTX_MDCR_EL2, read_mdcr_el2());
	write_ctx_reg(ctx, CTX_SCTLR_EL2, read_sctlr_el2());
	write_ctx_reg(ctx, CTX_SPSR_EL2, read_spsr_el2());
	write_ctx_reg(ctx, CTX_SP_EL2, read_sp_el2());
	write_ctx_reg(ctx, CTX_TCR_EL2, read_tcr_el2());
	write_ctx_reg(ctx, CTX_TPIDR_EL2, read_tpidr_el2());
	write_ctx_reg(ctx, CTX_TTBR0_EL2, read_ttbr0_el2());
	write_ctx_reg(ctx, CTX_VBAR_EL2, read_vbar_el2());
	write_ctx_reg(ctx, CTX_VMPIDR_EL2, read_vmpidr_el2());
	write_ctx_reg(ctx, CTX_VPIDR_EL2, read_vpidr_el2());
	write_ctx_reg(ctx, CTX_VTCR_EL2, read_vtcr_el2());
	write_ctx_reg(ctx, CTX_VTTBR_EL2, read_vttbr_el2());
}

static void el2_sysregs_context_restore_common(el2_sysregs_t *ctx)
{
	write_actlr_el2(read_ctx_reg(ctx, CTX_ACTLR_EL2));
	write_afsr0_el2(read_ctx_reg(ctx, CTX_AFSR0_EL2));
	write_afsr1_el2(read_ctx_reg(ctx, CTX_AFSR1_EL2));
	write_amair_el2(read_ctx_reg(ctx, CTX_AMAIR_EL2));
	write_cnthctl_el2(read_ctx_reg(ctx, CTX_CNTHCTL_EL2));
	write_cntvoff_el2(read_ctx_reg(ctx, CTX_CNTVOFF_EL2));
	write_cptr_el2(read_ctx_reg(ctx, CTX_CPTR_EL2));
	if (CTX_INCLUDE_AARCH32_REGS) {
		write_dbgvcr32_el2(read_ctx_reg(ctx, CTX_DBGVCR32_EL2));
	}
	write_elr_el2(read_ctx_reg(ctx, CTX_ELR_EL2));
	write_esr_el2(read_ctx_reg(ctx, CTX_ESR_EL2));
	write_far_el2(read_ctx_reg(ctx, CTX_FAR_EL2));
	write_hacr_el2(read_ctx_reg(ctx, CTX_HACR_EL2));
	write_hcr_el2(read_ctx_reg(ctx, CTX_HCR_EL2));
	write_hpfar_el2(read_ctx_reg(ctx, CTX_HPFAR_EL2));
	write_hstr_el2(read_ctx_reg(ctx, CTX_HSTR_EL2));

	/*
	 * Set the NS bit to be able to access the ICC_SRE_EL2 register.
	 * TODO: remove with root context
	 */
	u_register_t scr_el3 = read_scr_el3();

	write_scr_el3(scr_el3 | SCR_NS_BIT);
	isb();
	write_icc_sre_el2(read_ctx_reg(ctx, CTX_ICC_SRE_EL2));

	write_scr_el3(scr_el3);
	isb();

	write_ich_hcr_el2(read_ctx_reg(ctx, CTX_ICH_HCR_EL2));
	write_ich_vmcr_el2(read_ctx_reg(ctx, CTX_ICH_VMCR_EL2));
	write_mair_el2(read_ctx_reg(ctx, CTX_MAIR_EL2));
	write_mdcr_el2(read_ctx_reg(ctx, CTX_MDCR_EL2));
	write_sctlr_el2(read_ctx_reg(ctx, CTX_SCTLR_EL2));
	write_spsr_el2(read_ctx_reg(ctx, CTX_SPSR_EL2));
	write_sp_el2(read_ctx_reg(ctx, CTX_SP_EL2));
	write_tcr_el2(read_ctx_reg(ctx, CTX_TCR_EL2));
	write_tpidr_el2(read_ctx_reg(ctx, CTX_TPIDR_EL2));
	write_ttbr0_el2(read_ctx_reg(ctx, CTX_TTBR0_EL2));
	write_vbar_el2(read_ctx_reg(ctx, CTX_VBAR_EL2));
	write_vmpidr_el2(read_ctx_reg(ctx, CTX_VMPIDR_EL2));
	write_vpidr_el2(read_ctx_reg(ctx, CTX_VPIDR_EL2));
	write_vtcr_el2(read_ctx_reg(ctx, CTX_VTCR_EL2));
	write_vttbr_el2(read_ctx_reg(ctx, CTX_VTTBR_EL2));
}

/*******************************************************************************
 * Save EL2 sysreg context
 ******************************************************************************/
void cm_el2_sysregs_context_save(uint32_t security_state)
{
	cpu_context_t *ctx;
	el2_sysregs_t *el2_sysregs_ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);

	el2_sysregs_context_save_common(el2_sysregs_ctx);

	if (is_feat_mte_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_TFSR_EL2, read_tfsr_el2());
	}

#if CTX_INCLUDE_MPAM_REGS
	if (is_feat_mpam_supported()) {
		mpam_t *mpam_ctx = get_mpam_ctx(ctx);
		el2_sysregs_context_save_mpam(mpam_ctx);
	}
#endif

	if (is_feat_fgt_supported()) {
		el2_sysregs_context_save_fgt(el2_sysregs_ctx);
	}

	if (is_feat_ecv_v2_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_CNTPOFF_EL2, read_cntpoff_el2());
	}

	if (is_feat_vhe_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_CONTEXTIDR_EL2, read_contextidr_el2());
		write_ctx_reg(el2_sysregs_ctx, CTX_TTBR1_EL2, read_ttbr1_el2());
	}

	if (is_feat_ras_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_VDISR_EL2, read_vdisr_el2());
		write_ctx_reg(el2_sysregs_ctx, CTX_VSESR_EL2, read_vsesr_el2());
	}

	if (is_feat_nv2_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_VNCR_EL2, read_vncr_el2());
	}

	if (is_feat_trf_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_TRFCR_EL2, read_trfcr_el2());
	}

	if (is_feat_csv2_2_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_SCXTNUM_EL2, read_scxtnum_el2());
	}

	if (is_feat_hcx_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_HCRX_EL2, read_hcrx_el2());
	}
	if (is_feat_tcr2_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_TCR2_EL2, read_tcr2_el2());
	}
	if (is_feat_sxpie_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_PIRE0_EL2, read_pire0_el2());
		write_ctx_reg(el2_sysregs_ctx, CTX_PIR_EL2, read_pir_el2());
	}
	if (is_feat_s2pie_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_S2PIR_EL2, read_s2pir_el2());
	}
	if (is_feat_sxpoe_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_POR_EL2, read_por_el2());
	}
	if (is_feat_gcs_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_GCSPR_EL2, read_gcspr_el2());
		write_ctx_reg(el2_sysregs_ctx, CTX_GCSCR_EL2, read_gcscr_el2());
	}
}

/*******************************************************************************
 * Restore EL2 sysreg context
 ******************************************************************************/
void cm_el2_sysregs_context_restore(uint32_t security_state)
{
	cpu_context_t *ctx;
	el2_sysregs_t *el2_sysregs_ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);

	el2_sysregs_context_restore_common(el2_sysregs_ctx);
#if CTX_INCLUDE_MTE_REGS
	write_tfsr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TFSR_EL2));
#endif

#if CTX_INCLUDE_MPAM_REGS
	if (is_feat_mpam_supported()) {
		mpam_t *mpam_ctx = get_mpam_ctx(ctx);
		el2_sysregs_context_restore_mpam(mpam_ctx);
	}
#endif

	if (is_feat_fgt_supported()) {
		el2_sysregs_context_restore_fgt(el2_sysregs_ctx);
	}

	if (is_feat_ecv_v2_supported()) {
		write_cntpoff_el2(read_ctx_reg(el2_sysregs_ctx, CTX_CNTPOFF_EL2));
	}

	if (is_feat_vhe_supported()) {
		write_contextidr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_CONTEXTIDR_EL2));
		write_ttbr1_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TTBR1_EL2));
	}

	if (is_feat_ras_supported()) {
		write_vdisr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_VDISR_EL2));
		write_vsesr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_VSESR_EL2));
	}

	if (is_feat_nv2_supported()) {
		write_vncr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_VNCR_EL2));
	}

	if (is_feat_trf_supported()) {
		write_trfcr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TRFCR_EL2));
	}

	if (is_feat_csv2_2_supported()) {
		write_scxtnum_el2(read_ctx_reg(el2_sysregs_ctx, CTX_SCXTNUM_EL2));
	}

	if (is_feat_hcx_supported()) {
		write_hcrx_el2(read_ctx_reg(el2_sysregs_ctx, CTX_HCRX_EL2));
	}
	if (is_feat_tcr2_supported()) {
		write_tcr2_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TCR2_EL2));
	}
	if (is_feat_sxpie_supported()) {
		write_pire0_el2(read_ctx_reg(el2_sysregs_ctx, CTX_PIRE0_EL2));
		write_pir_el2(read_ctx_reg(el2_sysregs_ctx, CTX_PIR_EL2));
	}
	if (is_feat_s2pie_supported()) {
		write_s2pir_el2(read_ctx_reg(el2_sysregs_ctx, CTX_S2PIR_EL2));
	}
	if (is_feat_sxpoe_supported()) {
		write_por_el2(read_ctx_reg(el2_sysregs_ctx, CTX_POR_EL2));
	}
	if (is_feat_gcs_supported()) {
		write_gcscr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_GCSCR_EL2));
		write_gcspr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_GCSPR_EL2));
	}
}
#endif /* CTX_INCLUDE_EL2_REGS */

/*******************************************************************************
 * This function is used to exit to the Non-secure world. If CTX_INCLUDE_EL2_REGS
 * is enabled, it restores EL1 and EL2 sysreg contexts instead of directly
 * updating EL1 and EL2 registers. Otherwise, it calls the generic
 * cm_prepare_el3_exit function.
 ******************************************************************************/
void cm_prepare_el3_exit_ns(void)
{
#if CTX_INCLUDE_EL2_REGS
#if ENABLE_ASSERTIONS
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	assert(ctx != NULL);

	/* Assert that EL2 is used. */
	u_register_t scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);
	assert(((scr_el3 & SCR_HCE_BIT) != 0UL) &&
			(el_implemented(2U) != EL_IMPL_NONE));
#endif /* ENABLE_ASSERTIONS */

	/* Restore EL2 and EL1 sysreg contexts */
	cm_el2_sysregs_context_restore(NON_SECURE);
	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);
#else
	cm_prepare_el3_exit(NON_SECURE);
#endif /* CTX_INCLUDE_EL2_REGS */
}

/*******************************************************************************
 * The next four functions are used by runtime services to save and restore
 * EL1 context on the 'cpu_context' structure for the specified security
 * state.
 ******************************************************************************/
void cm_el1_sysregs_context_save(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	el1_sysregs_context_save(get_el1_sysregs_ctx(ctx));

#if IMAGE_BL31
	if (security_state == SECURE)
		PUBLISH_EVENT(cm_exited_secure_world);
	else
		PUBLISH_EVENT(cm_exited_normal_world);
#endif
}

void cm_el1_sysregs_context_restore(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	el1_sysregs_context_restore(get_el1_sysregs_ctx(ctx));

#if IMAGE_BL31
	if (security_state == SECURE)
		PUBLISH_EVENT(cm_entering_secure_world);
	else
		PUBLISH_EVENT(cm_entering_normal_world);
#endif
}

/*******************************************************************************
 * This function populates the ELR_EL3 member of 'cpu_context' pertaining to the
 * given security state with the given entrypoint.
 ******************************************************************************/
void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint)
{
	cpu_context_t *ctx;
	el3_state_t *state;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Populate EL3 state so that ERET jumps to the correct entry */
	state = get_el3state_ctx(ctx);
	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
}

/*******************************************************************************
 * This function populates the ELR_EL3 and SPSR_EL3 members of 'cpu_context'
 * pertaining to the given security state.
 ******************************************************************************/
void cm_set_elr_spsr_el3(uint32_t security_state,
			 uintptr_t entrypoint, uint32_t spsr)
{
	cpu_context_t *ctx;
	el3_state_t *state;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Populate EL3 state so that ERET jumps to the correct entry */
	state = get_el3state_ctx(ctx);
	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
	write_ctx_reg(state, CTX_SPSR_EL3, spsr);
}

/*******************************************************************************
 * This function updates a single bit in the SCR_EL3 member of the 'cpu_context'
 * pertaining to the given security state using the value and bit position
 * specified in the parameters. It preserves all other bits.
 ******************************************************************************/
void cm_write_scr_el3_bit(uint32_t security_state,
			  uint32_t bit_pos,
			  uint32_t value)
{
	cpu_context_t *ctx;
	el3_state_t *state;
	u_register_t scr_el3;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Ensure that the bit position is a valid one */
	assert(((1UL << bit_pos) & SCR_VALID_BIT_MASK) != 0U);

	/* Ensure that the 'value' is only a bit wide */
	assert(value <= 1U);

	/*
	 * Get the SCR_EL3 value from the cpu context, clear the desired bit
	 * and set it to its new value.
	 */
	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
	scr_el3 &= ~(1UL << bit_pos);
	scr_el3 |= (u_register_t)value << bit_pos;
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
}

/*******************************************************************************
 * This function retrieves the SCR_EL3 member of 'cpu_context' pertaining to the
 * given security state.
 ******************************************************************************/
u_register_t cm_get_scr_el3(uint32_t security_state)
{
	cpu_context_t *ctx;
	el3_state_t *state;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Return the SCR_EL3 value stored in the cpu context */
	state = get_el3state_ctx(ctx);
	return read_ctx_reg(state, CTX_SCR_EL3);
}

/*******************************************************************************
 * This function is used to program the context that's used for exception
 * return. This initializes SP_EL3 to a pointer to a 'cpu_context' set for
 * the required security state.
 ******************************************************************************/
void cm_set_next_eret_context(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	cm_set_next_context(ctx);
}