/*
 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2022, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <drivers/arm/gicv3.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/brbe.h>
#include <lib/extensions/mpam.h>
#include <lib/extensions/pmuv3.h>
#include <lib/extensions/sme.h>
#include <lib/extensions/spe.h>
#include <lib/extensions/sve.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/extensions/trbe.h>
#include <lib/extensions/trf.h>
#include <lib/utils.h>

#if ENABLE_FEAT_TWED
/* Make sure the delay value fits within the range (0-15) */
CASSERT(((TWED_DELAY & ~SCR_TWEDEL_MASK) == 0U), assert_twed_delay_value_check);
#endif /* ENABLE_FEAT_TWED */

per_world_context_t per_world_context[CPU_DATA_CONTEXT_NUM];
static bool has_secure_perworld_init;

static void manage_extensions_nonsecure(cpu_context_t *ctx);
static void manage_extensions_secure(cpu_context_t *ctx);
static void manage_extensions_secure_per_world(void);

static void setup_el1_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t sctlr_elx, actlr_elx;

	/*
	 * Initialise SCTLR_EL1 to the reset value corresponding to the target
	 * execution state, setting all fields rather than relying on the hw.
	 * Some fields have architecturally UNKNOWN reset values and these are
	 * set to zero.
	 *
	 * SCTLR.EE: Endianness is taken from the entrypoint attributes.
	 *
	 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
	 * required by the PSCI specification).
	 */
	sctlr_elx = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0UL;
	if (GET_RW(ep->spsr) == MODE_RW_64) {
		sctlr_elx |= SCTLR_EL1_RES1;
	} else {
		/*
		 * If the target execution state is AArch32 then the following
		 * fields need to be set.
		 *
		 * SCTLR_EL1.nTWE: Set to one so that EL0 execution of WFE
		 * instructions is not trapped to EL1.
		 *
		 * SCTLR_EL1.nTWI: Set to one so that EL0 execution of WFI
		 * instructions is not trapped to EL1.
		 *
		 * SCTLR_EL1.CP15BEN: Set to one to enable EL0 execution of the
		 * CP15DMB, CP15DSB, and CP15ISB instructions.
		 */
		sctlr_elx |= SCTLR_AARCH32_EL1_RES1 | SCTLR_CP15BEN_BIT
					| SCTLR_NTWI_BIT | SCTLR_NTWE_BIT;
	}

#if ERRATA_A75_764081
	/*
	 * If the workaround for Cortex-A75 erratum 764081 is used, then set
	 * SCTLR_EL1.IESB to enable the Implicit Error Synchronization Barrier.
	 */
	sctlr_elx |= SCTLR_IESB_BIT;
#endif
	/* Store the initialised SCTLR_EL1 value in the cpu_context */
	write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);

	/*
	 * Base the context ACTLR_EL1 on the current value, as it is
	 * implementation defined. The context restore process will write
	 * the value from the context to the actual register and can cause
	 * problems for processor cores that don't expect certain bits to
	 * be zero.
	 */
	actlr_elx = read_actlr_el1();
	write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_ACTLR_EL1, actlr_elx);
}

/******************************************************************************
 * This function performs initializations that are specific to SECURE state
 * and updates the cpu context specified by 'ctx'.
 *****************************************************************************/
static void setup_secure_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

#if defined(IMAGE_BL31) && !defined(SPD_spmd)
	/*
	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
	 * indicated by the interrupt routing model for BL31.
	 */
	scr_el3 |= get_scr_el3_from_routing_model(SECURE);
#endif

#if !CTX_INCLUDE_MTE_REGS || ENABLE_ASSERTIONS
	/* Get Memory Tagging Extension support level */
	unsigned int mte = get_armv8_5_mte_support();
#endif
	/*
	 * Allow access to Allocation Tags when CTX_INCLUDE_MTE_REGS
	 * is set, or when MTE is only implemented at EL0.
	 */
#if CTX_INCLUDE_MTE_REGS
	assert((mte == MTE_IMPLEMENTED_ELX) || (mte == MTE_IMPLEMENTED_ASY));
	scr_el3 |= SCR_ATA_BIT;
#else
	if (mte == MTE_IMPLEMENTED_EL0) {
		scr_el3 |= SCR_ATA_BIT;
	}
#endif /* CTX_INCLUDE_MTE_REGS */

	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);

	/*
	 * Initialize EL1 context registers unless SPMC is running
	 * at S-EL2.
	 */
#if !SPMD_SPM_AT_SEL2
	setup_el1_context(ctx, ep);
#endif

	manage_extensions_secure(ctx);

	/*
	 * The manage_extensions_secure_per_world API has to be executed once,
	 * as the registers it initialises hold a constant value across all
	 * CPUs for the secure world. Hence, this check ensures that the
	 * registers are initialised only once, avoiding re-initialization
	 * from multiple cores.
	 */
	if (!has_secure_perworld_init) {
		manage_extensions_secure_per_world();
	}
}

#if ENABLE_RME
/******************************************************************************
 * This function performs initializations that are specific to REALM state
 * and updates the cpu context specified by 'ctx'.
 *****************************************************************************/
static void setup_realm_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

	scr_el3 |= SCR_NS_BIT | SCR_NSE_BIT;

	if (is_feat_csv2_2_supported()) {
		/* Enable access to the SCXTNUM_ELx registers. */
		scr_el3 |= SCR_EnSCXT_BIT;
	}

	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
}
#endif /* ENABLE_RME */

/******************************************************************************
 * This function performs initializations that are specific to NON-SECURE state
 * and updates the cpu context specified by 'ctx'.
 *****************************************************************************/
static void setup_ns_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

	/* SCR_EL3.NS: Set the NS bit */
	scr_el3 |= SCR_NS_BIT;

	/* Allow access to Allocation Tags when MTE is implemented. */
	scr_el3 |= SCR_ATA_BIT;

#if !CTX_INCLUDE_PAUTH_REGS
	/*
	 * The Pointer Authentication feature, if present, is always enabled
	 * by default for Non-secure lower exception levels. We do not have an
	 * explicit flag to set it.
	 * The CTX_INCLUDE_PAUTH_REGS flag is explicitly used to enable it for
	 * the lower exception levels of the secure and realm worlds.
	 *
	 * To prevent leakage between the worlds during a world switch,
	 * we enable it only for the non-secure world.
	 *
	 * If the Secure/realm world wants to use pointer authentication,
	 * CTX_INCLUDE_PAUTH_REGS must be explicitly set to 1, in which case
	 * it will be enabled globally for all the contexts.
	 *
	 * SCR_EL3.API: Set to one to not trap any PAuth instructions at ELs
	 * other than EL3
	 *
	 * SCR_EL3.APK: Set to one to not trap any PAuth key values at ELs
	 * other than EL3
	 */
	scr_el3 |= SCR_API_BIT | SCR_APK_BIT;
#endif /* !CTX_INCLUDE_PAUTH_REGS */

#if HANDLE_EA_EL3_FIRST_NS
	/* SCR_EL3.EA: Route External Abort and SError Interrupt to EL3. */
	scr_el3 |= SCR_EA_BIT;
#endif

#if RAS_TRAP_NS_ERR_REC_ACCESS
	/*
	 * SCR_EL3.TERR: Trap Error record accesses. Accesses to the RAS ERR
	 * and RAS ERX registers from EL1 and EL2 (from any security state)
	 * are trapped to EL3.
	 * Set here to trap only for NS EL1/EL2.
	 */
	scr_el3 |= SCR_TERR_BIT;
#endif

	if (is_feat_csv2_2_supported()) {
		/* Enable access to the SCXTNUM_ELx registers. */
		scr_el3 |= SCR_EnSCXT_BIT;
	}

#ifdef IMAGE_BL31
	/*
	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
	 * indicated by the interrupt routing model for BL31.
	 */
	scr_el3 |= get_scr_el3_from_routing_model(NON_SECURE);
#endif
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);

	/* Initialize EL1 context registers */
	setup_el1_context(ctx, ep);

	/* Initialize EL2 context registers */
#if CTX_INCLUDE_EL2_REGS

	/*
	 * Initialize the SCTLR_EL2 context register using the Endianness
	 * value taken from the entrypoint attribute.
	 */
	u_register_t sctlr_el2 = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0UL;
	sctlr_el2 |= SCTLR_EL2_RES1;
	write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_SCTLR_EL2, sctlr_el2);

	if (is_feat_hcx_supported()) {
		/*
		 * Initialize register HCRX_EL2 with its init value.
		 * As the value of HCRX_EL2 is UNKNOWN on reset, there is a
		 * chance that this can lead to unexpected behavior in lower
		 * ELs that have not been updated since the introduction of
		 * this feature if not properly initialized, especially when
		 * it comes to those bits that enable/disable traps.
		 */
		write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_HCRX_EL2,
			HCRX_EL2_INIT_VAL);
	}

	if (is_feat_fgt_supported()) {
		/*
		 * Initialize HFG*_EL2 registers with a default value so legacy
		 * systems unaware of FEAT_FGT do not get trapped due to their
		 * lack of initialization for this feature.
		 */
		write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_HFGITR_EL2,
			HFGITR_EL2_INIT_VAL);
		write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_HFGRTR_EL2,
			HFGRTR_EL2_INIT_VAL);
		write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_HFGWTR_EL2,
			HFGWTR_EL2_INIT_VAL);
	}
#endif /* CTX_INCLUDE_EL2_REGS */

	manage_extensions_nonsecure(ctx);
}

/*******************************************************************************
 * The following function performs initialization of the cpu_context 'ctx'
 * for first use that is common to all security states, and sets the
 * initial entrypoint state as specified by the entry_point_info structure.
 *
 * The EE and ST attributes are used to configure the endianness and secure
 * timer availability for the new execution context.
 ******************************************************************************/
static void setup_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;
	gp_regs_t *gp_regs;

	state = get_el3state_ctx(ctx);

	/* Clear any residual register values from the context */
	zeromem(ctx, sizeof(*ctx));

	/*
	 * The lower-EL context is zeroed so that no stale values leak to a
	 * world. It is assumed that an all-zero lower-EL context is good
	 * enough for it to boot correctly. However, there are very few
	 * registers where this is not true and some values need to be
	 * recreated.
	 */
#if CTX_INCLUDE_EL2_REGS
	el2_sysregs_t *el2_ctx = get_el2_sysregs_ctx(ctx);

	/*
	 * These bits are set in the gicv3 driver. Losing them (especially the
	 * SRE bit) is problematic for all worlds. Hence, recreate them.
	 */
	u_register_t icc_sre_el2 = ICC_SRE_DIB_BIT | ICC_SRE_DFB_BIT |
				   ICC_SRE_EN_BIT | ICC_SRE_SRE_BIT;
	write_ctx_reg(el2_ctx, CTX_ICC_SRE_EL2, icc_sre_el2);
#endif /* CTX_INCLUDE_EL2_REGS */

	/* Start with a clean SCR_EL3 copy as all relevant values are set */
	scr_el3 = SCR_RESET_VAL;

	/*
	 * SCR_EL3.TWE: Set to zero so that execution of WFE instructions at
	 * EL2, EL1 and EL0 is not trapped to EL3.
	 *
	 * SCR_EL3.TWI: Set to zero so that execution of WFI instructions at
	 * EL2, EL1 and EL0 is not trapped to EL3.
	 *
	 * SCR_EL3.SMD: Set to zero to enable SMC calls at EL1 and above, from
	 * both Security states and both Execution states.
	 *
	 * SCR_EL3.SIF: Set to one to disable secure instruction execution from
	 * Non-secure memory.
	 */
	scr_el3 &= ~(SCR_TWE_BIT | SCR_TWI_BIT | SCR_SMD_BIT);

	scr_el3 |= SCR_SIF_BIT;

	/*
	 * SCR_EL3.RW: Set the execution state, AArch32 or AArch64, for the
	 * next Exception level as specified by SPSR.
	 */
	if (GET_RW(ep->spsr) == MODE_RW_64) {
		scr_el3 |= SCR_RW_BIT;
	}

	/*
	 * SCR_EL3.ST: Traps Secure EL1 accesses to the Counter-timer Physical
	 * Secure timer registers to EL3, from AArch64 state only, if specified
	 * by the entrypoint attributes. If SEL2 is present and enabled, the ST
	 * bit always behaves as 1 (i.e. secure physical timer register access
	 * is not trapped).
	 */
	if (EP_GET_ST(ep->h.attr) != 0U) {
		scr_el3 |= SCR_ST_BIT;
	}

	/*
	 * If FEAT_HCX is enabled, enable access to HCRX_EL2 by setting
	 * SCR_EL3.HXEn.
	 */
	if (is_feat_hcx_supported()) {
		scr_el3 |= SCR_HXEn_BIT;
	}

	/*
	 * If FEAT_RNG_TRAP is enabled, all reads of the RNDR and RNDRRS
	 * registers are trapped to EL3.
	 */
#if ENABLE_FEAT_RNG_TRAP
	scr_el3 |= SCR_TRNDR_BIT;
#endif

#if FAULT_INJECTION_SUPPORT
	/* Enable fault injection from lower ELs */
	scr_el3 |= SCR_FIEN_BIT;
#endif

#if CTX_INCLUDE_PAUTH_REGS
	/*
	 * Enable Pointer Authentication globally for all the worlds.
	 *
	 * SCR_EL3.API: Set to one to not trap any PAuth instructions at ELs
	 * other than EL3
	 *
	 * SCR_EL3.APK: Set to one to not trap any PAuth key values at ELs
	 * other than EL3
	 */
	scr_el3 |= SCR_API_BIT | SCR_APK_BIT;
#endif /* CTX_INCLUDE_PAUTH_REGS */

	/*
	 * SCR_EL3.TCR2EN: Enable access to TCR2_ELx for AArch64 if present.
	 */
	if (is_feat_tcr2_supported() && (GET_RW(ep->spsr) == MODE_RW_64)) {
		scr_el3 |= SCR_TCR2EN_BIT;
	}

	/*
	 * SCR_EL3.PIEN: Enable permission indirection and overlay
	 * registers for AArch64 if present.
	 */
	if (is_feat_sxpie_supported() || is_feat_sxpoe_supported()) {
		scr_el3 |= SCR_PIEN_BIT;
	}

	/*
	 * SCR_EL3.GCSEn: Enable GCS registers for AArch64 if present.
	 */
	if ((is_feat_gcs_supported()) && (GET_RW(ep->spsr) == MODE_RW_64)) {
		scr_el3 |= SCR_GCSEn_BIT;
	}

	/*
	 * SCR_EL3.HCE: Enable HVC instructions if the next execution state is
	 * AArch64 and the next EL is EL2, or if the next execution state is
	 * AArch32 and the next mode is Hyp.
	 * SCR_EL3.FGTEn: Enable Fine Grained Virtualization Traps under the
	 * same conditions as HVC instructions and when the processor supports
	 * ARMv8.6-FGT.
	 * SCR_EL3.ECVEn: Enable Enhanced Counter Virtualization (ECV)
	 * CNTPOFF_EL2 register under the same conditions as HVC instructions
	 * and when the processor supports ECV.
	 */
	if (((GET_RW(ep->spsr) == MODE_RW_64) && (GET_EL(ep->spsr) == MODE_EL2))
	    || ((GET_RW(ep->spsr) != MODE_RW_64)
		&& (GET_M32(ep->spsr) == MODE32_hyp))) {
		scr_el3 |= SCR_HCE_BIT;

		if (is_feat_fgt_supported()) {
			scr_el3 |= SCR_FGTEN_BIT;
		}

		if (is_feat_ecv_supported()) {
			scr_el3 |= SCR_ECVEN_BIT;
		}
	}

	/* Enable WFE trap delay in SCR_EL3 if supported and configured */
	if (is_feat_twed_supported()) {
		/* Set delay in SCR_EL3 */
		scr_el3 &= ~(SCR_TWEDEL_MASK << SCR_TWEDEL_SHIFT);
		scr_el3 |= ((TWED_DELAY & SCR_TWEDEL_MASK)
				<< SCR_TWEDEL_SHIFT);

		/* Enable WFE delay */
		scr_el3 |= SCR_TWEDEn_BIT;
	}

#if IMAGE_BL31 && defined(SPD_spmd) && SPMD_SPM_AT_SEL2
	/* Enable S-EL2 if FEAT_SEL2 is implemented for all the contexts. */
	if (is_feat_sel2_supported()) {
		scr_el3 |= SCR_EEL2_BIT;
	}
#endif /* (IMAGE_BL31 && defined(SPD_spmd) && SPMD_SPM_AT_SEL2) */

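	/*
	 * Initialise MPAM3_EL3 in the context to its reset value so that
	 * lower ELs start from a known MPAM configuration.
	 */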
	if (is_feat_mpam_supported()) {
		write_ctx_reg(get_el3state_ctx(ctx), CTX_MPAM3_EL3,
			      MPAM3_EL3_RESET_VAL);
	}

	/*
	 * Populate EL3 state so that we have the right context
	 * before doing ERET
	 */
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
	write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
	write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr);

	/*
	 * Store the X0-X7 value from the entrypoint into the context
	 * Use memcpy as we are in control of the layout of the structures
	 */
	gp_regs = get_gpregs_ctx(ctx);
	memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
}

/*******************************************************************************
 * Context management library initialization routine. This library is used by
 * runtime services to share pointers to 'cpu_context' structures for the
 * secure, non-secure and realm states. Management of the structures and their
 * associated memory is not done by the context management library e.g. the
 * PSCI service manages the cpu context used for entry from and exit to the
 * non-secure state. The Secure payload dispatcher service manages the
 * context(s) corresponding to the secure state. It also uses this library to
 * get access to the non-secure state cpu context pointers.
 * Lastly, this library provides the API to make SP_EL3 point to the cpu
 * context which will be used for programming an entry into a lower EL. The
 * same context will be used to save state upon exception entry from that EL.
 ******************************************************************************/
void __init cm_init(void)
{
	/*
	 * The context management library has only global data to initialize,
	 * but that will be done when the BSS is zeroed out.
	 */
}

/*******************************************************************************
 * This is the high-level function used to initialize the cpu_context 'ctx' for
 * first use. It performs initializations that are common to all security
 * states and initializations specific to the security state specified in 'ep'.
 ******************************************************************************/
void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	unsigned int security_state;

	assert(ctx != NULL);

	/*
	 * Perform initializations that are common
	 * to all security states
	 */
	setup_context_common(ctx, ep);

	security_state = GET_SECURITY_STATE(ep->h.attr);

	/* Perform security state specific initializations */
	switch (security_state) {
	case SECURE:
		setup_secure_context(ctx, ep);
		break;
#if ENABLE_RME
	case REALM:
		setup_realm_context(ctx, ep);
		break;
#endif
	case NON_SECURE:
		setup_ns_context(ctx, ep);
		break;
	default:
		ERROR("Invalid security state\n");
		panic();
		break;
	}
}

/*******************************************************************************
 * Enable architecture extensions for EL3 execution. This function only updates
 * registers in-place which are expected to either never change or be
 * overwritten by el3_exit.
 ******************************************************************************/
#if IMAGE_BL31
void cm_manage_extensions_el3(void)
{
	if (is_feat_spe_supported()) {
		spe_init_el3();
	}

	if (is_feat_amu_supported()) {
		amu_init_el3();
	}

	if (is_feat_sme_supported()) {
		sme_init_el3();
	}

	if (is_feat_trbe_supported()) {
		trbe_init_el3();
	}

	if (is_feat_brbe_supported()) {
		brbe_init_el3();
	}

	if (is_feat_trf_supported()) {
		trf_init_el3();
	}

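	/* PMUv3 is initialised unconditionally; it has no feature check. */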
	pmuv3_init_el3();
}
#endif /* IMAGE_BL31 */

/******************************************************************************
 * Function to initialise the registers with the RESET values in the context
 * memory, which are maintained per world.
 ******************************************************************************/
#if IMAGE_BL31
void cm_el3_arch_init_per_world(per_world_context_t *per_world_ctx)
{
	/*
	 * Initialise CPTR_EL3, setting all fields rather than relying on hw.
	 *
	 * CPTR_EL3.TFP: Set to zero so that accesses to the V- or Z- registers
	 * by Advanced SIMD, floating-point or SVE instructions (if
	 * implemented) do not trap to EL3.
	 *
	 * CPTR_EL3.TCPAC: Set to zero so that accesses to CPACR_EL1,
	 * CPTR_EL2, CPACR, or HCPTR do not trap to EL3.
	 */
	uint64_t cptr_el3 = CPTR_EL3_RESET_VAL & ~(TCPAC_BIT | TFP_BIT);

	per_world_ctx->ctx_cptr_el3 = cptr_el3;
}
#endif /* IMAGE_BL31 */

/*******************************************************************************
 * Initialise per_world_context for the Non-Secure world.
 * This function enables the architecture extensions which have the same value
 * across the cores for the non-secure world.
 ******************************************************************************/
#if IMAGE_BL31
void manage_extensions_nonsecure_per_world(void)
{
	cm_el3_arch_init_per_world(&per_world_context[CPU_CONTEXT_NS]);

	if (is_feat_sme_supported()) {
		sme_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}

	if (is_feat_sve_supported()) {
		sve_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}

	if (is_feat_amu_supported()) {
		amu_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}

	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}
}
#endif /* IMAGE_BL31 */

/*******************************************************************************
 * Initialise per_world_context for the Secure world.
 * This function enables the architecture extensions which have the same value
 * across the cores for the secure world.
 ******************************************************************************/
static void manage_extensions_secure_per_world(void)
{
#if IMAGE_BL31
	cm_el3_arch_init_per_world(&per_world_context[CPU_CONTEXT_SECURE]);

	if (is_feat_sme_supported()) {
		if (ENABLE_SME_FOR_SWD) {
			/*
			 * Enable SME, SVE, FPU/SIMD in the secure context; the
			 * SPM must ensure that the SME, SVE, and FPU/SIMD
			 * context is properly managed.
			 */
			sme_enable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
		} else {
			/*
			 * Disable SME, SVE, FPU/SIMD in the secure context so
			 * the non-secure world can safely use the associated
			 * registers.
			 */
			sme_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
		}
	}

	if (is_feat_sve_supported()) {
		if (ENABLE_SVE_FOR_SWD) {
			/*
			 * Enable SVE and FPU in the secure context; the SPM
			 * must ensure that the SVE and FPU register contexts
			 * are properly managed.
			 */
			sve_enable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
		} else {
			/*
			 * Disable SVE and FPU in the secure context so the
			 * non-secure world can safely use them.
			 */
			sve_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
		}
	}

	/* NS can access this but Secure shouldn't */
	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
	}

	has_secure_perworld_init = true;
#endif /* IMAGE_BL31 */
}

/*******************************************************************************
 * Enable architecture extensions on first entry to the Non-secure world.
 ******************************************************************************/
static void manage_extensions_nonsecure(cpu_context_t *ctx)
{
#if IMAGE_BL31
	if (is_feat_amu_supported()) {
		amu_enable(ctx);
	}

	if (is_feat_sme_supported()) {
		sme_enable(ctx);
	}

	if (is_feat_mpam_supported()) {
		mpam_enable(ctx);
	}

	pmuv3_enable(ctx);
#endif /* IMAGE_BL31 */
}

/* TODO: move to lib/extensions/pauth when it has been ported to FEAT_STATE */
static __unused void enable_pauth_el2(void)
{
	u_register_t hcr_el2 = read_hcr_el2();

	/*
	 * For the Armv8.3 pointer authentication feature, disable traps to
	 * EL2 when accessing key registers or using pointer authentication
	 * instructions from lower ELs.
	 */
	hcr_el2 |= (HCR_API_BIT | HCR_APK_BIT);

	write_hcr_el2(hcr_el2);
}

#if INIT_UNUSED_NS_EL2
/*******************************************************************************
 * Enable architecture extensions in-place at EL2 on first entry to the
 * Non-secure world when EL2 is empty and unused.
 ******************************************************************************/
static void manage_extensions_nonsecure_el2_unused(void)
{
#if IMAGE_BL31
	if (is_feat_spe_supported()) {
		spe_init_el2_unused();
	}

	if (is_feat_amu_supported()) {
		amu_init_el2_unused();
	}

	if (is_feat_mpam_supported()) {
		mpam_init_el2_unused();
	}

	if (is_feat_trbe_supported()) {
		trbe_init_el2_unused();
	}

	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_init_el2_unused();
	}

	if (is_feat_trf_supported()) {
		trf_init_el2_unused();
	}

	pmuv3_init_el2_unused();

	if (is_feat_sve_supported()) {
		sve_init_el2_unused();
	}

	if (is_feat_sme_supported()) {
		sme_init_el2_unused();
	}

#if ENABLE_PAUTH
	enable_pauth_el2();
#endif /* ENABLE_PAUTH */
#endif /* IMAGE_BL31 */
}
#endif /* INIT_UNUSED_NS_EL2 */

/*******************************************************************************
 * Enable architecture extensions on first entry to the Secure world.
 ******************************************************************************/
static void manage_extensions_secure(cpu_context_t *ctx)
{
#if IMAGE_BL31
	if (is_feat_sme_supported()) {
		if (ENABLE_SME_FOR_SWD) {
			/*
			 * Enable SME, SVE, FPU/SIMD in the secure context; the
			 * secure manager must ensure that the SME, SVE, and
			 * FPU/SIMD context is properly managed.
			 */
			sme_init_el3();
			sme_enable(ctx);
		} else {
			/*
			 * Disable SME, SVE, FPU/SIMD in the secure context so
			 * the non-secure world can safely use the associated
			 * registers.
			 */
			sme_disable(ctx);
		}
	}
#endif /* IMAGE_BL31 */
}

/*******************************************************************************
 * The following function initializes the cpu_context for a CPU specified by
 * its `cpu_idx` for first use, and sets the initial entrypoint state as
 * specified by the entry_point_info structure.
 ******************************************************************************/
void cm_init_context_by_index(unsigned int cpu_idx,
			      const entry_point_info_t *ep)
{
	cpu_context_t *ctx;

	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}

/*******************************************************************************
 * The following function initializes the cpu_context for the current CPU
 * for first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 ******************************************************************************/
void cm_init_my_context(const entry_point_info_t *ep)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}

/*
 * EL2 is present but unused; it needs to be disabled safely. SCTLR_EL2 can be
 * ignored.
 */
static void init_nonsecure_el2_unused(cpu_context_t *ctx)
{
#if INIT_UNUSED_NS_EL2
	u_register_t hcr_el2 = HCR_RESET_VAL;
	u_register_t mdcr_el2;
	u_register_t scr_el3;

	scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);

	/* Set EL2 register width: Set HCR_EL2.RW to match SCR_EL3.RW */
	if ((scr_el3 & SCR_RW_BIT) != 0U) {
		hcr_el2 |= HCR_RW_BIT;
	}

	write_hcr_el2(hcr_el2);

	/*
	 * Initialise CPTR_EL2, setting all fields rather than relying on the
	 * hw. All fields have architecturally UNKNOWN reset values.
	 */
	write_cptr_el2(CPTR_EL2_RESET_VAL);

	/*
	 * Initialise CNTHCTL_EL2. All fields are architecturally UNKNOWN on
	 * reset and are set to zero except for the field(s) listed below.
	 *
	 * CNTHCTL_EL2.EL1PCEN: Set to one to disable traps to Hyp mode of
	 * Non-secure EL0 and EL1 accesses to the physical timer registers.
	 *
	 * CNTHCTL_EL2.EL1PCTEN: Set to one to disable traps to Hyp mode of
	 * Non-secure EL0 and EL1 accesses to the physical counter registers.
	 */
	write_cnthctl_el2(CNTHCTL_RESET_VAL | EL1PCEN_BIT | EL1PCTEN_BIT);

	/*
	 * Initialise CNTVOFF_EL2 to zero as it resets to an architecturally
	 * UNKNOWN value.
	 */
	write_cntvoff_el2(0);

	/*
	 * Set VPIDR_EL2 and VMPIDR_EL2 to match MIDR_EL1 and MPIDR_EL1
	 * respectively.
	 */
	write_vpidr_el2(read_midr_el1());
	write_vmpidr_el2(read_mpidr_el1());

	/*
	 * Initialise VTTBR_EL2. All fields are architecturally UNKNOWN on
	 * reset.
	 *
	 * VTTBR_EL2.VMID: Set to zero. Even though EL1&0 stage 2 address
	 * translation is disabled, cache maintenance operations depend on the
	 * VMID.
	 *
	 * VTTBR_EL2.BADDR: Set to zero as EL1&0 stage 2 address translation
	 * is disabled.
	 */
	write_vttbr_el2(VTTBR_RESET_VAL &
		~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT) |
		  (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT)));

	/*
	 * Initialise MDCR_EL2, setting all fields rather than relying on hw.
	 * Some fields are architecturally UNKNOWN on reset.
	 *
	 * MDCR_EL2.TDRA: Set to zero so that Non-secure EL0 and EL1 System
	 * register accesses to the Debug ROM registers are not trapped to EL2.
	 *
	 * MDCR_EL2.TDOSA: Set to zero so that Non-secure EL1 System register
	 * accesses to the powerdown debug registers are not trapped to EL2.
	 *
	 * MDCR_EL2.TDA: Set to zero so that System register accesses to the
	 * debug registers do not trap to EL2.
	 *
	 * MDCR_EL2.TDE: Set to zero so that debug exceptions are not routed
	 * to EL2.
	 */
	mdcr_el2 = MDCR_EL2_RESET_VAL &
		 ~(MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT | MDCR_EL2_TDA_BIT |
		   MDCR_EL2_TDE_BIT);

	write_mdcr_el2(mdcr_el2);

	/*
	 * Initialise HSTR_EL2. All fields are architecturally UNKNOWN on
	 * reset.
	 *
	 * HSTR_EL2.T<n>: Set all these fields to zero so that Non-secure EL0
	 * or EL1 accesses to System registers do not trap to EL2.
	 */
	write_hstr_el2(HSTR_EL2_RESET_VAL & ~(HSTR_EL2_T_MASK));

	/*
	 * Initialise CNTHP_CTL_EL2. All fields are architecturally UNKNOWN on
	 * reset.
	 *
	 * CNTHP_CTL_EL2.ENABLE: Set to zero to disable the EL2 physical timer
	 * and prevent timer interrupts.
	 */
	write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL & ~(CNTHP_CTL_ENABLE_BIT));

	manage_extensions_nonsecure_el2_unused();
#endif /* INIT_UNUSED_NS_EL2 */
}

/*******************************************************************************
 * Prepare the CPU system registers for first entry into the realm, secure, or
 * normal world.
 *
 * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized.
 * If execution is requested to non-secure EL1 or svc mode, and the CPU
 * supports EL2, then EL2 is disabled by configuring all necessary EL2
 * registers.
 * For all entries, the EL1 registers are initialized from the cpu_context.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	u_register_t sctlr_elx, scr_el3;
	cpu_context_t *ctx = cm_get_context(security_state);

	assert(ctx != NULL);

	if (security_state == NON_SECURE) {
		uint64_t el2_implemented = el_implemented(2);

		scr_el3 = read_ctx_reg(get_el3state_ctx(ctx),
				       CTX_SCR_EL3);

		if (((scr_el3 & SCR_HCE_BIT) != 0U)
			|| (el2_implemented != EL_IMPL_NONE)) {
			/*
			 * If the context is not being used for EL2, initialize
			 * HCRX_EL2 with its init value here.
			 */
			if (is_feat_hcx_supported()) {
				write_hcrx_el2(HCRX_EL2_INIT_VAL);
			}

			/*
			 * Initialize the Fine-grained trap registers
			 * introduced by FEAT_FGT so all traps are initially
			 * disabled when switching to EL2 or a lower EL,
			 * preventing undesired behavior.
			 */
			if (is_feat_fgt_supported()) {
				/*
				 * Initialize HFG*_EL2 registers with a default
				 * value so legacy systems unaware of FEAT_FGT
				 * do not get trapped due to their lack of
				 * initialization for this feature.
				 */
				write_hfgitr_el2(HFGITR_EL2_INIT_VAL);
				write_hfgrtr_el2(HFGRTR_EL2_INIT_VAL);
				write_hfgwtr_el2(HFGWTR_EL2_INIT_VAL);
			}
		}

		if ((scr_el3 & SCR_HCE_BIT) != 0U) {
			/* Use SCTLR_EL1.EE value to initialise sctlr_el2 */
			sctlr_elx = read_ctx_reg(get_el1_sysregs_ctx(ctx),
						 CTX_SCTLR_EL1);
			sctlr_elx &= SCTLR_EE_BIT;
			sctlr_elx |= SCTLR_EL2_RES1;
#if ERRATA_A75_764081
			/*
			 * If the workaround for Cortex-A75 erratum 764081 is
			 * used, then set SCTLR_EL2.IESB to enable the Implicit
			 * Error Synchronization Barrier.
			 */
			sctlr_elx |= SCTLR_IESB_BIT;
#endif
			write_sctlr_el2(sctlr_elx);
		} else if (el2_implemented != EL_IMPL_NONE) {
			init_nonsecure_el2_unused(ctx);
		}
	}

	cm_el1_sysregs_context_restore(security_state);
	cm_set_next_eret_context(security_state);
}

#if CTX_INCLUDE_EL2_REGS

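/*
 * Helpers to save/restore the fine-grained trap registers introduced by
 * FEAT_FGT. HAFGRTR_EL2 is only implemented when FEAT_AMUv1 is also present,
 * hence the additional AMU check.
 */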
static void el2_sysregs_context_save_fgt(el2_sysregs_t *ctx)
{
	write_ctx_reg(ctx, CTX_HDFGRTR_EL2, read_hdfgrtr_el2());
	if (is_feat_amu_supported()) {
		write_ctx_reg(ctx, CTX_HAFGRTR_EL2, read_hafgrtr_el2());
	}
	write_ctx_reg(ctx, CTX_HDFGWTR_EL2, read_hdfgwtr_el2());
	write_ctx_reg(ctx, CTX_HFGITR_EL2, read_hfgitr_el2());
	write_ctx_reg(ctx, CTX_HFGRTR_EL2, read_hfgrtr_el2());
	write_ctx_reg(ctx, CTX_HFGWTR_EL2, read_hfgwtr_el2());
}

static void el2_sysregs_context_restore_fgt(el2_sysregs_t *ctx)
{
	write_hdfgrtr_el2(read_ctx_reg(ctx, CTX_HDFGRTR_EL2));
	if (is_feat_amu_supported()) {
		write_hafgrtr_el2(read_ctx_reg(ctx, CTX_HAFGRTR_EL2));
	}
	write_hdfgwtr_el2(read_ctx_reg(ctx, CTX_HDFGWTR_EL2));
	write_hfgitr_el2(read_ctx_reg(ctx, CTX_HFGITR_EL2));
	write_hfgrtr_el2(read_ctx_reg(ctx, CTX_HFGRTR_EL2));
	write_hfgwtr_el2(read_ctx_reg(ctx, CTX_HFGWTR_EL2));
}

static void el2_sysregs_context_save_mpam(el2_sysregs_t *ctx)
{
	u_register_t mpam_idr = read_mpamidr_el1();

	write_ctx_reg(ctx, CTX_MPAM2_EL2, read_mpam2_el2());

	/*
	 * The context registers that we intend to save would be part of the
	 * PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1.
	 */
	if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) {
		return;
	}

	/*
	 * MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 are always present if
	 * MPAMIDR_EL1.HAS_HCR == 1.
	 */
	write_ctx_reg(ctx, CTX_MPAMHCR_EL2, read_mpamhcr_el2());
	write_ctx_reg(ctx, CTX_MPAMVPM0_EL2, read_mpamvpm0_el2());
	write_ctx_reg(ctx, CTX_MPAMVPMV_EL2, read_mpamvpmv_el2());

	/*
	 * The number of MPAMVPM registers is implementation defined; their
	 * number is stored in the MPAMIDR_EL1 register.
	 */
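	/* Cases fall through from the highest implemented index down to 1. */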
	switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) {
	case 7:
		write_ctx_reg(ctx, CTX_MPAMVPM7_EL2, read_mpamvpm7_el2());
		__fallthrough;
	case 6:
		write_ctx_reg(ctx, CTX_MPAMVPM6_EL2, read_mpamvpm6_el2());
		__fallthrough;
	case 5:
		write_ctx_reg(ctx, CTX_MPAMVPM5_EL2, read_mpamvpm5_el2());
		__fallthrough;
	case 4:
		write_ctx_reg(ctx, CTX_MPAMVPM4_EL2, read_mpamvpm4_el2());
		__fallthrough;
	case 3:
		write_ctx_reg(ctx, CTX_MPAMVPM3_EL2, read_mpamvpm3_el2());
		__fallthrough;
	case 2:
		write_ctx_reg(ctx, CTX_MPAMVPM2_EL2, read_mpamvpm2_el2());
		__fallthrough;
	case 1:
		write_ctx_reg(ctx, CTX_MPAMVPM1_EL2, read_mpamvpm1_el2());
		break;
	}
}

static void el2_sysregs_context_restore_mpam(el2_sysregs_t *ctx)
{
	u_register_t mpam_idr = read_mpamidr_el1();

	write_mpam2_el2(read_ctx_reg(ctx, CTX_MPAM2_EL2));

	if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) {
		return;
	}

	write_mpamhcr_el2(read_ctx_reg(ctx, CTX_MPAMHCR_EL2));
	write_mpamvpm0_el2(read_ctx_reg(ctx, CTX_MPAMVPM0_EL2));
	write_mpamvpmv_el2(read_ctx_reg(ctx, CTX_MPAMVPMV_EL2));

	switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) {
	case 7:
		write_mpamvpm7_el2(read_ctx_reg(ctx, CTX_MPAMVPM7_EL2));
		__fallthrough;
	case 6:
		write_mpamvpm6_el2(read_ctx_reg(ctx, CTX_MPAMVPM6_EL2));
		__fallthrough;
	case 5:
		write_mpamvpm5_el2(read_ctx_reg(ctx, CTX_MPAMVPM5_EL2));
		__fallthrough;
	case 4:
		write_mpamvpm4_el2(read_ctx_reg(ctx, CTX_MPAMVPM4_EL2));
		__fallthrough;
	case 3:
		write_mpamvpm3_el2(read_ctx_reg(ctx, CTX_MPAMVPM3_EL2));
		__fallthrough;
	case 2:
		write_mpamvpm2_el2(read_ctx_reg(ctx, CTX_MPAMVPM2_EL2));
		__fallthrough;
	case 1:
		write_mpamvpm1_el2(read_ctx_reg(ctx, CTX_MPAMVPM1_EL2));
		break;
	}
}

/* -----------------------------------------------------
 * The following registers are not added:
 * AMEVCNTVOFF0<n>_EL2
 * AMEVCNTVOFF1<n>_EL2
 * ICH_AP0R<n>_EL2
 * ICH_AP1R<n>_EL2
 * ICH_LR<n>_EL2
 * -----------------------------------------------------
 */
static void el2_sysregs_context_save_common(el2_sysregs_t *ctx)
{
	write_ctx_reg(ctx, CTX_ACTLR_EL2, read_actlr_el2());
	write_ctx_reg(ctx, CTX_AFSR0_EL2, read_afsr0_el2());
	write_ctx_reg(ctx, CTX_AFSR1_EL2, read_afsr1_el2());
	write_ctx_reg(ctx, CTX_AMAIR_EL2, read_amair_el2());
	write_ctx_reg(ctx, CTX_CNTHCTL_EL2, read_cnthctl_el2());
	write_ctx_reg(ctx, CTX_CNTVOFF_EL2, read_cntvoff_el2());
	write_ctx_reg(ctx, CTX_CPTR_EL2, read_cptr_el2());
	if (CTX_INCLUDE_AARCH32_REGS) {
		write_ctx_reg(ctx, CTX_DBGVCR32_EL2, read_dbgvcr32_el2());
	}
	write_ctx_reg(ctx, CTX_ELR_EL2, read_elr_el2());
	write_ctx_reg(ctx, CTX_ESR_EL2, read_esr_el2());
	write_ctx_reg(ctx, CTX_FAR_EL2, read_far_el2());
	write_ctx_reg(ctx, CTX_HACR_EL2, read_hacr_el2());
	write_ctx_reg(ctx, CTX_HCR_EL2, read_hcr_el2());
	write_ctx_reg(ctx, CTX_HPFAR_EL2, read_hpfar_el2());
	write_ctx_reg(ctx, CTX_HSTR_EL2, read_hstr_el2());

	/*
	 * Set the NS bit to be able to access the ICC_SRE_EL2 register
	 * TODO: remove with root context
	 */
	u_register_t scr_el3 = read_scr_el3();

	write_scr_el3(scr_el3 | SCR_NS_BIT);
	isb();
	write_ctx_reg(ctx, CTX_ICC_SRE_EL2, read_icc_sre_el2());

	write_scr_el3(scr_el3);
	isb();

	write_ctx_reg(ctx, CTX_ICH_HCR_EL2, read_ich_hcr_el2());
	write_ctx_reg(ctx, CTX_ICH_VMCR_EL2, read_ich_vmcr_el2());
	write_ctx_reg(ctx, CTX_MAIR_EL2, read_mair_el2());
	write_ctx_reg(ctx, CTX_MDCR_EL2, read_mdcr_el2());
	write_ctx_reg(ctx, CTX_SCTLR_EL2, read_sctlr_el2());
	write_ctx_reg(ctx, CTX_SPSR_EL2, read_spsr_el2());
	write_ctx_reg(ctx, CTX_SP_EL2, read_sp_el2());
	write_ctx_reg(ctx, CTX_TCR_EL2, read_tcr_el2());
	write_ctx_reg(ctx, CTX_TPIDR_EL2, read_tpidr_el2());
	write_ctx_reg(ctx, CTX_TTBR0_EL2, read_ttbr0_el2());
	write_ctx_reg(ctx, CTX_VBAR_EL2, read_vbar_el2());
	write_ctx_reg(ctx, CTX_VMPIDR_EL2, read_vmpidr_el2());
	write_ctx_reg(ctx, CTX_VPIDR_EL2, read_vpidr_el2());
	write_ctx_reg(ctx, CTX_VTCR_EL2, read_vtcr_el2());
	write_ctx_reg(ctx, CTX_VTTBR_EL2, read_vttbr_el2());
}

static void el2_sysregs_context_restore_common(el2_sysregs_t *ctx)
{
	write_actlr_el2(read_ctx_reg(ctx, CTX_ACTLR_EL2));
	write_afsr0_el2(read_ctx_reg(ctx, CTX_AFSR0_EL2));
	write_afsr1_el2(read_ctx_reg(ctx, CTX_AFSR1_EL2));
	write_amair_el2(read_ctx_reg(ctx, CTX_AMAIR_EL2));
	write_cnthctl_el2(read_ctx_reg(ctx, CTX_CNTHCTL_EL2));
	write_cntvoff_el2(read_ctx_reg(ctx, CTX_CNTVOFF_EL2));
	write_cptr_el2(read_ctx_reg(ctx, CTX_CPTR_EL2));
	if (CTX_INCLUDE_AARCH32_REGS) {
		write_dbgvcr32_el2(read_ctx_reg(ctx, CTX_DBGVCR32_EL2));
	}
	write_elr_el2(read_ctx_reg(ctx, CTX_ELR_EL2));
	write_esr_el2(read_ctx_reg(ctx, CTX_ESR_EL2));
	write_far_el2(read_ctx_reg(ctx, CTX_FAR_EL2));
	write_hacr_el2(read_ctx_reg(ctx, CTX_HACR_EL2));
	write_hcr_el2(read_ctx_reg(ctx, CTX_HCR_EL2));
	write_hpfar_el2(read_ctx_reg(ctx, CTX_HPFAR_EL2));
	write_hstr_el2(read_ctx_reg(ctx, CTX_HSTR_EL2));

	/*
	 * Set the NS bit to be able to access the ICC_SRE_EL2 register
	 * TODO: remove with root context
	 */
	u_register_t scr_el3 = read_scr_el3();

	write_scr_el3(scr_el3 | SCR_NS_BIT);
	isb();
	write_icc_sre_el2(read_ctx_reg(ctx, CTX_ICC_SRE_EL2));

	write_scr_el3(scr_el3);
	isb();

	write_ich_hcr_el2(read_ctx_reg(ctx, CTX_ICH_HCR_EL2));
	write_ich_vmcr_el2(read_ctx_reg(ctx, CTX_ICH_VMCR_EL2));
	write_mair_el2(read_ctx_reg(ctx, CTX_MAIR_EL2));
	write_mdcr_el2(read_ctx_reg(ctx, CTX_MDCR_EL2));
	write_sctlr_el2(read_ctx_reg(ctx, CTX_SCTLR_EL2));
	write_spsr_el2(read_ctx_reg(ctx, CTX_SPSR_EL2));
	write_sp_el2(read_ctx_reg(ctx, CTX_SP_EL2));
	write_tcr_el2(read_ctx_reg(ctx, CTX_TCR_EL2));
	write_tpidr_el2(read_ctx_reg(ctx, CTX_TPIDR_EL2));
	write_ttbr0_el2(read_ctx_reg(ctx, CTX_TTBR0_EL2));
	write_vbar_el2(read_ctx_reg(ctx, CTX_VBAR_EL2));
	write_vmpidr_el2(read_ctx_reg(ctx, CTX_VMPIDR_EL2));
	write_vpidr_el2(read_ctx_reg(ctx, CTX_VPIDR_EL2));
	write_vtcr_el2(read_ctx_reg(ctx, CTX_VTCR_EL2));
	write_vttbr_el2(read_ctx_reg(ctx, CTX_VTTBR_EL2));
}

/*******************************************************************************
 * Save EL2 sysreg context
 ******************************************************************************/
void cm_el2_sysregs_context_save(uint32_t security_state)
{
	cpu_context_t *ctx;
	el2_sysregs_t *el2_sysregs_ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);

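	/*
	 * The common EL2 registers are always saved; the groups below are
	 * saved only when the corresponding feature is supported.
	 */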
	el2_sysregs_context_save_common(el2_sysregs_ctx);
#if CTX_INCLUDE_MTE_REGS
	write_ctx_reg(el2_sysregs_ctx, CTX_TFSR_EL2, read_tfsr_el2());
#endif
	if (is_feat_mpam_supported()) {
		el2_sysregs_context_save_mpam(el2_sysregs_ctx);
	}

	if (is_feat_fgt_supported()) {
		el2_sysregs_context_save_fgt(el2_sysregs_ctx);
	}

	if (is_feat_ecv_v2_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_CNTPOFF_EL2, read_cntpoff_el2());
	}

	if (is_feat_vhe_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_CONTEXTIDR_EL2, read_contextidr_el2());
		write_ctx_reg(el2_sysregs_ctx, CTX_TTBR1_EL2, read_ttbr1_el2());
	}

	if (is_feat_ras_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_VDISR_EL2, read_vdisr_el2());
		write_ctx_reg(el2_sysregs_ctx, CTX_VSESR_EL2, read_vsesr_el2());
	}

	if (is_feat_nv2_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_VNCR_EL2, read_vncr_el2());
	}

	if (is_feat_trf_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_TRFCR_EL2, read_trfcr_el2());
	}

	if (is_feat_csv2_2_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_SCXTNUM_EL2, read_scxtnum_el2());
	}

	if (is_feat_hcx_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_HCRX_EL2, read_hcrx_el2());
	}

	if (is_feat_tcr2_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_TCR2_EL2, read_tcr2_el2());
	}

	if (is_feat_sxpie_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_PIRE0_EL2, read_pire0_el2());
		write_ctx_reg(el2_sysregs_ctx, CTX_PIR_EL2, read_pir_el2());
	}

	if (is_feat_s2pie_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_S2PIR_EL2, read_s2pir_el2());
	}

	if (is_feat_sxpoe_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_POR_EL2, read_por_el2());
	}

	if (is_feat_gcs_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_GCSPR_EL2, read_gcspr_el2());
		write_ctx_reg(el2_sysregs_ctx, CTX_GCSCR_EL2, read_gcscr_el2());
	}
}

/*******************************************************************************
 * Restore EL2 sysreg context
 ******************************************************************************/
void cm_el2_sysregs_context_restore(uint32_t security_state)
{
	cpu_context_t *ctx;
	el2_sysregs_t *el2_sysregs_ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);

	el2_sysregs_context_restore_common(el2_sysregs_ctx);
#if CTX_INCLUDE_MTE_REGS
	write_tfsr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TFSR_EL2));
#endif
	if (is_feat_mpam_supported()) {
		el2_sysregs_context_restore_mpam(el2_sysregs_ctx);
	}

	if (is_feat_fgt_supported()) {
		el2_sysregs_context_restore_fgt(el2_sysregs_ctx);
	}

	if (is_feat_ecv_v2_supported()) {
		write_cntpoff_el2(read_ctx_reg(el2_sysregs_ctx, CTX_CNTPOFF_EL2));
	}

	if (is_feat_vhe_supported()) {
		write_contextidr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_CONTEXTIDR_EL2));
		write_ttbr1_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TTBR1_EL2));
	}

	if (is_feat_ras_supported()) {
		write_vdisr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_VDISR_EL2));
		write_vsesr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_VSESR_EL2));
	}

	if (is_feat_nv2_supported()) {
		write_vncr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_VNCR_EL2));
	}

	if (is_feat_trf_supported()) {
		write_trfcr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TRFCR_EL2));
	}

	if (is_feat_csv2_2_supported()) {
		write_scxtnum_el2(read_ctx_reg(el2_sysregs_ctx, CTX_SCXTNUM_EL2));
	}

	if (is_feat_hcx_supported()) {
		write_hcrx_el2(read_ctx_reg(el2_sysregs_ctx, CTX_HCRX_EL2));
	}

	if (is_feat_tcr2_supported()) {
		write_tcr2_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TCR2_EL2));
	}

	if (is_feat_sxpie_supported()) {
		write_pire0_el2(read_ctx_reg(el2_sysregs_ctx, CTX_PIRE0_EL2));
		write_pir_el2(read_ctx_reg(el2_sysregs_ctx, CTX_PIR_EL2));
	}

	if (is_feat_s2pie_supported()) {
		write_s2pir_el2(read_ctx_reg(el2_sysregs_ctx, CTX_S2PIR_EL2));
	}

	if (is_feat_sxpoe_supported()) {
		write_por_el2(read_ctx_reg(el2_sysregs_ctx, CTX_POR_EL2));
	}

	if (is_feat_gcs_supported()) {
		write_gcscr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_GCSCR_EL2));
		write_gcspr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_GCSPR_EL2));
	}
}
#endif /* CTX_INCLUDE_EL2_REGS */

/*******************************************************************************
 * This function is used to exit to the Non-secure world. If
 * CTX_INCLUDE_EL2_REGS is enabled, it restores the EL1 and EL2 sysreg contexts
 * instead of directly updating the EL1 and EL2 registers. Otherwise, it calls
 * the generic cm_prepare_el3_exit function.
 ******************************************************************************/
void cm_prepare_el3_exit_ns(void)
{
#if CTX_INCLUDE_EL2_REGS
#if ENABLE_ASSERTIONS
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	assert(ctx != NULL);

	/* Assert that EL2 is used. */
	u_register_t scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);
	assert(((scr_el3 & SCR_HCE_BIT) != 0UL) &&
			(el_implemented(2U) != EL_IMPL_NONE));
#endif /* ENABLE_ASSERTIONS */

	/* Restore EL2 and EL1 sysreg contexts */
	cm_el2_sysregs_context_restore(NON_SECURE);
	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);
#else
	cm_prepare_el3_exit(NON_SECURE);
#endif /* CTX_INCLUDE_EL2_REGS */
}

/*******************************************************************************
 * The next four functions are used by runtime services to save and restore
 * EL1 context on the 'cpu_context' structure for the specified security
 * state.
 ******************************************************************************/
void cm_el1_sysregs_context_save(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	el1_sysregs_context_save(get_el1_sysregs_ctx(ctx));

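	/*
	 * Publish a context-management event so that interested modules can
	 * react to the world transition.
	 */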
#if IMAGE_BL31
	if (security_state == SECURE) {
		PUBLISH_EVENT(cm_exited_secure_world);
	} else {
		PUBLISH_EVENT(cm_exited_normal_world);
	}
#endif
}

void cm_el1_sysregs_context_restore(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	el1_sysregs_context_restore(get_el1_sysregs_ctx(ctx));

#if IMAGE_BL31
	if (security_state == SECURE) {
		PUBLISH_EVENT(cm_entering_secure_world);
	} else {
		PUBLISH_EVENT(cm_entering_normal_world);
	}
#endif
}

/*******************************************************************************
 * This function populates the ELR_EL3 member of the 'cpu_context' pertaining
 * to the given security state with the given entrypoint.
 ******************************************************************************/
void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint)
{
	cpu_context_t *ctx;
	el3_state_t *state;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Populate EL3 state so that ERET jumps to the correct entry */
	state = get_el3state_ctx(ctx);
	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
}

/*******************************************************************************
 * This function populates the ELR_EL3 and SPSR_EL3 members of the
 * 'cpu_context' pertaining to the given security state.
 ******************************************************************************/
void cm_set_elr_spsr_el3(uint32_t security_state,
			uintptr_t entrypoint, uint32_t spsr)
{
	cpu_context_t *ctx;
	el3_state_t *state;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Populate EL3 state so that ERET jumps to the correct entry */
	state = get_el3state_ctx(ctx);
	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
	write_ctx_reg(state, CTX_SPSR_EL3, spsr);
}

/*******************************************************************************
 * This function updates a single bit in the SCR_EL3 member of the
 * 'cpu_context' pertaining to the given security state, using the value and
 * bit position specified in the parameters. It preserves all other bits.
 ******************************************************************************/
void cm_write_scr_el3_bit(uint32_t security_state,
			  uint32_t bit_pos,
			  uint32_t value)
{
	cpu_context_t *ctx;
	el3_state_t *state;
	u_register_t scr_el3;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Ensure that the bit position is a valid one */
	assert(((1UL << bit_pos) & SCR_VALID_BIT_MASK) != 0U);

	/* Ensure that the 'value' is only a bit wide */
	assert(value <= 1U);

	/*
	 * Get the SCR_EL3 value from the cpu context, clear the desired bit
	 * and set it to its new value.
	 */
	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
	scr_el3 &= ~(1UL << bit_pos);
	scr_el3 |= (u_register_t)value << bit_pos;
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
}

/*******************************************************************************
 * This function retrieves the SCR_EL3 member of the 'cpu_context' pertaining
 * to the given security state.
 ******************************************************************************/
u_register_t cm_get_scr_el3(uint32_t security_state)
{
	cpu_context_t *ctx;
	el3_state_t *state;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	state = get_el3state_ctx(ctx);
	return read_ctx_reg(state, CTX_SCR_EL3);
}

/*******************************************************************************
 * This function is used to program the context that's used for exception
 * return. This initializes SP_EL3 to a pointer to a 'cpu_context' set for
 * the required security state.
 ******************************************************************************/
void cm_set_next_eret_context(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	cm_set_next_context(ctx);
}