/*
 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2022, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <drivers/arm/gicv3.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/brbe.h>
#include <lib/extensions/mpam.h>
#include <lib/extensions/pmuv3.h>
#include <lib/extensions/sme.h>
#include <lib/extensions/spe.h>
#include <lib/extensions/sve.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/extensions/trbe.h>
#include <lib/extensions/trf.h>
#include <lib/utils.h>

#if ENABLE_FEAT_TWED
/* Make sure the delay value fits within the range (0-15). */
CASSERT(((TWED_DELAY & ~SCR_TWEDEL_MASK) == 0U), assert_twed_delay_value_check);
#endif /* ENABLE_FEAT_TWED */

per_world_context_t per_world_context[CPU_DATA_CONTEXT_NUM];
static bool has_secure_perworld_init;

static void manage_extensions_nonsecure(cpu_context_t *ctx);
static void manage_extensions_secure(cpu_context_t *ctx);
static void manage_extensions_secure_per_world(void);

static void setup_el1_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t sctlr_elx, actlr_elx;

	/*
	 * Initialise SCTLR_EL1 to the reset value corresponding to the target
	 * execution state, setting all fields rather than relying on the hw.
	 * Some fields have architecturally UNKNOWN reset values and these are
	 * set to zero.
	 *
	 * SCTLR.EE: Endianness is taken from the entrypoint attributes.
	 *
	 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
	 * required by the PSCI specification).
	 */
	sctlr_elx = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0UL;
	if (GET_RW(ep->spsr) == MODE_RW_64) {
		sctlr_elx |= SCTLR_EL1_RES1;
	} else {
		/*
		 * If the target execution state is AArch32 then the following
		 * fields need to be set.
		 *
		 * SCTLR_EL1.nTWE: Set to one so that EL0 execution of WFE
		 * instructions is not trapped to EL1.
		 *
		 * SCTLR_EL1.nTWI: Set to one so that EL0 execution of WFI
		 * instructions is not trapped to EL1.
		 *
		 * SCTLR_EL1.CP15BEN: Set to one to enable EL0 execution of the
		 * CP15DMB, CP15DSB, and CP15ISB instructions.
		 */
		sctlr_elx |= SCTLR_AARCH32_EL1_RES1 | SCTLR_CP15BEN_BIT
					| SCTLR_NTWI_BIT | SCTLR_NTWE_BIT;
	}

#if ERRATA_A75_764081
	/*
	 * If the workaround for Cortex-A75 erratum 764081 is used then set
	 * SCTLR_EL1.IESB to enable the Implicit Error Synchronization Barrier.
	 */
	sctlr_elx |= SCTLR_IESB_BIT;
#endif
	/* Store the initialised SCTLR_EL1 value in the cpu_context */
	write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);

	/*
	 * Base the context ACTLR_EL1 on the current value, as it is
	 * implementation defined. The context restore process will write
	 * the value from the context to the actual register and can cause
	 * problems for processor cores that don't expect certain bits to
	 * be zero.
	 */
	actlr_elx = read_actlr_el1();
	write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_ACTLR_EL1, actlr_elx);
}
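/*
 * Note: setup_el1_context() only seeds the saved copy of the EL1 registers in
 * 'ctx'; the live registers are programmed later, when the context is restored
 * via cm_el1_sysregs_context_restore(). It is shared by the secure and
 * non-secure setup paths below, and skipped for the secure world when an SPMC
 * running at S-EL2 owns the S-EL1 context.
 */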
/******************************************************************************
 * This function performs initializations that are specific to SECURE state
 * and updates the cpu context specified by 'ctx'.
 *****************************************************************************/
static void setup_secure_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

#if defined(IMAGE_BL31) && !defined(SPD_spmd)
	/*
	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
	 * indicated by the interrupt routing model for BL31.
	 */
	scr_el3 |= get_scr_el3_from_routing_model(SECURE);
#endif

#if !CTX_INCLUDE_MTE_REGS || ENABLE_ASSERTIONS
	/* Get Memory Tagging Extension support level */
	unsigned int mte = get_armv8_5_mte_support();
#endif
	/*
	 * Allow access to Allocation Tags when CTX_INCLUDE_MTE_REGS
	 * is set, or when MTE is only implemented at EL0.
	 */
#if CTX_INCLUDE_MTE_REGS
	assert((mte == MTE_IMPLEMENTED_ELX) || (mte == MTE_IMPLEMENTED_ASY));
	scr_el3 |= SCR_ATA_BIT;
#else
	if (mte == MTE_IMPLEMENTED_EL0) {
		scr_el3 |= SCR_ATA_BIT;
	}
#endif /* CTX_INCLUDE_MTE_REGS */

	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);

	/*
	 * Initialize EL1 context registers unless SPMC is running
	 * at S-EL2.
	 */
#if !SPMD_SPM_AT_SEL2
	setup_el1_context(ctx, ep);
#endif

	manage_extensions_secure(ctx);

	/*
	 * manage_extensions_secure_per_world() has to be executed only once,
	 * as the registers it initialises hold the same value across all CPUs
	 * for the secure world. Hence, this check ensures that the registers
	 * are initialised once and avoids re-initialization from multiple
	 * cores.
	 */
	if (!has_secure_perworld_init) {
		manage_extensions_secure_per_world();
	}
}

#if ENABLE_RME
/******************************************************************************
 * This function performs initializations that are specific to REALM state
 * and updates the cpu context specified by 'ctx'.
 *****************************************************************************/
static void setup_realm_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

	scr_el3 |= SCR_NS_BIT | SCR_NSE_BIT;

	if (is_feat_csv2_2_supported()) {
		/* Enable access to the SCXTNUM_ELx registers. */
		scr_el3 |= SCR_EnSCXT_BIT;
	}

	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
}
#endif /* ENABLE_RME */
/******************************************************************************
 * This function performs initializations that are specific to NON-SECURE state
 * and updates the cpu context specified by 'ctx'.
 *****************************************************************************/
static void setup_ns_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

	/* SCR_NS: Set the NS bit */
	scr_el3 |= SCR_NS_BIT;

	/* Allow access to Allocation Tags when MTE is implemented. */
	scr_el3 |= SCR_ATA_BIT;

#if !CTX_INCLUDE_PAUTH_REGS
	/*
	 * The Pointer Authentication feature, if present, is always enabled
	 * by default for Non-secure lower exception levels. We do not have an
	 * explicit flag to set it.
	 * The CTX_INCLUDE_PAUTH_REGS flag is explicitly used to enable it for
	 * lower exception levels of the secure and realm worlds.
	 *
	 * To prevent leakage between the worlds during a world switch,
	 * we enable it only for the non-secure world.
	 *
	 * If the Secure/Realm world wants to use pointer authentication,
	 * CTX_INCLUDE_PAUTH_REGS must be explicitly set to 1, in which case
	 * it will be enabled globally for all the contexts.
	 *
	 * SCR_EL3.API: Set to one to not trap any PAuth instructions at ELs
	 * other than EL3
	 *
	 * SCR_EL3.APK: Set to one to not trap any PAuth key values at ELs
	 * other than EL3
	 */
	scr_el3 |= SCR_API_BIT | SCR_APK_BIT;
#endif /* !CTX_INCLUDE_PAUTH_REGS */

#if HANDLE_EA_EL3_FIRST_NS
	/* SCR_EL3.EA: Route External Abort and SError Interrupt to EL3. */
	scr_el3 |= SCR_EA_BIT;
#endif

#if RAS_TRAP_NS_ERR_REC_ACCESS
	/*
	 * SCR_EL3.TERR: Trap Error record accesses. Accesses to the RAS ERR
	 * and RAS ERX registers from EL1 and EL2 (from any security state)
	 * are trapped to EL3. Set here to trap only for NS EL1/EL2.
	 */
	scr_el3 |= SCR_TERR_BIT;
#endif

	if (is_feat_csv2_2_supported()) {
		/* Enable access to the SCXTNUM_ELx registers. */
		scr_el3 |= SCR_EnSCXT_BIT;
	}

#ifdef IMAGE_BL31
	/*
	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
	 * indicated by the interrupt routing model for BL31.
	 */
	scr_el3 |= get_scr_el3_from_routing_model(NON_SECURE);
#endif
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);

	/* Initialize EL1 context registers */
	setup_el1_context(ctx, ep);

	/* Initialize EL2 context registers */
#if CTX_INCLUDE_EL2_REGS

	/*
	 * Initialize the SCTLR_EL2 context register using the Endianness
	 * value taken from the entrypoint attribute.
	 */
	u_register_t sctlr_el2 = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0UL;
	sctlr_el2 |= SCTLR_EL2_RES1;
	write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_SCTLR_EL2, sctlr_el2);

	if (is_feat_hcx_supported()) {
		/*
		 * Initialize register HCRX_EL2 with its init value.
		 * As the value of HCRX_EL2 is UNKNOWN on reset, there is a
		 * chance that this can lead to unexpected behavior in lower
		 * ELs that have not been updated since the introduction of
		 * this feature if not properly initialized, especially when
		 * it comes to those bits that enable/disable traps.
		 */
		write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_HCRX_EL2,
			HCRX_EL2_INIT_VAL);
	}

	if (is_feat_fgt_supported()) {
		/*
		 * Initialize HFG*_EL2 registers with a default value so legacy
		 * systems unaware of FEAT_FGT do not get trapped due to their
		 * lack of initialization for this feature.
		 */
		write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_HFGITR_EL2,
			HFGITR_EL2_INIT_VAL);
		write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_HFGRTR_EL2,
			HFGRTR_EL2_INIT_VAL);
		write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_HFGWTR_EL2,
			HFGWTR_EL2_INIT_VAL);
	}
#endif /* CTX_INCLUDE_EL2_REGS */

	manage_extensions_nonsecure(ctx);
}
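/*
 * Note on ordering: cm_setup_context() further below first zeroes the context
 * and populates the state common to all security states via
 * setup_context_common(), and only then invokes one of the three per-state
 * hooks above to refine SCR_EL3 and the lower-EL context for that world.
 */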
/*******************************************************************************
 * The following function performs initialization of the cpu_context 'ctx'
 * for first use that is common to all security states, and sets the
 * initial entrypoint state as specified by the entry_point_info structure.
 *
 * The EE and ST attributes are used to configure the endianness and secure
 * timer availability for the new execution context.
 ******************************************************************************/
static void setup_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;
	gp_regs_t *gp_regs;

	state = get_el3state_ctx(ctx);

	/* Clear any residual register values from the context */
	zeromem(ctx, sizeof(*ctx));

	/*
	 * The lower-EL context is zeroed so that no stale values leak to a
	 * world. It is assumed that an all-zero lower-EL context is good
	 * enough for it to boot correctly. However, there are very few
	 * registers where this is not true and some values need to be
	 * recreated.
	 */
#if CTX_INCLUDE_EL2_REGS
	el2_sysregs_t *el2_ctx = get_el2_sysregs_ctx(ctx);

	/*
	 * These bits are set in the gicv3 driver. Losing them (especially the
	 * SRE bit) is problematic for all worlds. Hence, recreate them.
	 */
	u_register_t icc_sre_el2 = ICC_SRE_DIB_BIT | ICC_SRE_DFB_BIT |
				   ICC_SRE_EN_BIT | ICC_SRE_SRE_BIT;
	write_ctx_reg(el2_ctx, CTX_ICC_SRE_EL2, icc_sre_el2);
#endif /* CTX_INCLUDE_EL2_REGS */

	/* Start with a clean SCR_EL3 copy as all relevant values are set */
	scr_el3 = SCR_RESET_VAL;

	/*
	 * SCR_EL3.TWE: Set to zero so that execution of WFE instructions at
	 * EL2, EL1 and EL0 is not trapped to EL3.
	 *
	 * SCR_EL3.TWI: Set to zero so that execution of WFI instructions at
	 * EL2, EL1 and EL0 is not trapped to EL3.
	 *
	 * SCR_EL3.SMD: Set to zero to enable SMC calls at EL1 and above, from
	 * both Security states and both Execution states.
	 *
	 * SCR_EL3.SIF: Set to one to disable secure instruction execution from
	 * Non-secure memory.
	 */
	scr_el3 &= ~(SCR_TWE_BIT | SCR_TWI_BIT | SCR_SMD_BIT);

	scr_el3 |= SCR_SIF_BIT;

	/*
	 * SCR_EL3.RW: Set the execution state, AArch32 or AArch64, for the
	 * next Exception level as specified by the SPSR.
	 */
	if (GET_RW(ep->spsr) == MODE_RW_64) {
		scr_el3 |= SCR_RW_BIT;
	}

	/*
	 * SCR_EL3.ST: Traps Secure EL1 accesses to the Counter-timer Physical
	 * Secure timer registers to EL3, from AArch64 state only, if specified
	 * by the entrypoint attributes. If SEL2 is present and enabled, the ST
	 * bit always behaves as 1 (i.e. secure physical timer register access
	 * is not trapped).
	 */
	if (EP_GET_ST(ep->h.attr) != 0U) {
		scr_el3 |= SCR_ST_BIT;
	}

	/*
	 * If FEAT_HCX is enabled, enable access to HCRX_EL2 by setting
	 * SCR_EL3.HXEn.
	 */
	if (is_feat_hcx_supported()) {
		scr_el3 |= SCR_HXEn_BIT;
	}

	/*
	 * If FEAT_RNG_TRAP is enabled, all reads of the RNDR and RNDRRS
	 * registers are trapped to EL3.
	 */
#if ENABLE_FEAT_RNG_TRAP
	scr_el3 |= SCR_TRNDR_BIT;
#endif

#if FAULT_INJECTION_SUPPORT
	/* Enable fault injection from lower ELs */
	scr_el3 |= SCR_FIEN_BIT;
#endif

#if CTX_INCLUDE_PAUTH_REGS
	/*
	 * Enable Pointer Authentication globally for all the worlds.
	 *
	 * SCR_EL3.API: Set to one to not trap any PAuth instructions at ELs
	 * other than EL3
	 *
	 * SCR_EL3.APK: Set to one to not trap any PAuth key values at ELs
	 * other than EL3
	 */
	scr_el3 |= SCR_API_BIT | SCR_APK_BIT;
#endif /* CTX_INCLUDE_PAUTH_REGS */

	/*
	 * SCR_EL3.TCR2EN: Enable access to TCR2_ELx for AArch64 if present.
	 */
	if (is_feat_tcr2_supported() && (GET_RW(ep->spsr) == MODE_RW_64)) {
		scr_el3 |= SCR_TCR2EN_BIT;
	}

	/*
	 * SCR_EL3.PIEN: Enable permission indirection and overlay
	 * registers for AArch64 if present.
	 */
	if (is_feat_sxpie_supported() || is_feat_sxpoe_supported()) {
		scr_el3 |= SCR_PIEN_BIT;
	}

	/*
	 * SCR_EL3.GCSEn: Enable GCS registers for AArch64 if present.
	 */
	if (is_feat_gcs_supported() && (GET_RW(ep->spsr) == MODE_RW_64)) {
		scr_el3 |= SCR_GCSEn_BIT;
	}
	/*
	 * SCR_EL3.HCE: Enable HVC instructions if the next execution state is
	 * AArch64 and the next EL is EL2, or if the next execution state is
	 * AArch32 and the next mode is Hyp.
	 * SCR_EL3.FGTEn: Enable Fine Grained Virtualization Traps under the
	 * same conditions as HVC instructions and when the processor supports
	 * ARMv8.6-FGT.
	 * SCR_EL3.ECVEn: Enable Enhanced Counter Virtualization (ECV)
	 * CNTPOFF_EL2 register under the same conditions as HVC instructions
	 * and when the processor supports ECV.
	 */
	if (((GET_RW(ep->spsr) == MODE_RW_64) && (GET_EL(ep->spsr) == MODE_EL2))
	    || ((GET_RW(ep->spsr) != MODE_RW_64)
		&& (GET_M32(ep->spsr) == MODE32_hyp))) {
		scr_el3 |= SCR_HCE_BIT;

		if (is_feat_fgt_supported()) {
			scr_el3 |= SCR_FGTEN_BIT;
		}

		if (is_feat_ecv_supported()) {
			scr_el3 |= SCR_ECVEN_BIT;
		}
	}

	/* Enable WFE trap delay in SCR_EL3 if supported and configured */
	if (is_feat_twed_supported()) {
		/* Set delay in SCR_EL3 */
		scr_el3 &= ~(SCR_TWEDEL_MASK << SCR_TWEDEL_SHIFT);
		scr_el3 |= ((TWED_DELAY & SCR_TWEDEL_MASK)
			     << SCR_TWEDEL_SHIFT);

		/* Enable WFE delay */
		scr_el3 |= SCR_TWEDEn_BIT;
	}

#if IMAGE_BL31 && defined(SPD_spmd) && SPMD_SPM_AT_SEL2
	/* Enable S-EL2 if FEAT_SEL2 is implemented for all the contexts. */
	if (is_feat_sel2_supported()) {
		scr_el3 |= SCR_EEL2_BIT;
	}
#endif /* (IMAGE_BL31 && defined(SPD_spmd) && SPMD_SPM_AT_SEL2) */

	if (is_feat_mpam_supported()) {
		write_ctx_reg(get_el3state_ctx(ctx), CTX_MPAM3_EL3,
			      MPAM3_EL3_RESET_VAL);
	}

	/*
	 * Populate EL3 state so that we have the right context
	 * before doing ERET
	 */
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
	write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
	write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr);

	/*
	 * Store the X0-X7 values from the entrypoint into the context.
	 * Use memcpy as we are in control of the layout of the structures.
	 */
	gp_regs = get_gpregs_ctx(ctx);
	memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
}
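/*
 * A short sketch of the argument hand-off at the end of
 * setup_context_common(): the memcpy relies on aapcs64_params_t matching the
 * first eight GP-register slots of the context, so whatever a caller places in
 * ep->args becomes x0-x7 on the first ERET into the image. For example, with
 * hypothetical values:
 *
 *	ep->args.arg0 = fdt_base;	(lands in x0)
 *	ep->args.arg1 = 0UL;		(lands in x1)
 */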
/*******************************************************************************
 * Context management library initialization routine. This library is used by
 * runtime services to share pointers to 'cpu_context' structures for the
 * secure, non-secure and realm states. Management of the structures and their
 * associated memory is not done by the context management library, e.g. the
 * PSCI service manages the cpu context used for entry from and exit to the
 * non-secure state. The Secure payload dispatcher service manages the
 * context(s) corresponding to the secure state. It also uses this library to
 * get access to the non-secure state cpu context pointers.
 * Lastly, this library provides the API to make SP_EL3 point to the cpu
 * context which will be used for programming an entry into a lower EL. The
 * same context will be used to save state upon exception entry from that EL.
 ******************************************************************************/
void __init cm_init(void)
{
	/*
	 * The context management library has only global data to initialize,
	 * but that will be done when the BSS is zeroed out.
	 */
}

/*******************************************************************************
 * This is the high-level function used to initialize the cpu_context 'ctx'
 * for first use. It performs initializations that are common to all security
 * states and initializations specific to the security state specified in 'ep'.
 ******************************************************************************/
void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	unsigned int security_state;

	assert(ctx != NULL);

	/*
	 * Perform initializations that are common
	 * to all security states
	 */
	setup_context_common(ctx, ep);

	security_state = GET_SECURITY_STATE(ep->h.attr);

	/* Perform security state specific initializations */
	switch (security_state) {
	case SECURE:
		setup_secure_context(ctx, ep);
		break;
#if ENABLE_RME
	case REALM:
		setup_realm_context(ctx, ep);
		break;
#endif
	case NON_SECURE:
		setup_ns_context(ctx, ep);
		break;
	default:
		ERROR("Invalid security state\n");
		panic();
		break;
	}
}
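/*
 * Illustrative usage (a sketch, not code from this file): a caller prepares an
 * entry_point_info_t and hands it to this library, which routes to the
 * appropriate per-state setup above. The entrypoint address below is
 * hypothetical:
 *
 *	entry_point_info_t ep;
 *
 *	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, NON_SECURE);
 *	ep.pc = ns_image_base;
 *	ep.spsr = SPSR_64(MODE_EL2, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
 *	cm_init_my_context(&ep);
 */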
/*******************************************************************************
 * Enable architecture extensions for EL3 execution. This function only updates
 * registers in-place which are expected to either never change or be
 * overwritten by el3_exit.
 ******************************************************************************/
#if IMAGE_BL31
void cm_manage_extensions_el3(void)
{
	if (is_feat_spe_supported()) {
		spe_init_el3();
	}

	if (is_feat_amu_supported()) {
		amu_init_el3();
	}

	if (is_feat_sme_supported()) {
		sme_init_el3();
	}

	if (is_feat_trbe_supported()) {
		trbe_init_el3();
	}

	if (is_feat_brbe_supported()) {
		brbe_init_el3();
	}

	if (is_feat_trf_supported()) {
		trf_init_el3();
	}

	pmuv3_init_el3();
}
#endif /* IMAGE_BL31 */

/*******************************************************************************
 * Initialise per_world_context for the Non-secure world.
 * This function enables the architecture extensions which have the same value
 * across all cores for the non-secure world.
 ******************************************************************************/
#if IMAGE_BL31
void manage_extensions_nonsecure_per_world(void)
{
	if (is_feat_sme_supported()) {
		sme_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}

	if (is_feat_sve_supported()) {
		sve_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}

	if (is_feat_amu_supported()) {
		amu_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}

	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}
}
#endif /* IMAGE_BL31 */

/*******************************************************************************
 * Initialise per_world_context for the Secure world.
 * This function enables the architecture extensions which have the same value
 * across all cores for the secure world.
 ******************************************************************************/
static void manage_extensions_secure_per_world(void)
{
#if IMAGE_BL31
	if (is_feat_sme_supported()) {

		if (ENABLE_SME_FOR_SWD) {
			/*
			 * Enable SME, SVE, FPU/SIMD in the secure context; the
			 * SPM must ensure that the SME, SVE and FPU/SIMD
			 * contexts are properly managed.
			 */
			sme_enable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
		} else {
			/*
			 * Disable SME, SVE, FPU/SIMD in the secure context so
			 * the non-secure world can safely use the associated
			 * registers.
			 */
			sme_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
		}
	}
	if (is_feat_sve_supported()) {
		if (ENABLE_SVE_FOR_SWD) {
			/*
			 * Enable SVE and FPU in the secure context; the SPM
			 * must ensure that the SVE and FPU register contexts
			 * are properly managed.
			 */
			sve_enable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
		} else {
			/*
			 * Disable SVE and FPU in the secure context so the
			 * non-secure world can safely use them.
			 */
			sve_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
		}
	}

	/* NS can access this but Secure shouldn't */
	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
	}

	has_secure_perworld_init = true;
#endif /* IMAGE_BL31 */
}
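/*
 * Note: the *_per_world routines above are executed once and program the
 * values in per_world_context[] that are identical on every core of a given
 * world, whereas the manage_extensions_* routines below seed the per-CPU
 * cpu_context passed in by the caller and run on every context setup.
 */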
/*******************************************************************************
 * Enable architecture extensions on first entry to the Non-secure world.
 ******************************************************************************/
static void manage_extensions_nonsecure(cpu_context_t *ctx)
{
#if IMAGE_BL31
	if (is_feat_amu_supported()) {
		amu_enable(ctx);
	}

	if (is_feat_sme_supported()) {
		sme_enable(ctx);
	}

	if (is_feat_mpam_supported()) {
		mpam_enable(ctx);
	}
	pmuv3_enable(ctx);
#endif /* IMAGE_BL31 */
}

/* TODO: move to lib/extensions/pauth when it has been ported to FEAT_STATE */
static __unused void enable_pauth_el2(void)
{
	u_register_t hcr_el2 = read_hcr_el2();

	/*
	 * For the Armv8.3 pointer authentication feature, disable traps to
	 * EL2 when accessing key registers or using pointer authentication
	 * instructions from lower ELs.
	 */
	hcr_el2 |= (HCR_API_BIT | HCR_APK_BIT);

	write_hcr_el2(hcr_el2);
}

#if INIT_UNUSED_NS_EL2
/*******************************************************************************
 * Enable architecture extensions in-place at EL2 on first entry to the
 * Non-secure world when EL2 is empty and unused.
 ******************************************************************************/
static void manage_extensions_nonsecure_el2_unused(void)
{
#if IMAGE_BL31
	if (is_feat_spe_supported()) {
		spe_init_el2_unused();
	}

	if (is_feat_amu_supported()) {
		amu_init_el2_unused();
	}

	if (is_feat_mpam_supported()) {
		mpam_init_el2_unused();
	}

	if (is_feat_trbe_supported()) {
		trbe_init_el2_unused();
	}

	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_init_el2_unused();
	}

	if (is_feat_trf_supported()) {
		trf_init_el2_unused();
	}

	pmuv3_init_el2_unused();

	if (is_feat_sve_supported()) {
		sve_init_el2_unused();
	}

	if (is_feat_sme_supported()) {
		sme_init_el2_unused();
	}

#if ENABLE_PAUTH
	enable_pauth_el2();
#endif /* ENABLE_PAUTH */
#endif /* IMAGE_BL31 */
}
#endif /* INIT_UNUSED_NS_EL2 */

/*******************************************************************************
 * Enable architecture extensions on first entry to the Secure world.
 ******************************************************************************/
static void manage_extensions_secure(cpu_context_t *ctx)
{
#if IMAGE_BL31
	if (is_feat_sme_supported()) {
		if (ENABLE_SME_FOR_SWD) {
			/*
			 * Enable SME, SVE, FPU/SIMD in the secure context; the
			 * secure manager must ensure that the SME, SVE and
			 * FPU/SIMD contexts are properly managed.
			 */
			sme_init_el3();
			sme_enable(ctx);
		} else {
			/*
			 * Disable SME, SVE, FPU/SIMD in the secure context so
			 * the non-secure world can safely use the associated
			 * registers.
			 */
			sme_disable(ctx);
		}
	}
#endif /* IMAGE_BL31 */
}

/*******************************************************************************
 * The following function initializes the cpu_context for a CPU specified by
 * its `cpu_idx` for first use, and sets the initial entrypoint state as
 * specified by the entry_point_info structure.
 ******************************************************************************/
void cm_init_context_by_index(unsigned int cpu_idx,
			      const entry_point_info_t *ep)
{
	cpu_context_t *ctx;

	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}

/*******************************************************************************
 * The following function initializes the cpu_context for the current CPU
 * for first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 ******************************************************************************/
void cm_init_my_context(const entry_point_info_t *ep)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}
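/*
 * Usage note (an observation, not code from this file): PSCI's CPU_ON path is
 * a typical caller of cm_init_context_by_index(), preparing the context of a
 * core that is still powered off, while boot paths running on the target core
 * itself use cm_init_my_context().
 */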
/* EL2 present but unused, need to disable safely. SCTLR_EL2 can be ignored. */
static void init_nonsecure_el2_unused(cpu_context_t *ctx)
{
#if INIT_UNUSED_NS_EL2
	u_register_t hcr_el2 = HCR_RESET_VAL;
	u_register_t mdcr_el2;
	u_register_t scr_el3;

	scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);

	/* Set EL2 register width: Set HCR_EL2.RW to match SCR_EL3.RW */
	if ((scr_el3 & SCR_RW_BIT) != 0U) {
		hcr_el2 |= HCR_RW_BIT;
	}

	write_hcr_el2(hcr_el2);

	/*
	 * Initialise CPTR_EL2 setting all fields rather than relying on the
	 * hw. All fields have architecturally UNKNOWN reset values.
	 */
	write_cptr_el2(CPTR_EL2_RESET_VAL);

	/*
	 * Initialise CNTHCTL_EL2. All fields are architecturally UNKNOWN on
	 * reset and are set to zero except for field(s) listed below.
	 *
	 * CNTHCTL_EL2.EL1PCEN: Set to one to disable traps to Hyp mode of
	 * Non-secure EL0 and EL1 accesses to the physical timer registers.
	 *
	 * CNTHCTL_EL2.EL1PCTEN: Set to one to disable traps to Hyp mode of
	 * Non-secure EL0 and EL1 accesses to the physical counter registers.
	 */
	write_cnthctl_el2(CNTHCTL_RESET_VAL | EL1PCEN_BIT | EL1PCTEN_BIT);

	/*
	 * Initialise CNTVOFF_EL2 to zero as it resets to an architecturally
	 * UNKNOWN value.
	 */
	write_cntvoff_el2(0);

	/*
	 * Set VPIDR_EL2 and VMPIDR_EL2 to match MIDR_EL1 and MPIDR_EL1
	 * respectively.
	 */
	write_vpidr_el2(read_midr_el1());
	write_vmpidr_el2(read_mpidr_el1());

	/*
	 * Initialise VTTBR_EL2. All fields are architecturally UNKNOWN on
	 * reset.
	 *
	 * VTTBR_EL2.VMID: Set to zero. Even though EL1&0 stage 2 address
	 * translation is disabled, cache maintenance operations depend on the
	 * VMID.
	 *
	 * VTTBR_EL2.BADDR: Set to zero as EL1&0 stage 2 address translation
	 * is disabled.
	 */
	write_vttbr_el2(VTTBR_RESET_VAL &
		~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT) |
		  (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT)));

	/*
	 * Initialise MDCR_EL2, setting all fields rather than relying on hw.
	 * Some fields are architecturally UNKNOWN on reset.
	 *
	 * MDCR_EL2.TDRA: Set to zero so that Non-secure EL0 and EL1 System
	 * register accesses to the Debug ROM registers are not trapped to
	 * EL2.
	 *
	 * MDCR_EL2.TDOSA: Set to zero so that Non-secure EL1 System register
	 * accesses to the powerdown debug registers are not trapped to EL2.
	 *
	 * MDCR_EL2.TDA: Set to zero so that System register accesses to the
	 * debug registers do not trap to EL2.
	 *
	 * MDCR_EL2.TDE: Set to zero so that debug exceptions are not routed
	 * to EL2.
	 */
	mdcr_el2 = MDCR_EL2_RESET_VAL &
		 ~(MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT | MDCR_EL2_TDA_BIT |
		   MDCR_EL2_TDE_BIT);

	write_mdcr_el2(mdcr_el2);

	/*
	 * Initialise HSTR_EL2. All fields are architecturally UNKNOWN on
	 * reset.
	 *
	 * HSTR_EL2.T<n>: Set all these fields to zero so that Non-secure EL0
	 * or EL1 accesses to System registers do not trap to EL2.
	 */
	write_hstr_el2(HSTR_EL2_RESET_VAL & ~(HSTR_EL2_T_MASK));
	/*
	 * Initialise CNTHP_CTL_EL2. All fields are architecturally UNKNOWN on
	 * reset.
	 *
	 * CNTHP_CTL_EL2.ENABLE: Set to zero to disable the EL2 physical timer
	 * and prevent timer interrupts.
	 */
	write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL & ~(CNTHP_CTL_ENABLE_BIT));

	manage_extensions_nonsecure_el2_unused();
#endif /* INIT_UNUSED_NS_EL2 */
}

/*******************************************************************************
 * Prepare the CPU system registers for first entry into the realm, secure,
 * or normal world.
 *
 * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized.
 * If execution is requested to non-secure EL1 or svc mode, and the CPU
 * supports EL2, then EL2 is disabled by configuring all necessary EL2
 * registers.
 * For all entries, the EL1 registers are initialized from the cpu_context.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	u_register_t sctlr_elx, scr_el3;
	cpu_context_t *ctx = cm_get_context(security_state);

	assert(ctx != NULL);

	if (security_state == NON_SECURE) {
		uint64_t el2_implemented = el_implemented(2);

		scr_el3 = read_ctx_reg(get_el3state_ctx(ctx),
				       CTX_SCR_EL3);

		if (((scr_el3 & SCR_HCE_BIT) != 0U)
			|| (el2_implemented != EL_IMPL_NONE)) {
			/*
			 * If the context is not being used for EL2, initialize
			 * HCRX_EL2 with its init value here.
			 */
			if (is_feat_hcx_supported()) {
				write_hcrx_el2(HCRX_EL2_INIT_VAL);
			}

			/*
			 * Initialize the Fine-grained trap registers
			 * introduced by FEAT_FGT so all traps are initially
			 * disabled when switching to EL2 or a lower EL,
			 * preventing undesired behavior.
			 */
			if (is_feat_fgt_supported()) {
				/*
				 * Initialize HFG*_EL2 registers with a default
				 * value so legacy systems unaware of FEAT_FGT
				 * do not get trapped due to their lack of
				 * initialization for this feature.
				 */
				write_hfgitr_el2(HFGITR_EL2_INIT_VAL);
				write_hfgrtr_el2(HFGRTR_EL2_INIT_VAL);
				write_hfgwtr_el2(HFGWTR_EL2_INIT_VAL);
			}
		}

		if ((scr_el3 & SCR_HCE_BIT) != 0U) {
			/* Use SCTLR_EL1.EE value to initialise sctlr_el2 */
			sctlr_elx = read_ctx_reg(get_el1_sysregs_ctx(ctx),
						 CTX_SCTLR_EL1);
			sctlr_elx &= SCTLR_EE_BIT;
			sctlr_elx |= SCTLR_EL2_RES1;
#if ERRATA_A75_764081
			/*
			 * If the workaround for Cortex-A75 erratum 764081 is
			 * used then set SCTLR_EL2.IESB to enable the Implicit
			 * Error Synchronization Barrier.
			 */
			sctlr_elx |= SCTLR_IESB_BIT;
#endif
			write_sctlr_el2(sctlr_elx);
		} else if (el2_implemented != EL_IMPL_NONE) {
			init_nonsecure_el2_unused(ctx);
		}
	}

	cm_el1_sysregs_context_restore(security_state);
	cm_set_next_eret_context(security_state);
}
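/*
 * Usage note (an observation, not code from this file): cm_prepare_el3_exit()
 * is typically called once per CPU just before the first exit to a lower EL of
 * the given security state; subsequent world switches only save and restore
 * the already-initialized contexts with the helpers below.
 */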
#if CTX_INCLUDE_EL2_REGS

static void el2_sysregs_context_save_fgt(el2_sysregs_t *ctx)
{
	write_ctx_reg(ctx, CTX_HDFGRTR_EL2, read_hdfgrtr_el2());
	if (is_feat_amu_supported()) {
		write_ctx_reg(ctx, CTX_HAFGRTR_EL2, read_hafgrtr_el2());
	}
	write_ctx_reg(ctx, CTX_HDFGWTR_EL2, read_hdfgwtr_el2());
	write_ctx_reg(ctx, CTX_HFGITR_EL2, read_hfgitr_el2());
	write_ctx_reg(ctx, CTX_HFGRTR_EL2, read_hfgrtr_el2());
	write_ctx_reg(ctx, CTX_HFGWTR_EL2, read_hfgwtr_el2());
}

static void el2_sysregs_context_restore_fgt(el2_sysregs_t *ctx)
{
	write_hdfgrtr_el2(read_ctx_reg(ctx, CTX_HDFGRTR_EL2));
	if (is_feat_amu_supported()) {
		write_hafgrtr_el2(read_ctx_reg(ctx, CTX_HAFGRTR_EL2));
	}
	write_hdfgwtr_el2(read_ctx_reg(ctx, CTX_HDFGWTR_EL2));
	write_hfgitr_el2(read_ctx_reg(ctx, CTX_HFGITR_EL2));
	write_hfgrtr_el2(read_ctx_reg(ctx, CTX_HFGRTR_EL2));
	write_hfgwtr_el2(read_ctx_reg(ctx, CTX_HFGWTR_EL2));
}
static void el2_sysregs_context_save_mpam(el2_sysregs_t *ctx)
{
	u_register_t mpam_idr = read_mpamidr_el1();

	write_ctx_reg(ctx, CTX_MPAM2_EL2, read_mpam2_el2());

	/*
	 * The context registers that we intend to save would be part of the
	 * PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1.
	 */
	if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) {
		return;
	}

	/*
	 * MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 are always present if
	 * MPAMIDR_EL1.HAS_HCR == 1.
	 */
	write_ctx_reg(ctx, CTX_MPAMHCR_EL2, read_mpamhcr_el2());
	write_ctx_reg(ctx, CTX_MPAMVPM0_EL2, read_mpamvpm0_el2());
	write_ctx_reg(ctx, CTX_MPAMVPMV_EL2, read_mpamvpmv_el2());

	/*
	 * The number of MPAMVPM registers is implementation defined, their
	 * number is stored in the MPAMIDR_EL1 register.
	 */
	switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) {
	case 7:
		write_ctx_reg(ctx, CTX_MPAMVPM7_EL2, read_mpamvpm7_el2());
		__fallthrough;
	case 6:
		write_ctx_reg(ctx, CTX_MPAMVPM6_EL2, read_mpamvpm6_el2());
		__fallthrough;
	case 5:
		write_ctx_reg(ctx, CTX_MPAMVPM5_EL2, read_mpamvpm5_el2());
		__fallthrough;
	case 4:
		write_ctx_reg(ctx, CTX_MPAMVPM4_EL2, read_mpamvpm4_el2());
		__fallthrough;
	case 3:
		write_ctx_reg(ctx, CTX_MPAMVPM3_EL2, read_mpamvpm3_el2());
		__fallthrough;
	case 2:
		write_ctx_reg(ctx, CTX_MPAMVPM2_EL2, read_mpamvpm2_el2());
		__fallthrough;
	case 1:
		write_ctx_reg(ctx, CTX_MPAMVPM1_EL2, read_mpamvpm1_el2());
		break;
	}
}

static void el2_sysregs_context_restore_mpam(el2_sysregs_t *ctx)
{
	u_register_t mpam_idr = read_mpamidr_el1();

	write_mpam2_el2(read_ctx_reg(ctx, CTX_MPAM2_EL2));

	if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) {
		return;
	}

	write_mpamhcr_el2(read_ctx_reg(ctx, CTX_MPAMHCR_EL2));
	write_mpamvpm0_el2(read_ctx_reg(ctx, CTX_MPAMVPM0_EL2));
	write_mpamvpmv_el2(read_ctx_reg(ctx, CTX_MPAMVPMV_EL2));

	switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) {
	case 7:
		write_mpamvpm7_el2(read_ctx_reg(ctx, CTX_MPAMVPM7_EL2));
		__fallthrough;
	case 6:
		write_mpamvpm6_el2(read_ctx_reg(ctx, CTX_MPAMVPM6_EL2));
		__fallthrough;
	case 5:
		write_mpamvpm5_el2(read_ctx_reg(ctx, CTX_MPAMVPM5_EL2));
		__fallthrough;
	case 4:
		write_mpamvpm4_el2(read_ctx_reg(ctx, CTX_MPAMVPM4_EL2));
		__fallthrough;
	case 3:
		write_mpamvpm3_el2(read_ctx_reg(ctx, CTX_MPAMVPM3_EL2));
		__fallthrough;
	case 2:
		write_mpamvpm2_el2(read_ctx_reg(ctx, CTX_MPAMVPM2_EL2));
		__fallthrough;
	case 1:
		write_mpamvpm1_el2(read_ctx_reg(ctx, CTX_MPAMVPM1_EL2));
		break;
	}
}
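/*
 * Note: the fallthroughs in the two switches above are deliberate. The
 * VPMR_MAX field of MPAMIDR_EL1 gives the index of the highest implemented
 * MPAMVPM<n>_EL2 register, so entering at 'case VPMR_MAX' and falling through
 * saves/restores every implemented register down to MPAMVPM1_EL2
 * (MPAMVPM0_EL2 is always present and handled before the switch).
 */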
/* -----------------------------------------------------
 * The following registers are not added:
 * AMEVCNTVOFF0<n>_EL2
 * AMEVCNTVOFF1<n>_EL2
 * ICH_AP0R<n>_EL2
 * ICH_AP1R<n>_EL2
 * ICH_LR<n>_EL2
 * -----------------------------------------------------
 */
static void el2_sysregs_context_save_common(el2_sysregs_t *ctx)
{
	write_ctx_reg(ctx, CTX_ACTLR_EL2, read_actlr_el2());
	write_ctx_reg(ctx, CTX_AFSR0_EL2, read_afsr0_el2());
	write_ctx_reg(ctx, CTX_AFSR1_EL2, read_afsr1_el2());
	write_ctx_reg(ctx, CTX_AMAIR_EL2, read_amair_el2());
	write_ctx_reg(ctx, CTX_CNTHCTL_EL2, read_cnthctl_el2());
	write_ctx_reg(ctx, CTX_CNTVOFF_EL2, read_cntvoff_el2());
	write_ctx_reg(ctx, CTX_CPTR_EL2, read_cptr_el2());
	if (CTX_INCLUDE_AARCH32_REGS) {
		write_ctx_reg(ctx, CTX_DBGVCR32_EL2, read_dbgvcr32_el2());
	}
	write_ctx_reg(ctx, CTX_ELR_EL2, read_elr_el2());
	write_ctx_reg(ctx, CTX_ESR_EL2, read_esr_el2());
	write_ctx_reg(ctx, CTX_FAR_EL2, read_far_el2());
	write_ctx_reg(ctx, CTX_HACR_EL2, read_hacr_el2());
	write_ctx_reg(ctx, CTX_HCR_EL2, read_hcr_el2());
	write_ctx_reg(ctx, CTX_HPFAR_EL2, read_hpfar_el2());
	write_ctx_reg(ctx, CTX_HSTR_EL2, read_hstr_el2());

	/*
	 * Set the NS bit to be able to access the ICC_SRE_EL2 register.
	 * TODO: remove with root context
	 */
	u_register_t scr_el3 = read_scr_el3();

	write_scr_el3(scr_el3 | SCR_NS_BIT);
	isb();
	write_ctx_reg(ctx, CTX_ICC_SRE_EL2, read_icc_sre_el2());

	write_scr_el3(scr_el3);
	isb();

	write_ctx_reg(ctx, CTX_ICH_HCR_EL2, read_ich_hcr_el2());
	write_ctx_reg(ctx, CTX_ICH_VMCR_EL2, read_ich_vmcr_el2());
	write_ctx_reg(ctx, CTX_MAIR_EL2, read_mair_el2());
	write_ctx_reg(ctx, CTX_MDCR_EL2, read_mdcr_el2());
	write_ctx_reg(ctx, CTX_SCTLR_EL2, read_sctlr_el2());
	write_ctx_reg(ctx, CTX_SPSR_EL2, read_spsr_el2());
	write_ctx_reg(ctx, CTX_SP_EL2, read_sp_el2());
	write_ctx_reg(ctx, CTX_TCR_EL2, read_tcr_el2());
	write_ctx_reg(ctx, CTX_TPIDR_EL2, read_tpidr_el2());
	write_ctx_reg(ctx, CTX_TTBR0_EL2, read_ttbr0_el2());
	write_ctx_reg(ctx, CTX_VBAR_EL2, read_vbar_el2());
	write_ctx_reg(ctx, CTX_VMPIDR_EL2, read_vmpidr_el2());
	write_ctx_reg(ctx, CTX_VPIDR_EL2, read_vpidr_el2());
	write_ctx_reg(ctx, CTX_VTCR_EL2, read_vtcr_el2());
	write_ctx_reg(ctx, CTX_VTTBR_EL2, read_vttbr_el2());
}

static void el2_sysregs_context_restore_common(el2_sysregs_t *ctx)
{
	write_actlr_el2(read_ctx_reg(ctx, CTX_ACTLR_EL2));
	write_afsr0_el2(read_ctx_reg(ctx, CTX_AFSR0_EL2));
	write_afsr1_el2(read_ctx_reg(ctx, CTX_AFSR1_EL2));
	write_amair_el2(read_ctx_reg(ctx, CTX_AMAIR_EL2));
	write_cnthctl_el2(read_ctx_reg(ctx, CTX_CNTHCTL_EL2));
	write_cntvoff_el2(read_ctx_reg(ctx, CTX_CNTVOFF_EL2));
	write_cptr_el2(read_ctx_reg(ctx, CTX_CPTR_EL2));
	if (CTX_INCLUDE_AARCH32_REGS) {
		write_dbgvcr32_el2(read_ctx_reg(ctx, CTX_DBGVCR32_EL2));
	}
	write_elr_el2(read_ctx_reg(ctx, CTX_ELR_EL2));
	write_esr_el2(read_ctx_reg(ctx, CTX_ESR_EL2));
	write_far_el2(read_ctx_reg(ctx, CTX_FAR_EL2));
	write_hacr_el2(read_ctx_reg(ctx, CTX_HACR_EL2));
	write_hcr_el2(read_ctx_reg(ctx, CTX_HCR_EL2));
	write_hpfar_el2(read_ctx_reg(ctx, CTX_HPFAR_EL2));
	write_hstr_el2(read_ctx_reg(ctx, CTX_HSTR_EL2));

	/*
	 * Set the NS bit to be able to access the ICC_SRE_EL2 register.
	 * TODO: remove with root context
	 */
	u_register_t scr_el3 = read_scr_el3();

	write_scr_el3(scr_el3 | SCR_NS_BIT);
	isb();
	write_icc_sre_el2(read_ctx_reg(ctx, CTX_ICC_SRE_EL2));

	write_scr_el3(scr_el3);
	isb();

	write_ich_hcr_el2(read_ctx_reg(ctx, CTX_ICH_HCR_EL2));
	write_ich_vmcr_el2(read_ctx_reg(ctx, CTX_ICH_VMCR_EL2));
	write_mair_el2(read_ctx_reg(ctx, CTX_MAIR_EL2));
	write_mdcr_el2(read_ctx_reg(ctx, CTX_MDCR_EL2));
	write_sctlr_el2(read_ctx_reg(ctx, CTX_SCTLR_EL2));
	write_spsr_el2(read_ctx_reg(ctx, CTX_SPSR_EL2));
	write_sp_el2(read_ctx_reg(ctx, CTX_SP_EL2));
	write_tcr_el2(read_ctx_reg(ctx, CTX_TCR_EL2));
	write_tpidr_el2(read_ctx_reg(ctx, CTX_TPIDR_EL2));
	write_ttbr0_el2(read_ctx_reg(ctx, CTX_TTBR0_EL2));
	write_vbar_el2(read_ctx_reg(ctx, CTX_VBAR_EL2));
	write_vmpidr_el2(read_ctx_reg(ctx, CTX_VMPIDR_EL2));
	write_vpidr_el2(read_ctx_reg(ctx, CTX_VPIDR_EL2));
	write_vtcr_el2(read_ctx_reg(ctx, CTX_VTCR_EL2));
	write_vttbr_el2(read_ctx_reg(ctx, CTX_VTTBR_EL2));
}
/*******************************************************************************
 * Save EL2 sysreg context
 ******************************************************************************/
void cm_el2_sysregs_context_save(uint32_t security_state)
{
	cpu_context_t *ctx;
	el2_sysregs_t *el2_sysregs_ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);

	el2_sysregs_context_save_common(el2_sysregs_ctx);
#if CTX_INCLUDE_MTE_REGS
	write_ctx_reg(el2_sysregs_ctx, CTX_TFSR_EL2, read_tfsr_el2());
#endif
	if (is_feat_mpam_supported()) {
		el2_sysregs_context_save_mpam(el2_sysregs_ctx);
	}

	if (is_feat_fgt_supported()) {
		el2_sysregs_context_save_fgt(el2_sysregs_ctx);
	}

	if (is_feat_ecv_v2_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_CNTPOFF_EL2, read_cntpoff_el2());
	}

	if (is_feat_vhe_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_CONTEXTIDR_EL2, read_contextidr_el2());
		write_ctx_reg(el2_sysregs_ctx, CTX_TTBR1_EL2, read_ttbr1_el2());
	}

	if (is_feat_ras_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_VDISR_EL2, read_vdisr_el2());
		write_ctx_reg(el2_sysregs_ctx, CTX_VSESR_EL2, read_vsesr_el2());
	}

	if (is_feat_nv2_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_VNCR_EL2, read_vncr_el2());
	}

	if (is_feat_trf_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_TRFCR_EL2, read_trfcr_el2());
	}

	if (is_feat_csv2_2_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_SCXTNUM_EL2, read_scxtnum_el2());
	}

	if (is_feat_hcx_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_HCRX_EL2, read_hcrx_el2());
	}
	if (is_feat_tcr2_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_TCR2_EL2, read_tcr2_el2());
	}
	if (is_feat_sxpie_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_PIRE0_EL2, read_pire0_el2());
		write_ctx_reg(el2_sysregs_ctx, CTX_PIR_EL2, read_pir_el2());
	}
	if (is_feat_s2pie_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_S2PIR_EL2, read_s2pir_el2());
	}
	if (is_feat_sxpoe_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_POR_EL2, read_por_el2());
	}
	if (is_feat_gcs_supported()) {
		write_ctx_reg(el2_sysregs_ctx, CTX_GCSPR_EL2, read_gcspr_el2());
		write_ctx_reg(el2_sysregs_ctx, CTX_GCSCR_EL2, read_gcscr_el2());
	}
}

/*******************************************************************************
 * Restore EL2 sysreg context
 ******************************************************************************/
void cm_el2_sysregs_context_restore(uint32_t security_state)
{
	cpu_context_t *ctx;
	el2_sysregs_t *el2_sysregs_ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);

	el2_sysregs_context_restore_common(el2_sysregs_ctx);
#if CTX_INCLUDE_MTE_REGS
	write_tfsr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TFSR_EL2));
#endif
	if (is_feat_mpam_supported()) {
		el2_sysregs_context_restore_mpam(el2_sysregs_ctx);
	}

	if (is_feat_fgt_supported()) {
		el2_sysregs_context_restore_fgt(el2_sysregs_ctx);
	}

	if (is_feat_ecv_v2_supported()) {
		write_cntpoff_el2(read_ctx_reg(el2_sysregs_ctx, CTX_CNTPOFF_EL2));
	}

	if (is_feat_vhe_supported()) {
		write_contextidr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_CONTEXTIDR_EL2));
		write_ttbr1_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TTBR1_EL2));
	}

	if (is_feat_ras_supported()) {
		write_vdisr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_VDISR_EL2));
		write_vsesr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_VSESR_EL2));
	}

	if (is_feat_nv2_supported()) {
		write_vncr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_VNCR_EL2));
	}

	if (is_feat_trf_supported()) {
		write_trfcr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TRFCR_EL2));
	}

	if (is_feat_csv2_2_supported()) {
		write_scxtnum_el2(read_ctx_reg(el2_sysregs_ctx, CTX_SCXTNUM_EL2));
	}

	if (is_feat_hcx_supported()) {
		write_hcrx_el2(read_ctx_reg(el2_sysregs_ctx, CTX_HCRX_EL2));
	}
	if (is_feat_tcr2_supported()) {
		write_tcr2_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TCR2_EL2));
	}
	if (is_feat_sxpie_supported()) {
		write_pire0_el2(read_ctx_reg(el2_sysregs_ctx, CTX_PIRE0_EL2));
		write_pir_el2(read_ctx_reg(el2_sysregs_ctx, CTX_PIR_EL2));
	}
	if (is_feat_s2pie_supported()) {
		write_s2pir_el2(read_ctx_reg(el2_sysregs_ctx, CTX_S2PIR_EL2));
	}
	if (is_feat_sxpoe_supported()) {
		write_por_el2(read_ctx_reg(el2_sysregs_ctx, CTX_POR_EL2));
	}
	if (is_feat_gcs_supported()) {
		write_gcscr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_GCSCR_EL2));
		write_gcspr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_GCSPR_EL2));
	}
}
#endif /* CTX_INCLUDE_EL2_REGS */
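/*
 * Usage note (an observation, not code from this file): when an SPMC runs at
 * S-EL2, the SPM dispatcher pairs cm_el2_sysregs_context_save() and
 * cm_el2_sysregs_context_restore() on every world switch so that the EL2
 * state of each world, e.g. a non-secure hypervisor's, survives the round
 * trip through EL3.
 */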
/*******************************************************************************
 * This function is used to exit to the Non-secure world. If
 * CTX_INCLUDE_EL2_REGS is enabled, it restores the EL1 and EL2 sysreg contexts
 * instead of directly updating the EL1 and EL2 registers. Otherwise, it calls
 * the generic cm_prepare_el3_exit function.
 ******************************************************************************/
void cm_prepare_el3_exit_ns(void)
{
#if CTX_INCLUDE_EL2_REGS
#if ENABLE_ASSERTIONS
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	assert(ctx != NULL);

	/* Assert that EL2 is used. */
	u_register_t scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);
	assert(((scr_el3 & SCR_HCE_BIT) != 0UL) &&
	       (el_implemented(2U) != EL_IMPL_NONE));
#endif /* ENABLE_ASSERTIONS */

	/* Restore EL2 and EL1 sysreg contexts */
	cm_el2_sysregs_context_restore(NON_SECURE);
	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);
#else
	cm_prepare_el3_exit(NON_SECURE);
#endif /* CTX_INCLUDE_EL2_REGS */
}
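/*
 * Illustrative usage of the EL1 save/restore API below (a sketch of a typical
 * SPD world switch, not code from this file): on an SMC that transfers control
 * from the secure to the non-secure world, a dispatcher would do:
 *
 *	cm_el1_sysregs_context_save(SECURE);
 *	cm_el1_sysregs_context_restore(NON_SECURE);
 *	cm_set_next_eret_context(NON_SECURE);
 */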
/*******************************************************************************
 * The next four functions are used by runtime services to save and restore
 * the EL1 context on the 'cpu_context' structure for the specified security
 * state.
 ******************************************************************************/
void cm_el1_sysregs_context_save(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	el1_sysregs_context_save(get_el1_sysregs_ctx(ctx));

#if IMAGE_BL31
	if (security_state == SECURE) {
		PUBLISH_EVENT(cm_exited_secure_world);
	} else {
		PUBLISH_EVENT(cm_exited_normal_world);
	}
#endif
}

void cm_el1_sysregs_context_restore(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	el1_sysregs_context_restore(get_el1_sysregs_ctx(ctx));

#if IMAGE_BL31
	if (security_state == SECURE) {
		PUBLISH_EVENT(cm_entering_secure_world);
	} else {
		PUBLISH_EVENT(cm_entering_normal_world);
	}
#endif
}

/*******************************************************************************
 * This function populates the ELR_EL3 member of the 'cpu_context' pertaining
 * to the given security state with the given entrypoint.
 ******************************************************************************/
void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint)
{
	cpu_context_t *ctx;
	el3_state_t *state;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Populate EL3 state so that ERET jumps to the correct entry */
	state = get_el3state_ctx(ctx);
	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
}

/*******************************************************************************
 * This function populates the ELR_EL3 and SPSR_EL3 members of the
 * 'cpu_context' pertaining to the given security state.
 ******************************************************************************/
void cm_set_elr_spsr_el3(uint32_t security_state,
			 uintptr_t entrypoint, uint32_t spsr)
{
	cpu_context_t *ctx;
	el3_state_t *state;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Populate EL3 state so that ERET jumps to the correct entry */
	state = get_el3state_ctx(ctx);
	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
	write_ctx_reg(state, CTX_SPSR_EL3, spsr);
}

/*******************************************************************************
 * This function updates a single bit in the SCR_EL3 member of the
 * 'cpu_context' pertaining to the given security state using the value and
 * bit position specified in the parameters. It preserves all other bits.
 ******************************************************************************/
void cm_write_scr_el3_bit(uint32_t security_state,
			  uint32_t bit_pos,
			  uint32_t value)
{
	cpu_context_t *ctx;
	el3_state_t *state;
	u_register_t scr_el3;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Ensure that the bit position is a valid one */
	assert(((1UL << bit_pos) & SCR_VALID_BIT_MASK) != 0U);

	/* Ensure that the 'value' is only a bit wide */
	assert(value <= 1U);

	/*
	 * Get the SCR_EL3 value from the cpu context, clear the desired bit
	 * and set it to its new value.
	 */
	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
	scr_el3 &= ~(1UL << bit_pos);
	scr_el3 |= (u_register_t)value << bit_pos;
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
}
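/*
 * Illustrative usage (hypothetical values, not code from this file): to route
 * physical IRQs taken from the non-secure world to EL3, interrupt management
 * code could set the SCR_EL3.IRQ bit (bit position 1) in the non-secure
 * context:
 *
 *	cm_write_scr_el3_bit(NON_SECURE, 1U, 1U);
 */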
/*******************************************************************************
 * This function retrieves the SCR_EL3 member of the 'cpu_context' pertaining
 * to the given security state.
 ******************************************************************************/
u_register_t cm_get_scr_el3(uint32_t security_state)
{
	cpu_context_t *ctx;
	el3_state_t *state;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	state = get_el3state_ctx(ctx);
	return read_ctx_reg(state, CTX_SCR_EL3);
}

/*******************************************************************************
 * This function is used to program the context that's used for exception
 * return. It initializes SP_EL3 to point to a 'cpu_context' set for the
 * required security state.
 ******************************************************************************/
void cm_set_next_eret_context(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	cm_set_next_context(ctx);
}