/*
 * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2022, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <drivers/arm/gicv3.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/brbe.h>
#include <lib/extensions/debug_v8p9.h>
#include <lib/extensions/mpam.h>
#include <lib/extensions/pmuv3.h>
#include <lib/extensions/sme.h>
#include <lib/extensions/spe.h>
#include <lib/extensions/sve.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/extensions/trbe.h>
#include <lib/extensions/trf.h>
#include <lib/utils.h>

#if ENABLE_FEAT_TWED
/* Make sure the delay value fits within the range (0-15) */
CASSERT(((TWED_DELAY & ~SCR_TWEDEL_MASK) == 0U), assert_twed_delay_value_check);
#endif /* ENABLE_FEAT_TWED */

per_world_context_t per_world_context[CPU_DATA_CONTEXT_NUM];
static bool has_secure_perworld_init;

static void manage_extensions_common(cpu_context_t *ctx);
static void manage_extensions_nonsecure(cpu_context_t *ctx);
static void manage_extensions_secure(cpu_context_t *ctx);
static void manage_extensions_secure_per_world(void);

static void setup_el1_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t sctlr_elx, actlr_elx;

	/*
	 * Initialise SCTLR_EL1 to the reset value corresponding to the target
	 * execution state, setting all fields rather than relying on the hw.
	 * Some fields have architecturally UNKNOWN reset values and these are
	 * set to zero.
	 *
	 * SCTLR.EE: Endianness is taken from the entrypoint attributes.
	 *
	 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
	 * required by the PSCI specification).
	 */
	sctlr_elx = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0UL;
	if (GET_RW(ep->spsr) == MODE_RW_64) {
		sctlr_elx |= SCTLR_EL1_RES1;
	} else {
		/*
		 * If the target execution state is AArch32 then the following
		 * fields need to be set.
		 *
		 * SCTLR_EL1.nTWE: Set to one so that EL0 execution of WFE
		 * instructions is not trapped to EL1.
		 *
		 * SCTLR_EL1.nTWI: Set to one so that EL0 execution of WFI
		 * instructions is not trapped to EL1.
		 *
		 * SCTLR_EL1.CP15BEN: Set to one to enable EL0 execution of the
		 * CP15DMB, CP15DSB, and CP15ISB instructions.
		 */
		sctlr_elx |= SCTLR_AARCH32_EL1_RES1 | SCTLR_CP15BEN_BIT
					| SCTLR_NTWI_BIT | SCTLR_NTWE_BIT;
	}

#if ERRATA_A75_764081
	/*
	 * If the workaround for Cortex-A75 erratum 764081 is used then set
	 * SCTLR_EL1.IESB to enable the Implicit Error Synchronization Barrier.
	 */
	sctlr_elx |= SCTLR_IESB_BIT;
#endif
	/* Store the initialised SCTLR_EL1 value in the cpu_context */
	write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);

	/*
	 * Base the context ACTLR_EL1 on the current value, as it is
	 * implementation defined. The context restore process will write
	 * the value from the context to the actual register and can cause
	 * problems for processor cores that don't expect certain bits to
	 * be zero.
	 */
	actlr_elx = read_actlr_el1();
	write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));
}

/******************************************************************************
 * This function performs initializations that are specific to SECURE state
 * and updates the cpu context specified by 'ctx'.
 *****************************************************************************/
static void setup_secure_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

#if defined(IMAGE_BL31) && !defined(SPD_spmd)
	/*
	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
	 * indicated by the interrupt routing model for BL31.
	 */
	scr_el3 |= get_scr_el3_from_routing_model(SECURE);
#endif

	/* Allow access to Allocation Tags when FEAT_MTE2 is implemented and enabled. */
	if (is_feat_mte2_supported()) {
		scr_el3 |= SCR_ATA_BIT;
	}

	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);

	/*
	 * Initialize EL1 context registers unless SPMC is running
	 * at S-EL2.
	 */
#if !SPMD_SPM_AT_SEL2
	setup_el1_context(ctx, ep);
#endif

	manage_extensions_secure(ctx);

	/**
	 * The manage_extensions_secure_per_world API has to be executed only
	 * once, as the registers it initialises hold the same value across
	 * all CPUs for the secure world.
	 * Hence, this check ensures that the registers are initialised once
	 * and avoids re-initialisation from multiple cores.
	 */
	if (!has_secure_perworld_init) {
		manage_extensions_secure_per_world();
	}

}

#if ENABLE_RME
/******************************************************************************
 * This function performs initializations that are specific to REALM state
 * and updates the cpu context specified by 'ctx'.
 *****************************************************************************/
static void setup_realm_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

	scr_el3 |= SCR_NS_BIT | SCR_NSE_BIT;

	/* CSV2 version 2 and above */
	if (is_feat_csv2_2_supported()) {
		/* Enable access to the SCXTNUM_ELx registers. */
		scr_el3 |= SCR_EnSCXT_BIT;
	}

	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
}
#endif /* ENABLE_RME */

/******************************************************************************
 * This function performs initializations that are specific to NON-SECURE state
 * and updates the cpu context specified by 'ctx'.
 *****************************************************************************/
static void setup_ns_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

	/* SCR_NS: Set the NS bit */
	scr_el3 |= SCR_NS_BIT;

	/* Allow access to Allocation Tags when FEAT_MTE2 is implemented and enabled. */
	if (is_feat_mte2_supported()) {
		scr_el3 |= SCR_ATA_BIT;
	}

#if !CTX_INCLUDE_PAUTH_REGS
	/*
	 * The Pointer Authentication feature, if present, is always enabled by
	 * default for the Non-secure lower exception levels. We do not have an
	 * explicit flag to set it. The CTX_INCLUDE_PAUTH_REGS flag is
	 * explicitly used to enable it for the lower exception levels of the
	 * secure and realm worlds.
	 *
	 * To prevent leakage between the worlds during a world switch,
	 * we enable it only for the non-secure world.
	 *
	 * If the Secure/realm world wants to use pointer authentication,
	 * CTX_INCLUDE_PAUTH_REGS must be explicitly set to 1, in which case
	 * it will be enabled globally for all the contexts.
	 *
	 * SCR_EL3.API: Set to one to not trap any PAuth instructions at ELs
	 * other than EL3.
	 *
	 * SCR_EL3.APK: Set to one to not trap any PAuth key values at ELs
	 * other than EL3.
	 */
	scr_el3 |= SCR_API_BIT | SCR_APK_BIT;

#endif /* CTX_INCLUDE_PAUTH_REGS */

#if HANDLE_EA_EL3_FIRST_NS
	/* SCR_EL3.EA: Route External Abort and SError Interrupt to EL3. */
	scr_el3 |= SCR_EA_BIT;
#endif

#if RAS_TRAP_NS_ERR_REC_ACCESS
	/*
	 * SCR_EL3.TERR: Trap Error record accesses. Accesses to the RAS ERR
	 * and RAS ERX registers from EL1 and EL2 (from any security state)
	 * are trapped to EL3.
	 * Set here to trap only for NS EL1/EL2.
	 */
	scr_el3 |= SCR_TERR_BIT;
#endif

	/* CSV2 version 2 and above */
	if (is_feat_csv2_2_supported()) {
		/* Enable access to the SCXTNUM_ELx registers. */
		scr_el3 |= SCR_EnSCXT_BIT;
	}

#ifdef IMAGE_BL31
	/*
	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
	 * indicated by the interrupt routing model for BL31.
	 */
	scr_el3 |= get_scr_el3_from_routing_model(NON_SECURE);
#endif
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);

	/* Initialize EL1 context registers */
	setup_el1_context(ctx, ep);

	/* Initialize EL2 context registers */
#if CTX_INCLUDE_EL2_REGS

	/*
	 * Initialize SCTLR_EL2 context register with reset value.
	 */
	write_el2_ctx_common(get_el2_sysregs_ctx(ctx), sctlr_el2, SCTLR_EL2_RES1);

	if (is_feat_hcx_supported()) {
		/*
		 * Initialize register HCRX_EL2 with its init value.
		 * As the value of HCRX_EL2 is UNKNOWN on reset, there is a
		 * chance that this can lead to unexpected behavior in lower
		 * ELs that have not been updated since the introduction of
		 * this feature if not properly initialized, especially when
		 * it comes to those bits that enable/disable traps.
		 */
		write_el2_ctx_hcx(get_el2_sysregs_ctx(ctx), hcrx_el2,
			HCRX_EL2_INIT_VAL);
	}

	if (is_feat_fgt_supported()) {
		/*
		 * Initialize HFG*_EL2 registers with a default value so legacy
		 * systems unaware of FEAT_FGT do not get trapped due to their lack
		 * of initialization for this feature.
		 */
		write_el2_ctx_fgt(get_el2_sysregs_ctx(ctx), hfgitr_el2,
			HFGITR_EL2_INIT_VAL);
		write_el2_ctx_fgt(get_el2_sysregs_ctx(ctx), hfgrtr_el2,
			HFGRTR_EL2_INIT_VAL);
		write_el2_ctx_fgt(get_el2_sysregs_ctx(ctx), hfgwtr_el2,
			HFGWTR_EL2_INIT_VAL);
	}

#endif /* CTX_INCLUDE_EL2_REGS */

	manage_extensions_nonsecure(ctx);
}

/*******************************************************************************
 * The following function performs initialization of the cpu_context 'ctx'
 * for first use that is common to all security states, and sets the
 * initial entrypoint state as specified by the entry_point_info structure.
 *
 * The EE and ST attributes are used to configure the endianness and secure
 * timer availability for the new execution context.
 ******************************************************************************/
static void setup_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	u_register_t scr_el3;
	u_register_t mdcr_el3;
	el3_state_t *state;
	gp_regs_t *gp_regs;

	state = get_el3state_ctx(ctx);

	/* Clear any residual register values from the context */
	zeromem(ctx, sizeof(*ctx));

	/*
	 * The lower-EL context is zeroed so that no stale values leak to a world.
	 * It is assumed that an all-zero lower-EL context is good enough for it
	 * to boot correctly. However, there are very few registers where this
	 * is not true and some values need to be recreated.
	 */
#if CTX_INCLUDE_EL2_REGS
	el2_sysregs_t *el2_ctx = get_el2_sysregs_ctx(ctx);

	/*
	 * These bits are set in the gicv3 driver. Losing them (especially the
	 * SRE bit) is problematic for all worlds. Hence, recreate them.
	 */
	u_register_t icc_sre_el2_val = ICC_SRE_DIB_BIT | ICC_SRE_DFB_BIT |
				ICC_SRE_EN_BIT | ICC_SRE_SRE_BIT;
	write_el2_ctx_common(el2_ctx, icc_sre_el2, icc_sre_el2_val);

	/*
	 * The actlr_el2 register can be initialized in the platform's reset
	 * handler and it may contain access control bits (e.g. the
	 * CLUSTERPMUEN bit).
	 */
	write_el2_ctx_common(el2_ctx, actlr_el2, read_actlr_el2());
#endif /* CTX_INCLUDE_EL2_REGS */

	/* Start with a clean SCR_EL3 copy as all relevant values are set */
	scr_el3 = SCR_RESET_VAL;

	/*
	 * SCR_EL3.TWE: Set to zero so that execution of WFE instructions at
	 * EL2, EL1 and EL0 is not trapped to EL3.
	 *
	 * SCR_EL3.TWI: Set to zero so that execution of WFI instructions at
	 * EL2, EL1 and EL0 is not trapped to EL3.
	 *
	 * SCR_EL3.SMD: Set to zero to enable SMC calls at EL1 and above, from
	 * both Security states and both Execution states.
	 *
	 * SCR_EL3.SIF: Set to one to disable secure instruction execution from
	 * Non-secure memory.
	 */
	scr_el3 &= ~(SCR_TWE_BIT | SCR_TWI_BIT | SCR_SMD_BIT);

	scr_el3 |= SCR_SIF_BIT;

	/*
	 * SCR_EL3.RW: Set the execution state, AArch32 or AArch64, for the next
	 * Exception level as specified by SPSR.
	 */
	if (GET_RW(ep->spsr) == MODE_RW_64) {
		scr_el3 |= SCR_RW_BIT;
	}

	/*
	 * SCR_EL3.ST: Traps Secure EL1 accesses to the Counter-timer Physical
	 * Secure timer registers to EL3, from AArch64 state only, if specified
	 * by the entrypoint attributes. If SEL2 is present and enabled, the ST
	 * bit always behaves as 1 (i.e. secure physical timer register access
	 * is not trapped).
	 */
	if (EP_GET_ST(ep->h.attr) != 0U) {
		scr_el3 |= SCR_ST_BIT;
	}

	/*
	 * If FEAT_HCX is enabled, enable access to HCRX_EL2 by setting
	 * SCR_EL3.HXEn.
	 */
	if (is_feat_hcx_supported()) {
		scr_el3 |= SCR_HXEn_BIT;
	}

	/*
	 * If FEAT_RNG_TRAP is enabled, all reads of the RNDR and RNDRRS
	 * registers are trapped to EL3.
	 */
#if ENABLE_FEAT_RNG_TRAP
	scr_el3 |= SCR_TRNDR_BIT;
#endif

#if FAULT_INJECTION_SUPPORT
	/* Enable fault injection from lower ELs */
	scr_el3 |= SCR_FIEN_BIT;
#endif

#if CTX_INCLUDE_PAUTH_REGS
	/*
	 * Enable Pointer Authentication globally for all the worlds.
	 *
	 * SCR_EL3.API: Set to one to not trap any PAuth instructions at ELs
	 * other than EL3.
	 *
	 * SCR_EL3.APK: Set to one to not trap any PAuth key values at ELs
	 * other than EL3.
	 */
	scr_el3 |= SCR_API_BIT | SCR_APK_BIT;
#endif /* CTX_INCLUDE_PAUTH_REGS */

	/*
	 * SCR_EL3.TCR2EN: Enable access to TCR2_ELx for AArch64 if present.
	 */
	if (is_feat_tcr2_supported() && (GET_RW(ep->spsr) == MODE_RW_64)) {
		scr_el3 |= SCR_TCR2EN_BIT;
	}

	/*
	 * SCR_EL3.PIEN: Enable permission indirection and overlay
	 * registers for AArch64 if present.
	 */
	if (is_feat_sxpie_supported() || is_feat_sxpoe_supported()) {
		scr_el3 |= SCR_PIEN_BIT;
	}

	/*
	 * SCR_EL3.GCSEn: Enable GCS registers for AArch64 if present.
	 */
	if ((is_feat_gcs_supported()) && (GET_RW(ep->spsr) == MODE_RW_64)) {
		scr_el3 |= SCR_GCSEn_BIT;
	}

	/*
	 * SCR_EL3.HCE: Enable HVC instructions if next execution state is
	 * AArch64 and next EL is EL2, or if next execution state is AArch32 and
	 * next mode is Hyp.
	 * SCR_EL3.FGTEn: Enable Fine Grained Virtualization Traps under the
	 * same conditions as HVC instructions and when the processor supports
	 * ARMv8.6-FGT.
	 * SCR_EL3.ECVEn: Enable Enhanced Counter Virtualization (ECV)
	 * CNTPOFF_EL2 register under the same conditions as HVC instructions
	 * and when the processor supports ECV.
	 */
	if (((GET_RW(ep->spsr) == MODE_RW_64) && (GET_EL(ep->spsr) == MODE_EL2))
	    || ((GET_RW(ep->spsr) != MODE_RW_64)
		&& (GET_M32(ep->spsr) == MODE32_hyp))) {
		scr_el3 |= SCR_HCE_BIT;

		if (is_feat_fgt_supported()) {
			scr_el3 |= SCR_FGTEN_BIT;
		}

		if (is_feat_ecv_supported()) {
			scr_el3 |= SCR_ECVEN_BIT;
		}
	}

	/* Enable WFE trap delay in SCR_EL3 if supported and configured */
	if (is_feat_twed_supported()) {
		/* Set delay in SCR_EL3 */
		scr_el3 &= ~(SCR_TWEDEL_MASK << SCR_TWEDEL_SHIFT);
		scr_el3 |= ((TWED_DELAY & SCR_TWEDEL_MASK)
				<< SCR_TWEDEL_SHIFT);

		/* Enable WFE delay */
		scr_el3 |= SCR_TWEDEn_BIT;
	}

#if IMAGE_BL31 && defined(SPD_spmd) && SPMD_SPM_AT_SEL2
	/* Enable S-EL2 if FEAT_SEL2 is implemented for all the contexts. */
	if (is_feat_sel2_supported()) {
		scr_el3 |= SCR_EEL2_BIT;
	}
#endif /* (IMAGE_BL31 && defined(SPD_spmd) && SPMD_SPM_AT_SEL2) */

	/*
	 * Populate EL3 state so that we have the right context
	 * before doing ERET.
	 */
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
	write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
	write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr);

	/* Start with a clean MDCR_EL3 copy as all relevant values are set */
	mdcr_el3 = MDCR_EL3_RESET_VAL;

	/* ---------------------------------------------------------------------
	 * Initialise MDCR_EL3, setting all fields rather than relying on hw.
	 * Some fields are architecturally UNKNOWN on reset.
	 *
	 * MDCR_EL3.SDD: Set to one to disable AArch64 Secure self-hosted debug.
	 * Debug exceptions, other than Breakpoint Instruction exceptions, are
	 * disabled from all ELs in Secure state.
	 *
	 * MDCR_EL3.SPD32: Set to 0b10 to disable AArch32 Secure self-hosted
	 * privileged debug from S-EL1.
	 *
	 * MDCR_EL3.TDOSA: Set to zero so that EL2 and EL1 System register
	 * accesses to the powerdown debug registers do not trap to EL3.
	 *
	 * MDCR_EL3.TDA: Set to zero to allow EL0, EL1 and EL2 access to the
	 * debug registers, other than those registers that are controlled by
	 * MDCR_EL3.TDOSA.
	 */
	mdcr_el3 |= ((MDCR_SDD_BIT | MDCR_SPD32(MDCR_SPD32_DISABLE))
			& ~(MDCR_TDA_BIT | MDCR_TDOSA_BIT));
	write_ctx_reg(state, CTX_MDCR_EL3, mdcr_el3);

	/*
	 * Configure MDCR_EL3 register as applicable for each world
	 * (NS/Secure/Realm) context.
	 */
	manage_extensions_common(ctx);

	/*
	 * Store the X0-X7 value from the entrypoint into the context.
	 * Use memcpy as we are in control of the layout of the structures.
	 */
	gp_regs = get_gpregs_ctx(ctx);
	memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
}

/*******************************************************************************
 * Context management library initialization routine. This library is used by
 * runtime services to share pointers to 'cpu_context' structures for secure,
 * non-secure and realm states. Management of the structures and their associated
 * memory is not done by the context management library e.g. the PSCI service
 * manages the cpu context used for entry from and exit to the non-secure state.
 * The Secure payload dispatcher service manages the context(s) corresponding to
 * the secure state. It also uses this library to get access to the non-secure
 * state cpu context pointers.
 * Lastly, this library provides the API to make SP_EL3 point to the cpu context
 * which will be used for programming an entry into a lower EL. The same context
 * will be used to save state upon exception entry from that EL.
 ******************************************************************************/
void __init cm_init(void)
{
	/*
	 * The context management library has only global data to initialize, but
	 * that will be done when the BSS is zeroed out.
	 */
}
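
/*
 * Illustrative usage sketch (non-normative, not invoked by this library): a
 * runtime service typically fills an entry_point_info_t describing the image
 * it wants to enter and then asks this library to build and program the
 * corresponding context. 'image_entrypoint' below is a placeholder value.
 *
 *	entry_point_info_t ep = {0};
 *	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, 0);
 *	SET_SECURITY_STATE(ep.h.attr, NON_SECURE);
 *	ep.pc = image_entrypoint;
 *	ep.spsr = SPSR_64(MODE_EL2, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
 *	cm_init_my_context(&ep);
 *	cm_prepare_el3_exit_ns();
 */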

/*******************************************************************************
 * This is the high-level function used to initialize the cpu_context 'ctx' for
 * first use. It performs initializations that are common to all security states
 * and initializations specific to the security state specified in 'ep'.
 ******************************************************************************/
void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	unsigned int security_state;

	assert(ctx != NULL);

	/*
	 * Perform initializations that are common
	 * to all security states.
	 */
	setup_context_common(ctx, ep);

	security_state = GET_SECURITY_STATE(ep->h.attr);

	/* Perform security state specific initializations */
	switch (security_state) {
	case SECURE:
		setup_secure_context(ctx, ep);
		break;
#if ENABLE_RME
	case REALM:
		setup_realm_context(ctx, ep);
		break;
#endif
	case NON_SECURE:
		setup_ns_context(ctx, ep);
		break;
	default:
		ERROR("Invalid security state\n");
		panic();
		break;
	}
}

/*******************************************************************************
 * Enable architecture extensions for EL3 execution. This function only updates
 * registers in-place which are expected to either never change or be
 * overwritten by el3_exit.
 ******************************************************************************/
#if IMAGE_BL31
void cm_manage_extensions_el3(void)
{
	if (is_feat_amu_supported()) {
		amu_init_el3();
	}

	if (is_feat_sme_supported()) {
		sme_init_el3();
	}

	pmuv3_init_el3();
}
#endif /* IMAGE_BL31 */

/******************************************************************************
 * Function to initialise the registers with the RESET values in the context
 * memory, which are maintained per world.
 ******************************************************************************/
#if IMAGE_BL31
void cm_el3_arch_init_per_world(per_world_context_t *per_world_ctx)
{
	/*
	 * Initialise CPTR_EL3, setting all fields rather than relying on hw.
	 *
	 * CPTR_EL3.TFP: Set to zero so that accesses to the V- or Z- registers
	 * by Advanced SIMD, floating-point or SVE instructions (if
	 * implemented) do not trap to EL3.
	 *
	 * CPTR_EL3.TCPAC: Set to zero so that accesses to CPACR_EL1,
	 * CPTR_EL2, CPACR, or HCPTR do not trap to EL3.
	 */
	uint64_t cptr_el3 = CPTR_EL3_RESET_VAL & ~(TCPAC_BIT | TFP_BIT);

	per_world_ctx->ctx_cptr_el3 = cptr_el3;

	/*
	 * Initialize MPAM3_EL3 to its default reset value.
	 *
	 * MPAM3_EL3_RESET_VAL sets the MPAM3_EL3.TRAPLOWER bit, which forces
	 * all lower-EL MPAM register accesses to trap to EL3.
	 */

	per_world_ctx->ctx_mpam3_el3 = MPAM3_EL3_RESET_VAL;
}
#endif /* IMAGE_BL31 */

/*******************************************************************************
 * Initialise per_world_context for the Non-Secure world.
 * This function enables the architecture extensions, which have the same value
 * across the cores for the non-secure world.
 ******************************************************************************/
#if IMAGE_BL31
void manage_extensions_nonsecure_per_world(void)
{
	cm_el3_arch_init_per_world(&per_world_context[CPU_CONTEXT_NS]);

	if (is_feat_sme_supported()) {
		sme_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}

	if (is_feat_sve_supported()) {
		sve_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}

	if (is_feat_amu_supported()) {
		amu_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}

	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}

	if (is_feat_mpam_supported()) {
		mpam_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}
}
#endif /* IMAGE_BL31 */

/*******************************************************************************
 * Initialise per_world_context for the Secure world.
 * This function enables the architecture extensions, which have the same value
 * across the cores for the secure world.
 ******************************************************************************/
static void manage_extensions_secure_per_world(void)
{
#if IMAGE_BL31
	cm_el3_arch_init_per_world(&per_world_context[CPU_CONTEXT_SECURE]);

	if (is_feat_sme_supported()) {

		if (ENABLE_SME_FOR_SWD) {
			/*
			 * Enable SME, SVE, FPU/SIMD in the secure context; the
			 * SPM must ensure that the SME, SVE, and FPU/SIMD
			 * contexts are properly managed.
			 */
			sme_enable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
		} else {
			/*
			 * Disable SME, SVE, FPU/SIMD in the secure context so
			 * the non-secure world can safely use the associated
			 * registers.
			 */
			sme_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
		}
	}
	if (is_feat_sve_supported()) {
		if (ENABLE_SVE_FOR_SWD) {
			/*
			 * Enable SVE and FPU in the secure context; the SPM must
			 * ensure that the SVE and FPU register contexts are
			 * properly managed.
			 */
			sve_enable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
		} else {
			/*
			 * Disable SVE and FPU in the secure context so the
			 * non-secure world can safely use them.
			 */
			sve_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
		}
	}

	/* NS can access this but Secure shouldn't */
	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
	}

	has_secure_perworld_init = true;
#endif /* IMAGE_BL31 */
}

/*******************************************************************************
 * Enable architecture extensions on first entry to the Non-secure world only
 * and disable them for the Secure world.
 *
 * NOTE: Architectural features that are enabled only for the non-secure world
 * and disabled for the secure world are grouped here, as the MDCR_EL3 context
 * value remains the same across the worlds.
 ******************************************************************************/
static void manage_extensions_common(cpu_context_t *ctx)
{
#if IMAGE_BL31
	if (is_feat_spe_supported()) {
		/*
		 * Enable FEAT_SPE for Non-Secure and prohibit for Secure state.
		 */
		spe_enable(ctx);
	}

	if (is_feat_trbe_supported()) {
		/*
		 * Enable FEAT_TRBE for Non-Secure and prohibit for Secure and
		 * Realm state.
		 */
		trbe_enable(ctx);
	}

	if (is_feat_trf_supported()) {
		/*
		 * Enable FEAT_TRF for Non-Secure and prohibit for Secure state.
		 */
		trf_enable(ctx);
	}

	if (is_feat_brbe_supported()) {
		/*
		 * Enable FEAT_BRBE for Non-Secure and prohibit for Secure state.
		 */
		brbe_enable(ctx);
	}
#endif /* IMAGE_BL31 */
}

/*******************************************************************************
 * Enable architecture extensions on first entry to Non-secure world.
 ******************************************************************************/
static void manage_extensions_nonsecure(cpu_context_t *ctx)
{
#if IMAGE_BL31
	if (is_feat_amu_supported()) {
		amu_enable(ctx);
	}

	if (is_feat_sme_supported()) {
		sme_enable(ctx);
	}

	if (is_feat_debugv8p9_supported()) {
		debugv8p9_extended_bp_wp_enable(ctx);
	}

	pmuv3_enable(ctx);
#endif /* IMAGE_BL31 */
}

/* TODO: move to lib/extensions/pauth when it has been ported to FEAT_STATE */
static __unused void enable_pauth_el2(void)
{
	u_register_t hcr_el2 = read_hcr_el2();
	/*
	 * For the Armv8.3 pointer authentication feature, disable traps to EL2
	 * when accessing key registers or using pointer authentication
	 * instructions from lower ELs.
	 */
	hcr_el2 |= (HCR_API_BIT | HCR_APK_BIT);

	write_hcr_el2(hcr_el2);
}

#if INIT_UNUSED_NS_EL2
/*******************************************************************************
 * Enable architecture extensions in-place at EL2 on first entry to Non-secure
 * world when EL2 is empty and unused.
 ******************************************************************************/
static void manage_extensions_nonsecure_el2_unused(void)
{
#if IMAGE_BL31
	if (is_feat_spe_supported()) {
		spe_init_el2_unused();
	}

	if (is_feat_amu_supported()) {
		amu_init_el2_unused();
	}

	if (is_feat_mpam_supported()) {
		mpam_init_el2_unused();
	}

	if (is_feat_trbe_supported()) {
		trbe_init_el2_unused();
	}

	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_init_el2_unused();
	}

	if (is_feat_trf_supported()) {
		trf_init_el2_unused();
	}

	pmuv3_init_el2_unused();

	if (is_feat_sve_supported()) {
		sve_init_el2_unused();
	}

	if (is_feat_sme_supported()) {
		sme_init_el2_unused();
	}

#if ENABLE_PAUTH
	enable_pauth_el2();
#endif /* ENABLE_PAUTH */
#endif /* IMAGE_BL31 */
}
#endif /* INIT_UNUSED_NS_EL2 */

/*******************************************************************************
 * Enable architecture extensions on first entry to Secure world.
 ******************************************************************************/
static void manage_extensions_secure(cpu_context_t *ctx)
{
#if IMAGE_BL31
	if (is_feat_sme_supported()) {
		if (ENABLE_SME_FOR_SWD) {
			/*
			 * Enable SME, SVE, FPU/SIMD in the secure context; the
			 * secure manager must ensure that the SME, SVE, and
			 * FPU/SIMD contexts are properly managed.
			 */
			sme_init_el3();
			sme_enable(ctx);
		} else {
			/*
			 * Disable SME, SVE, FPU/SIMD in the secure context so
			 * the non-secure world can safely use the associated
			 * registers.
			 */
			sme_disable(ctx);
		}
	}
#endif /* IMAGE_BL31 */
}

#if !IMAGE_BL1
/*******************************************************************************
 * The following function initializes the cpu_context for a CPU specified by
 * its `cpu_idx` for first use, and sets the initial entrypoint state as
 * specified by the entry_point_info structure.
 ******************************************************************************/
void cm_init_context_by_index(unsigned int cpu_idx,
			      const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}
#endif /* !IMAGE_BL1 */

/*******************************************************************************
 * The following function initializes the cpu_context for the current CPU
 * for first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 ******************************************************************************/
void cm_init_my_context(const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}
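
/*
 * Illustrative note (non-normative): the two helpers above are how other EL3
 * components typically hand over to this library. For example, PSCI is
 * expected to initialise the non-secure context of a core being powered on
 * with cm_init_context_by_index(), while the current core's context is
 * initialised with cm_init_my_context() before exiting EL3 through
 * cm_prepare_el3_exit() or cm_prepare_el3_exit_ns().
 */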

/* EL2 present but unused, need to disable safely. SCTLR_EL2 can be ignored */
static void init_nonsecure_el2_unused(cpu_context_t *ctx)
{
#if INIT_UNUSED_NS_EL2
	u_register_t hcr_el2 = HCR_RESET_VAL;
	u_register_t mdcr_el2;
	u_register_t scr_el3;

	scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);

	/* Set EL2 register width: Set HCR_EL2.RW to match SCR_EL3.RW */
	if ((scr_el3 & SCR_RW_BIT) != 0U) {
		hcr_el2 |= HCR_RW_BIT;
	}

	write_hcr_el2(hcr_el2);

	/*
	 * Initialise CPTR_EL2, setting all fields rather than relying on the hw.
	 * All fields have architecturally UNKNOWN reset values.
	 */
	write_cptr_el2(CPTR_EL2_RESET_VAL);

	/*
	 * Initialise CNTHCTL_EL2. All fields are architecturally UNKNOWN on
	 * reset and are set to zero except for the field(s) listed below.
	 *
	 * CNTHCTL_EL2.EL1PCEN: Set to one to disable traps to Hyp mode of
	 * Non-secure EL0 and EL1 accesses to the physical timer registers.
	 *
	 * CNTHCTL_EL2.EL1PCTEN: Set to one to disable traps to Hyp mode of
	 * Non-secure EL0 and EL1 accesses to the physical counter registers.
	 */
	write_cnthctl_el2(CNTHCTL_RESET_VAL | EL1PCEN_BIT | EL1PCTEN_BIT);

	/*
	 * Initialise CNTVOFF_EL2 to zero as it resets to an architecturally
	 * UNKNOWN value.
	 */
	write_cntvoff_el2(0);

	/*
	 * Set VPIDR_EL2 and VMPIDR_EL2 to match MIDR_EL1 and MPIDR_EL1
	 * respectively.
	 */
	write_vpidr_el2(read_midr_el1());
	write_vmpidr_el2(read_mpidr_el1());

	/*
	 * Initialise VTTBR_EL2. All fields are architecturally UNKNOWN on reset.
	 *
	 * VTTBR_EL2.VMID: Set to zero. Even though EL1&0 stage 2 address
	 * translation is disabled, cache maintenance operations depend on the
	 * VMID.
	 *
	 * VTTBR_EL2.BADDR: Set to zero as EL1&0 stage 2 address translation is
	 * disabled.
	 */
	write_vttbr_el2(VTTBR_RESET_VAL &
		~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT) |
		(VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT)));

	/*
	 * Initialise MDCR_EL2, setting all fields rather than relying on hw.
	 * Some fields are architecturally UNKNOWN on reset.
	 *
	 * MDCR_EL2.TDRA: Set to zero so that Non-secure EL0 and EL1 System
	 * register accesses to the Debug ROM registers are not trapped to EL2.
	 *
	 * MDCR_EL2.TDOSA: Set to zero so that Non-secure EL1 System register
	 * accesses to the powerdown debug registers are not trapped to EL2.
	 *
	 * MDCR_EL2.TDA: Set to zero so that System register accesses to the
	 * debug registers do not trap to EL2.
	 *
	 * MDCR_EL2.TDE: Set to zero so that debug exceptions are not routed to
	 * EL2.
	 */
	mdcr_el2 = MDCR_EL2_RESET_VAL &
			~(MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT | MDCR_EL2_TDA_BIT |
			  MDCR_EL2_TDE_BIT);

	write_mdcr_el2(mdcr_el2);

	/*
	 * Initialise HSTR_EL2. All fields are architecturally UNKNOWN on reset.
	 *
	 * HSTR_EL2.T<n>: Set all these fields to zero so that Non-secure EL0 or
	 * EL1 accesses to System registers do not trap to EL2.
	 */
	write_hstr_el2(HSTR_EL2_RESET_VAL & ~(HSTR_EL2_T_MASK));

	/*
	 * Initialise CNTHP_CTL_EL2. All fields are architecturally UNKNOWN on
	 * reset.
	 *
	 * CNTHP_CTL_EL2.ENABLE: Set to zero to disable the EL2 physical timer
	 * and prevent timer interrupts.
	 */
	write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL & ~(CNTHP_CTL_ENABLE_BIT));

	manage_extensions_nonsecure_el2_unused();
#endif /* INIT_UNUSED_NS_EL2 */
}

/*******************************************************************************
 * Prepare the CPU system registers for first entry into realm, secure, or
 * normal world.
 *
 * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized.
 * If execution is requested to non-secure EL1 or svc mode, and the CPU supports
 * EL2 then EL2 is disabled by configuring all necessary EL2 registers.
 * For all entries, the EL1 registers are initialized from the cpu_context.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	u_register_t sctlr_el2, scr_el3;
	cpu_context_t *ctx = cm_get_context(security_state);

	assert(ctx != NULL);

	if (security_state == NON_SECURE) {
		uint64_t el2_implemented = el_implemented(2);

		scr_el3 = read_ctx_reg(get_el3state_ctx(ctx),
				       CTX_SCR_EL3);

		if (el2_implemented != EL_IMPL_NONE) {

			/*
			 * If the context is not being used for EL2, initialize
			 * HCRX_EL2 with its init value here.
			 */
			if (is_feat_hcx_supported()) {
				write_hcrx_el2(HCRX_EL2_INIT_VAL);
			}

			/*
			 * Initialize the Fine-grained trap registers introduced
			 * by FEAT_FGT so all traps are initially disabled when
			 * switching to EL2 or a lower EL, preventing undesired
			 * behavior.
			 */
			if (is_feat_fgt_supported()) {
				/*
				 * Initialize HFG*_EL2 registers with a default
				 * value so legacy systems unaware of FEAT_FGT
				 * do not get trapped due to their lack of
				 * initialization for this feature.
				 */
				write_hfgitr_el2(HFGITR_EL2_INIT_VAL);
				write_hfgrtr_el2(HFGRTR_EL2_INIT_VAL);
				write_hfgwtr_el2(HFGWTR_EL2_INIT_VAL);
			}

			/* Condition to ensure EL2 is being used. */
			if ((scr_el3 & SCR_HCE_BIT) != 0U) {
				/* Initialize SCTLR_EL2 register with reset value. */
				sctlr_el2 = SCTLR_EL2_RES1;
#if ERRATA_A75_764081
				/*
				 * If the workaround for Cortex-A75 erratum
				 * 764081 is used then set SCTLR_EL2.IESB to
				 * enable the Implicit Error Synchronization
				 * Barrier.
				 */
				sctlr_el2 |= SCTLR_IESB_BIT;
#endif
				write_sctlr_el2(sctlr_el2);
			} else {
				/*
				 * (scr_el3 & SCR_HCE_BIT == 0)
				 * EL2 implemented but unused.
				 */
				init_nonsecure_el2_unused(ctx);
			}
		}
	}
	cm_el1_sysregs_context_restore(security_state);
	cm_set_next_eret_context(security_state);
}

#if CTX_INCLUDE_EL2_REGS

static void el2_sysregs_context_save_fgt(el2_sysregs_t *ctx)
{
	write_el2_ctx_fgt(ctx, hdfgrtr_el2, read_hdfgrtr_el2());
	if (is_feat_amu_supported()) {
		write_el2_ctx_fgt(ctx, hafgrtr_el2, read_hafgrtr_el2());
	}
	write_el2_ctx_fgt(ctx, hdfgwtr_el2, read_hdfgwtr_el2());
	write_el2_ctx_fgt(ctx, hfgitr_el2, read_hfgitr_el2());
	write_el2_ctx_fgt(ctx, hfgrtr_el2, read_hfgrtr_el2());
	write_el2_ctx_fgt(ctx, hfgwtr_el2, read_hfgwtr_el2());
}

static void el2_sysregs_context_restore_fgt(el2_sysregs_t *ctx)
{
	write_hdfgrtr_el2(read_el2_ctx_fgt(ctx, hdfgrtr_el2));
	if (is_feat_amu_supported()) {
		write_hafgrtr_el2(read_el2_ctx_fgt(ctx, hafgrtr_el2));
	}
	write_hdfgwtr_el2(read_el2_ctx_fgt(ctx, hdfgwtr_el2));
	write_hfgitr_el2(read_el2_ctx_fgt(ctx, hfgitr_el2));
	write_hfgrtr_el2(read_el2_ctx_fgt(ctx, hfgrtr_el2));
	write_hfgwtr_el2(read_el2_ctx_fgt(ctx, hfgwtr_el2));
}

static void el2_sysregs_context_save_mpam(el2_sysregs_t *ctx)
{
	u_register_t mpam_idr = read_mpamidr_el1();

	write_el2_ctx_mpam(ctx, mpam2_el2, read_mpam2_el2());

	/*
	 * The context registers that we intend to save would be part of the
	 * PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1.
	 */
	if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) {
		return;
	}

	/*
	 * MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 are always present if
	 * MPAMIDR_HAS_HCR_BIT == 1.
	 */
	write_el2_ctx_mpam(ctx, mpamhcr_el2, read_mpamhcr_el2());
	write_el2_ctx_mpam(ctx, mpamvpm0_el2, read_mpamvpm0_el2());
	write_el2_ctx_mpam(ctx, mpamvpmv_el2, read_mpamvpmv_el2());

	/*
	 * The number of MPAMVPM registers is implementation defined; their
	 * number is stored in the MPAMIDR_EL1 register.
	 */
	switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) {
	case 7:
		write_el2_ctx_mpam(ctx, mpamvpm7_el2, read_mpamvpm7_el2());
		__fallthrough;
	case 6:
		write_el2_ctx_mpam(ctx, mpamvpm6_el2, read_mpamvpm6_el2());
		__fallthrough;
	case 5:
		write_el2_ctx_mpam(ctx, mpamvpm5_el2, read_mpamvpm5_el2());
		__fallthrough;
	case 4:
		write_el2_ctx_mpam(ctx, mpamvpm4_el2, read_mpamvpm4_el2());
		__fallthrough;
	case 3:
		write_el2_ctx_mpam(ctx, mpamvpm3_el2, read_mpamvpm3_el2());
		__fallthrough;
	case 2:
		write_el2_ctx_mpam(ctx, mpamvpm2_el2, read_mpamvpm2_el2());
		__fallthrough;
	case 1:
		write_el2_ctx_mpam(ctx, mpamvpm1_el2, read_mpamvpm1_el2());
		break;
	}
}

static void el2_sysregs_context_restore_mpam(el2_sysregs_t *ctx)
{
	u_register_t mpam_idr = read_mpamidr_el1();

	write_mpam2_el2(read_el2_ctx_mpam(ctx, mpam2_el2));

	if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) {
		return;
	}

	write_mpamhcr_el2(read_el2_ctx_mpam(ctx, mpamhcr_el2));
	write_mpamvpm0_el2(read_el2_ctx_mpam(ctx, mpamvpm0_el2));
	write_mpamvpmv_el2(read_el2_ctx_mpam(ctx, mpamvpmv_el2));

	switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) {
	case 7:
		write_mpamvpm7_el2(read_el2_ctx_mpam(ctx, mpamvpm7_el2));
		__fallthrough;
	case 6:
		write_mpamvpm6_el2(read_el2_ctx_mpam(ctx, mpamvpm6_el2));
		__fallthrough;
	case 5:
		write_mpamvpm5_el2(read_el2_ctx_mpam(ctx, mpamvpm5_el2));
		__fallthrough;
	case 4:
		write_mpamvpm4_el2(read_el2_ctx_mpam(ctx, mpamvpm4_el2));
		__fallthrough;
	case 3:
		write_mpamvpm3_el2(read_el2_ctx_mpam(ctx, mpamvpm3_el2));
		__fallthrough;
	case 2:
		write_mpamvpm2_el2(read_el2_ctx_mpam(ctx, mpamvpm2_el2));
		__fallthrough;
	case 1:
		write_mpamvpm1_el2(read_el2_ctx_mpam(ctx, mpamvpm1_el2));
		break;
	}
}

/* ---------------------------------------------------------------------------
 * The following registers are not added:
 * ICH_AP0R<n>_EL2
 * ICH_AP1R<n>_EL2
 * ICH_LR<n>_EL2
 *
 * NOTE: For a system with S-EL2 present but not enabled, accessing
 * ICC_SRE_EL2 is undefined from EL3. To work around this, set
 * SCR_EL3.NS = 1 before accessing this register.
 * ---------------------------------------------------------------------------
 */
static void el2_sysregs_context_save_gic(el2_sysregs_t *ctx)
{
#if defined(SPD_spmd) && SPMD_SPM_AT_SEL2
	write_el2_ctx_common(ctx, icc_sre_el2, read_icc_sre_el2());
#else
	u_register_t scr_el3 = read_scr_el3();
	write_scr_el3(scr_el3 | SCR_NS_BIT);
	isb();

	write_el2_ctx_common(ctx, icc_sre_el2, read_icc_sre_el2());

	write_scr_el3(scr_el3);
	isb();
#endif
	write_el2_ctx_common(ctx, ich_hcr_el2, read_ich_hcr_el2());
	write_el2_ctx_common(ctx, ich_vmcr_el2, read_ich_vmcr_el2());
}

static void el2_sysregs_context_restore_gic(el2_sysregs_t *ctx)
{
#if defined(SPD_spmd) && SPMD_SPM_AT_SEL2
	write_icc_sre_el2(read_el2_ctx_common(ctx, icc_sre_el2));
#else
	u_register_t scr_el3 = read_scr_el3();
	write_scr_el3(scr_el3 | SCR_NS_BIT);
	isb();

	write_icc_sre_el2(read_el2_ctx_common(ctx, icc_sre_el2));

	write_scr_el3(scr_el3);
	isb();
#endif
	write_ich_hcr_el2(read_el2_ctx_common(ctx, ich_hcr_el2));
	write_ich_vmcr_el2(read_el2_ctx_common(ctx, ich_vmcr_el2));
}

/* -----------------------------------------------------
 * The following registers are not added:
 * AMEVCNTVOFF0<n>_EL2
 * AMEVCNTVOFF1<n>_EL2
 * -----------------------------------------------------
 */
static void el2_sysregs_context_save_common(el2_sysregs_t *ctx)
{
	write_el2_ctx_common(ctx, actlr_el2, read_actlr_el2());
	write_el2_ctx_common(ctx, afsr0_el2, read_afsr0_el2());
	write_el2_ctx_common(ctx, afsr1_el2, read_afsr1_el2());
	write_el2_ctx_common(ctx, amair_el2, read_amair_el2());
	write_el2_ctx_common(ctx, cnthctl_el2, read_cnthctl_el2());
	write_el2_ctx_common(ctx, cntvoff_el2, read_cntvoff_el2());
	write_el2_ctx_common(ctx, cptr_el2, read_cptr_el2());
	if (CTX_INCLUDE_AARCH32_REGS) {
		write_el2_ctx_common(ctx, dbgvcr32_el2, read_dbgvcr32_el2());
	}
	write_el2_ctx_common(ctx, elr_el2, read_elr_el2());
	write_el2_ctx_common(ctx, esr_el2, read_esr_el2());
	write_el2_ctx_common(ctx, far_el2, read_far_el2());
	write_el2_ctx_common(ctx, hacr_el2, read_hacr_el2());
	write_el2_ctx_common(ctx, hcr_el2, read_hcr_el2());
	write_el2_ctx_common(ctx, hpfar_el2, read_hpfar_el2());
	write_el2_ctx_common(ctx, hstr_el2, read_hstr_el2());
	write_el2_ctx_common(ctx, mair_el2, read_mair_el2());
	write_el2_ctx_common(ctx, mdcr_el2, read_mdcr_el2());
	write_el2_ctx_common(ctx, sctlr_el2, read_sctlr_el2());
	write_el2_ctx_common(ctx, spsr_el2, read_spsr_el2());
	write_el2_ctx_common(ctx, sp_el2, read_sp_el2());
	write_el2_ctx_common(ctx, tcr_el2, read_tcr_el2());
	write_el2_ctx_common(ctx, tpidr_el2, read_tpidr_el2());
	write_el2_ctx_common(ctx, ttbr0_el2, read_ttbr0_el2());
	write_el2_ctx_common(ctx, vbar_el2, read_vbar_el2());
	write_el2_ctx_common(ctx, vmpidr_el2, read_vmpidr_el2());
	write_el2_ctx_common(ctx, vpidr_el2, read_vpidr_el2());
	write_el2_ctx_common(ctx, vtcr_el2, read_vtcr_el2());
	write_el2_ctx_common(ctx, vttbr_el2, read_vttbr_el2());
}

static void el2_sysregs_context_restore_common(el2_sysregs_t *ctx)
{
	write_actlr_el2(read_el2_ctx_common(ctx, actlr_el2));
	write_afsr0_el2(read_el2_ctx_common(ctx, afsr0_el2));
	write_afsr1_el2(read_el2_ctx_common(ctx, afsr1_el2));
	write_amair_el2(read_el2_ctx_common(ctx, amair_el2));
	write_cnthctl_el2(read_el2_ctx_common(ctx, cnthctl_el2));
	write_cntvoff_el2(read_el2_ctx_common(ctx, cntvoff_el2));
	write_cptr_el2(read_el2_ctx_common(ctx, cptr_el2));
	if (CTX_INCLUDE_AARCH32_REGS) {
		write_dbgvcr32_el2(read_el2_ctx_common(ctx, dbgvcr32_el2));
	}
	write_elr_el2(read_el2_ctx_common(ctx, elr_el2));
	write_esr_el2(read_el2_ctx_common(ctx, esr_el2));
	write_far_el2(read_el2_ctx_common(ctx, far_el2));
	write_hacr_el2(read_el2_ctx_common(ctx, hacr_el2));
	write_hcr_el2(read_el2_ctx_common(ctx, hcr_el2));
	write_hpfar_el2(read_el2_ctx_common(ctx, hpfar_el2));
	write_hstr_el2(read_el2_ctx_common(ctx, hstr_el2));
	write_mair_el2(read_el2_ctx_common(ctx, mair_el2));
	write_mdcr_el2(read_el2_ctx_common(ctx, mdcr_el2));
	write_sctlr_el2(read_el2_ctx_common(ctx, sctlr_el2));
	write_spsr_el2(read_el2_ctx_common(ctx, spsr_el2));
	write_sp_el2(read_el2_ctx_common(ctx, sp_el2));
	write_tcr_el2(read_el2_ctx_common(ctx, tcr_el2));
	write_tpidr_el2(read_el2_ctx_common(ctx, tpidr_el2));
	write_ttbr0_el2(read_el2_ctx_common(ctx, ttbr0_el2));
	write_vbar_el2(read_el2_ctx_common(ctx, vbar_el2));
	write_vmpidr_el2(read_el2_ctx_common(ctx, vmpidr_el2));
	write_vpidr_el2(read_el2_ctx_common(ctx, vpidr_el2));
	write_vtcr_el2(read_el2_ctx_common(ctx, vtcr_el2));
	write_vttbr_el2(read_el2_ctx_common(ctx, vttbr_el2));
}

/*******************************************************************************
 * Save EL2 sysreg context
 ******************************************************************************/
void cm_el2_sysregs_context_save(uint32_t security_state)
{
	cpu_context_t *ctx;
	el2_sysregs_t *el2_sysregs_ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);

	el2_sysregs_context_save_common(el2_sysregs_ctx);
	el2_sysregs_context_save_gic(el2_sysregs_ctx);

	if (is_feat_mte2_supported()) {
		write_el2_ctx_mte2(el2_sysregs_ctx, tfsr_el2, read_tfsr_el2());
	}

	if (is_feat_mpam_supported()) {
		el2_sysregs_context_save_mpam(el2_sysregs_ctx);
	}

	if (is_feat_fgt_supported()) {
		el2_sysregs_context_save_fgt(el2_sysregs_ctx);
	}

	if (is_feat_ecv_v2_supported()) {
		write_el2_ctx_ecv(el2_sysregs_ctx, cntpoff_el2, read_cntpoff_el2());
	}

	if (is_feat_vhe_supported()) {
		write_el2_ctx_vhe(el2_sysregs_ctx, contextidr_el2,
					read_contextidr_el2());
		write_el2_ctx_vhe(el2_sysregs_ctx, ttbr1_el2, read_ttbr1_el2());
	}

	if (is_feat_ras_supported()) {
		write_el2_ctx_ras(el2_sysregs_ctx, vdisr_el2, read_vdisr_el2());
		write_el2_ctx_ras(el2_sysregs_ctx, vsesr_el2, read_vsesr_el2());
	}

	if (is_feat_nv2_supported()) {
		write_el2_ctx_neve(el2_sysregs_ctx, vncr_el2, read_vncr_el2());
	}

	if (is_feat_trf_supported()) {
		write_el2_ctx_trf(el2_sysregs_ctx, trfcr_el2, read_trfcr_el2());
	}

	if (is_feat_csv2_2_supported()) {
		write_el2_ctx_csv2_2(el2_sysregs_ctx, scxtnum_el2,
					read_scxtnum_el2());
	}

	if (is_feat_hcx_supported()) {
		write_el2_ctx_hcx(el2_sysregs_ctx, hcrx_el2, read_hcrx_el2());
	}

	if (is_feat_tcr2_supported()) {
		write_el2_ctx_tcr2(el2_sysregs_ctx, tcr2_el2, read_tcr2_el2());
	}

	if (is_feat_sxpie_supported()) {
		write_el2_ctx_sxpie(el2_sysregs_ctx, pire0_el2, read_pire0_el2());
		write_el2_ctx_sxpie(el2_sysregs_ctx, pir_el2, read_pir_el2());
	}

	if (is_feat_sxpoe_supported()) {
		write_el2_ctx_sxpoe(el2_sysregs_ctx, por_el2, read_por_el2());
	}

	if (is_feat_s2pie_supported()) {
		write_el2_ctx_s2pie(el2_sysregs_ctx, s2pir_el2, read_s2pir_el2());
	}

	if (is_feat_gcs_supported()) {
		write_el2_ctx_gcs(el2_sysregs_ctx, gcscr_el2, read_gcscr_el2());
		write_el2_ctx_gcs(el2_sysregs_ctx, gcspr_el2, read_gcspr_el2());
	}
}

/*******************************************************************************
 * Restore EL2 sysreg context
 ******************************************************************************/
void cm_el2_sysregs_context_restore(uint32_t security_state)
{
	cpu_context_t *ctx;
	el2_sysregs_t *el2_sysregs_ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);

	el2_sysregs_context_restore_common(el2_sysregs_ctx);
	el2_sysregs_context_restore_gic(el2_sysregs_ctx);

	if (is_feat_mte2_supported()) {
		write_tfsr_el2(read_el2_ctx_mte2(el2_sysregs_ctx, tfsr_el2));
	}

	if (is_feat_mpam_supported()) {
		el2_sysregs_context_restore_mpam(el2_sysregs_ctx);
	}

	if (is_feat_fgt_supported()) {
		el2_sysregs_context_restore_fgt(el2_sysregs_ctx);
	}

	if (is_feat_ecv_v2_supported()) {
		write_cntpoff_el2(read_el2_ctx_ecv(el2_sysregs_ctx, cntpoff_el2));
	}

	if (is_feat_vhe_supported()) {
		write_contextidr_el2(read_el2_ctx_vhe(el2_sysregs_ctx,
					contextidr_el2));
		write_ttbr1_el2(read_el2_ctx_vhe(el2_sysregs_ctx, ttbr1_el2));
	}

	if (is_feat_ras_supported()) {
		write_vdisr_el2(read_el2_ctx_ras(el2_sysregs_ctx, vdisr_el2));
		write_vsesr_el2(read_el2_ctx_ras(el2_sysregs_ctx, vsesr_el2));
	}

	if (is_feat_nv2_supported()) {
		write_vncr_el2(read_el2_ctx_neve(el2_sysregs_ctx, vncr_el2));
	}

	if (is_feat_trf_supported()) {
		write_trfcr_el2(read_el2_ctx_trf(el2_sysregs_ctx, trfcr_el2));
	}

	if (is_feat_csv2_2_supported()) {
		write_scxtnum_el2(read_el2_ctx_csv2_2(el2_sysregs_ctx,
					scxtnum_el2));
	}

	if (is_feat_hcx_supported()) {
		write_hcrx_el2(read_el2_ctx_hcx(el2_sysregs_ctx, hcrx_el2));
	}

	if (is_feat_tcr2_supported()) {
		write_tcr2_el2(read_el2_ctx_tcr2(el2_sysregs_ctx, tcr2_el2));
	}

	if (is_feat_sxpie_supported()) {
		write_pire0_el2(read_el2_ctx_sxpie(el2_sysregs_ctx, pire0_el2));
		write_pir_el2(read_el2_ctx_sxpie(el2_sysregs_ctx, pir_el2));
	}

	if (is_feat_sxpoe_supported()) {
		write_por_el2(read_el2_ctx_sxpoe(el2_sysregs_ctx, por_el2));
	}

	if (is_feat_s2pie_supported()) {
		write_s2pir_el2(read_el2_ctx_s2pie(el2_sysregs_ctx, s2pir_el2));
	}

	if (is_feat_gcs_supported()) {
		write_gcscr_el2(read_el2_ctx_gcs(el2_sysregs_ctx, gcscr_el2));
		write_gcspr_el2(read_el2_ctx_gcs(el2_sysregs_ctx, gcspr_el2));
	}
}
#endif /* CTX_INCLUDE_EL2_REGS */
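
/*
 * Illustrative sequence (non-normative): with CTX_INCLUDE_EL2_REGS enabled, a
 * dispatcher switching worlds typically pairs the EL2 save/restore helpers
 * above with their EL1 counterparts, for example when entering the secure
 * world from the normal world:
 *
 *	cm_el1_sysregs_context_save(NON_SECURE);
 *	cm_el2_sysregs_context_save(NON_SECURE);
 *	cm_el1_sysregs_context_restore(SECURE);
 *	cm_el2_sysregs_context_restore(SECURE);
 *	cm_set_next_eret_context(SECURE);
 */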

/*******************************************************************************
 * This function is used to exit to the Non-secure world. If CTX_INCLUDE_EL2_REGS
 * is enabled, it restores the EL1 and EL2 sysreg contexts instead of directly
 * updating the EL1 and EL2 registers. Otherwise, it calls the generic
 * cm_prepare_el3_exit function.
 ******************************************************************************/
void cm_prepare_el3_exit_ns(void)
{
#if CTX_INCLUDE_EL2_REGS
#if ENABLE_ASSERTIONS
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	assert(ctx != NULL);

	/* Assert that EL2 is used. */
	u_register_t scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);
	assert(((scr_el3 & SCR_HCE_BIT) != 0UL) &&
			(el_implemented(2U) != EL_IMPL_NONE));
#endif /* ENABLE_ASSERTIONS */

	/* Restore EL2 and EL1 sysreg contexts */
	cm_el2_sysregs_context_restore(NON_SECURE);
	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);
#else
	cm_prepare_el3_exit(NON_SECURE);
#endif /* CTX_INCLUDE_EL2_REGS */
}

static void el1_sysregs_context_save(el1_sysregs_t *ctx)
{
	write_ctx_reg(ctx, CTX_SPSR_EL1, read_spsr_el1());
	write_ctx_reg(ctx, CTX_ELR_EL1, read_elr_el1());

#if !ERRATA_SPECULATIVE_AT
	write_ctx_reg(ctx, CTX_SCTLR_EL1, read_sctlr_el1());
	write_ctx_reg(ctx, CTX_TCR_EL1, read_tcr_el1());
#endif /* (!ERRATA_SPECULATIVE_AT) */

	write_ctx_reg(ctx, CTX_CPACR_EL1, read_cpacr_el1());
	write_ctx_reg(ctx, CTX_CSSELR_EL1, read_csselr_el1());
	write_ctx_reg(ctx, CTX_SP_EL1, read_sp_el1());
	write_ctx_reg(ctx, CTX_ESR_EL1, read_esr_el1());
	write_ctx_reg(ctx, CTX_TTBR0_EL1, read_ttbr0_el1());
	write_ctx_reg(ctx, CTX_TTBR1_EL1, read_ttbr1_el1());
	write_ctx_reg(ctx, CTX_MAIR_EL1, read_mair_el1());
	write_ctx_reg(ctx, CTX_AMAIR_EL1, read_amair_el1());
	write_ctx_reg(ctx, CTX_ACTLR_EL1, read_actlr_el1());
	write_ctx_reg(ctx, CTX_TPIDR_EL1, read_tpidr_el1());
	write_ctx_reg(ctx, CTX_TPIDR_EL0, read_tpidr_el0());
	write_ctx_reg(ctx, CTX_TPIDRRO_EL0, read_tpidrro_el0());
	write_ctx_reg(ctx, CTX_PAR_EL1, read_par_el1());
	write_ctx_reg(ctx, CTX_FAR_EL1, read_far_el1());
	write_ctx_reg(ctx, CTX_AFSR0_EL1, read_afsr0_el1());
	write_ctx_reg(ctx, CTX_AFSR1_EL1, read_afsr1_el1());
	write_ctx_reg(ctx, CTX_CONTEXTIDR_EL1, read_contextidr_el1());
	write_ctx_reg(ctx, CTX_VBAR_EL1, read_vbar_el1());
	write_ctx_reg(ctx, CTX_MDCCINT_EL1, read_mdccint_el1());
	write_ctx_reg(ctx, CTX_MDSCR_EL1, read_mdscr_el1());

#if CTX_INCLUDE_AARCH32_REGS
	write_ctx_reg(ctx, CTX_SPSR_ABT, read_spsr_abt());
	write_ctx_reg(ctx, CTX_SPSR_UND, read_spsr_und());
	write_ctx_reg(ctx, CTX_SPSR_IRQ, read_spsr_irq());
	write_ctx_reg(ctx, CTX_SPSR_FIQ, read_spsr_fiq());
	write_ctx_reg(ctx, CTX_DACR32_EL2, read_dacr32_el2());
	write_ctx_reg(ctx, CTX_IFSR32_EL2, read_ifsr32_el2());
#endif /* CTX_INCLUDE_AARCH32_REGS */

#if NS_TIMER_SWITCH
	write_ctx_reg(ctx, CTX_CNTP_CTL_EL0, read_cntp_ctl_el0());
	write_ctx_reg(ctx, CTX_CNTP_CVAL_EL0, read_cntp_cval_el0());
	write_ctx_reg(ctx, CTX_CNTV_CTL_EL0, read_cntv_ctl_el0());
	write_ctx_reg(ctx, CTX_CNTV_CVAL_EL0, read_cntv_cval_el0());
	write_ctx_reg(ctx, CTX_CNTKCTL_EL1, read_cntkctl_el1());
#endif /* NS_TIMER_SWITCH */

#if ENABLE_FEAT_MTE2
	write_ctx_reg(ctx, CTX_TFSRE0_EL1, read_tfsre0_el1());
	write_ctx_reg(ctx, CTX_TFSR_EL1, read_tfsr_el1());
	write_ctx_reg(ctx, CTX_RGSR_EL1, read_rgsr_el1());
	write_ctx_reg(ctx, CTX_GCR_EL1, read_gcr_el1());
#endif /* ENABLE_FEAT_MTE2 */

#if ENABLE_FEAT_RAS
	if (is_feat_ras_supported()) {
		write_ctx_reg(ctx, CTX_DISR_EL1, read_disr_el1());
	}
#endif

#if ENABLE_FEAT_S1PIE
	if (is_feat_s1pie_supported()) {
		write_ctx_reg(ctx, CTX_PIRE0_EL1, read_pire0_el1());
		write_ctx_reg(ctx, CTX_PIR_EL1, read_pir_el1());
	}
#endif

#if ENABLE_FEAT_S1POE
	if (is_feat_s1poe_supported()) {
		write_ctx_reg(ctx, CTX_POR_EL1, read_por_el1());
	}
#endif

#if ENABLE_FEAT_S2POE
	if (is_feat_s2poe_supported()) {
		write_ctx_reg(ctx, CTX_S2POR_EL1, read_s2por_el1());
	}
#endif

#if ENABLE_FEAT_TCR2
	if (is_feat_tcr2_supported()) {
		write_ctx_reg(ctx, CTX_TCR2_EL1, read_tcr2_el1());
	}
#endif

#if ENABLE_TRF_FOR_NS
	if (is_feat_trf_supported()) {
		write_ctx_reg(ctx, CTX_TRFCR_EL1, read_trfcr_el1());
	}
#endif

#if ENABLE_FEAT_CSV2_2
	if (is_feat_csv2_2_supported()) {
		write_ctx_reg(ctx, CTX_SCXTNUM_EL0, read_scxtnum_el0());
		write_ctx_reg(ctx, CTX_SCXTNUM_EL1, read_scxtnum_el1());
	}
#endif

#if ENABLE_FEAT_GCS
	if (is_feat_gcs_supported()) {
		write_ctx_reg(ctx, CTX_GCSCR_EL1, read_gcscr_el1());
		write_ctx_reg(ctx, CTX_GCSCRE0_EL1, read_gcscre0_el1());
		write_ctx_reg(ctx, CTX_GCSPR_EL1, read_gcspr_el1());
		write_ctx_reg(ctx, CTX_GCSPR_EL0, read_gcspr_el0());
	}
#endif
}

static void el1_sysregs_context_restore(el1_sysregs_t *ctx)
{
	write_spsr_el1(read_ctx_reg(ctx, CTX_SPSR_EL1));
	write_elr_el1(read_ctx_reg(ctx, CTX_ELR_EL1));

#if !ERRATA_SPECULATIVE_AT
	write_sctlr_el1(read_ctx_reg(ctx, CTX_SCTLR_EL1));
	write_tcr_el1(read_ctx_reg(ctx, CTX_TCR_EL1));
#endif /* (!ERRATA_SPECULATIVE_AT) */

	write_cpacr_el1(read_ctx_reg(ctx, CTX_CPACR_EL1));
	write_csselr_el1(read_ctx_reg(ctx, CTX_CSSELR_EL1));
	write_sp_el1(read_ctx_reg(ctx, CTX_SP_EL1));
	write_esr_el1(read_ctx_reg(ctx, CTX_ESR_EL1));
	write_ttbr0_el1(read_ctx_reg(ctx, CTX_TTBR0_EL1));
	write_ttbr1_el1(read_ctx_reg(ctx, CTX_TTBR1_EL1));
	write_mair_el1(read_ctx_reg(ctx, CTX_MAIR_EL1));
	write_amair_el1(read_ctx_reg(ctx, CTX_AMAIR_EL1));
	write_actlr_el1(read_ctx_reg(ctx, CTX_ACTLR_EL1));
	write_tpidr_el1(read_ctx_reg(ctx, CTX_TPIDR_EL1));
	write_tpidr_el0(read_ctx_reg(ctx, CTX_TPIDR_EL0));
	write_tpidrro_el0(read_ctx_reg(ctx, CTX_TPIDRRO_EL0));
	write_par_el1(read_ctx_reg(ctx, CTX_PAR_EL1));
	write_far_el1(read_ctx_reg(ctx, CTX_FAR_EL1));
	write_afsr0_el1(read_ctx_reg(ctx, CTX_AFSR0_EL1));
	write_afsr1_el1(read_ctx_reg(ctx, CTX_AFSR1_EL1));
	write_contextidr_el1(read_ctx_reg(ctx, CTX_CONTEXTIDR_EL1));
	write_vbar_el1(read_ctx_reg(ctx, CTX_VBAR_EL1));
	write_mdccint_el1(read_ctx_reg(ctx, CTX_MDCCINT_EL1));
	write_mdscr_el1(read_ctx_reg(ctx, CTX_MDSCR_EL1));

#if CTX_INCLUDE_AARCH32_REGS
	write_spsr_abt(read_ctx_reg(ctx, CTX_SPSR_ABT));
	write_spsr_und(read_ctx_reg(ctx, CTX_SPSR_UND));
	write_spsr_irq(read_ctx_reg(ctx, CTX_SPSR_IRQ));
	write_spsr_fiq(read_ctx_reg(ctx, CTX_SPSR_FIQ));
	write_dacr32_el2(read_ctx_reg(ctx, CTX_DACR32_EL2));
	write_ifsr32_el2(read_ctx_reg(ctx, CTX_IFSR32_EL2));
#endif /* CTX_INCLUDE_AARCH32_REGS */

#if NS_TIMER_SWITCH
	write_cntp_ctl_el0(read_ctx_reg(ctx, CTX_CNTP_CTL_EL0));
	write_cntp_cval_el0(read_ctx_reg(ctx, CTX_CNTP_CVAL_EL0));
	write_cntv_ctl_el0(read_ctx_reg(ctx, CTX_CNTV_CTL_EL0));
static void el1_sysregs_context_restore(el1_sysregs_t *ctx)
{
        write_spsr_el1(read_ctx_reg(ctx, CTX_SPSR_EL1));
        write_elr_el1(read_ctx_reg(ctx, CTX_ELR_EL1));

#if !ERRATA_SPECULATIVE_AT
        write_sctlr_el1(read_ctx_reg(ctx, CTX_SCTLR_EL1));
        write_tcr_el1(read_ctx_reg(ctx, CTX_TCR_EL1));
#endif /* (!ERRATA_SPECULATIVE_AT) */

        write_cpacr_el1(read_ctx_reg(ctx, CTX_CPACR_EL1));
        write_csselr_el1(read_ctx_reg(ctx, CTX_CSSELR_EL1));
        write_sp_el1(read_ctx_reg(ctx, CTX_SP_EL1));
        write_esr_el1(read_ctx_reg(ctx, CTX_ESR_EL1));
        write_ttbr0_el1(read_ctx_reg(ctx, CTX_TTBR0_EL1));
        write_ttbr1_el1(read_ctx_reg(ctx, CTX_TTBR1_EL1));
        write_mair_el1(read_ctx_reg(ctx, CTX_MAIR_EL1));
        write_amair_el1(read_ctx_reg(ctx, CTX_AMAIR_EL1));
        write_actlr_el1(read_ctx_reg(ctx, CTX_ACTLR_EL1));
        write_tpidr_el1(read_ctx_reg(ctx, CTX_TPIDR_EL1));
        write_tpidr_el0(read_ctx_reg(ctx, CTX_TPIDR_EL0));
        write_tpidrro_el0(read_ctx_reg(ctx, CTX_TPIDRRO_EL0));
        write_par_el1(read_ctx_reg(ctx, CTX_PAR_EL1));
        write_far_el1(read_ctx_reg(ctx, CTX_FAR_EL1));
        write_afsr0_el1(read_ctx_reg(ctx, CTX_AFSR0_EL1));
        write_afsr1_el1(read_ctx_reg(ctx, CTX_AFSR1_EL1));
        write_contextidr_el1(read_ctx_reg(ctx, CTX_CONTEXTIDR_EL1));
        write_vbar_el1(read_ctx_reg(ctx, CTX_VBAR_EL1));
        write_mdccint_el1(read_ctx_reg(ctx, CTX_MDCCINT_EL1));
        write_mdscr_el1(read_ctx_reg(ctx, CTX_MDSCR_EL1));

#if CTX_INCLUDE_AARCH32_REGS
        write_spsr_abt(read_ctx_reg(ctx, CTX_SPSR_ABT));
        write_spsr_und(read_ctx_reg(ctx, CTX_SPSR_UND));
        write_spsr_irq(read_ctx_reg(ctx, CTX_SPSR_IRQ));
        write_spsr_fiq(read_ctx_reg(ctx, CTX_SPSR_FIQ));
        write_dacr32_el2(read_ctx_reg(ctx, CTX_DACR32_EL2));
        write_ifsr32_el2(read_ctx_reg(ctx, CTX_IFSR32_EL2));
#endif /* CTX_INCLUDE_AARCH32_REGS */

#if NS_TIMER_SWITCH
        write_cntp_ctl_el0(read_ctx_reg(ctx, CTX_CNTP_CTL_EL0));
        write_cntp_cval_el0(read_ctx_reg(ctx, CTX_CNTP_CVAL_EL0));
        write_cntv_ctl_el0(read_ctx_reg(ctx, CTX_CNTV_CTL_EL0));
        write_cntv_cval_el0(read_ctx_reg(ctx, CTX_CNTV_CVAL_EL0));
        write_cntkctl_el1(read_ctx_reg(ctx, CTX_CNTKCTL_EL1));
#endif /* NS_TIMER_SWITCH */

#if ENABLE_FEAT_MTE2
        write_tfsre0_el1(read_ctx_reg(ctx, CTX_TFSRE0_EL1));
        write_tfsr_el1(read_ctx_reg(ctx, CTX_TFSR_EL1));
        write_rgsr_el1(read_ctx_reg(ctx, CTX_RGSR_EL1));
        write_gcr_el1(read_ctx_reg(ctx, CTX_GCR_EL1));
#endif /* ENABLE_FEAT_MTE2 */

#if ENABLE_FEAT_RAS
        if (is_feat_ras_supported()) {
                write_disr_el1(read_ctx_reg(ctx, CTX_DISR_EL1));
        }
#endif

#if ENABLE_FEAT_S1PIE
        if (is_feat_s1pie_supported()) {
                write_pire0_el1(read_ctx_reg(ctx, CTX_PIRE0_EL1));
                write_pir_el1(read_ctx_reg(ctx, CTX_PIR_EL1));
        }
#endif

#if ENABLE_FEAT_S1POE
        if (is_feat_s1poe_supported()) {
                write_por_el1(read_ctx_reg(ctx, CTX_POR_EL1));
        }
#endif

#if ENABLE_FEAT_S2POE
        if (is_feat_s2poe_supported()) {
                write_s2por_el1(read_ctx_reg(ctx, CTX_S2POR_EL1));
        }
#endif

#if ENABLE_FEAT_TCR2
        if (is_feat_tcr2_supported()) {
                write_tcr2_el1(read_ctx_reg(ctx, CTX_TCR2_EL1));
        }
#endif

#if ENABLE_TRF_FOR_NS
        if (is_feat_trf_supported()) {
                write_trfcr_el1(read_ctx_reg(ctx, CTX_TRFCR_EL1));
        }
#endif

#if ENABLE_FEAT_CSV2_2
        if (is_feat_csv2_2_supported()) {
                write_scxtnum_el0(read_ctx_reg(ctx, CTX_SCXTNUM_EL0));
                write_scxtnum_el1(read_ctx_reg(ctx, CTX_SCXTNUM_EL1));
        }
#endif

#if ENABLE_FEAT_GCS
        if (is_feat_gcs_supported()) {
                write_gcscr_el1(read_ctx_reg(ctx, CTX_GCSCR_EL1));
                write_gcscre0_el1(read_ctx_reg(ctx, CTX_GCSCRE0_EL1));
                write_gcspr_el1(read_ctx_reg(ctx, CTX_GCSPR_EL1));
                write_gcspr_el0(read_ctx_reg(ctx, CTX_GCSPR_EL0));
        }
#endif
}
/*******************************************************************************
 * The next two functions are used by runtime services to save and restore
 * EL1 context on the 'cpu_context' structure for the specified security
 * state.
 ******************************************************************************/
void cm_el1_sysregs_context_save(uint32_t security_state)
{
        cpu_context_t *ctx;

        ctx = cm_get_context(security_state);
        assert(ctx != NULL);

        el1_sysregs_context_save(get_el1_sysregs_ctx(ctx));

#if IMAGE_BL31
        if (security_state == SECURE)
                PUBLISH_EVENT(cm_exited_secure_world);
        else
                PUBLISH_EVENT(cm_exited_normal_world);
#endif
}

void cm_el1_sysregs_context_restore(uint32_t security_state)
{
        cpu_context_t *ctx;

        ctx = cm_get_context(security_state);
        assert(ctx != NULL);

        el1_sysregs_context_restore(get_el1_sysregs_ctx(ctx));

#if IMAGE_BL31
        if (security_state == SECURE)
                PUBLISH_EVENT(cm_entering_secure_world);
        else
                PUBLISH_EVENT(cm_entering_normal_world);
#endif
}

/*******************************************************************************
 * This function populates the ELR_EL3 member of 'cpu_context' pertaining to
 * the given security state with the given entrypoint.
 ******************************************************************************/
void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint)
{
        cpu_context_t *ctx;
        el3_state_t *state;

        ctx = cm_get_context(security_state);
        assert(ctx != NULL);

        /* Populate EL3 state so that ERET jumps to the correct entry */
        state = get_el3state_ctx(ctx);
        write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
}

/*******************************************************************************
 * This function populates the ELR_EL3 and SPSR_EL3 members of 'cpu_context'
 * pertaining to the given security state.
 ******************************************************************************/
void cm_set_elr_spsr_el3(uint32_t security_state,
                        uintptr_t entrypoint, uint32_t spsr)
{
        cpu_context_t *ctx;
        el3_state_t *state;

        ctx = cm_get_context(security_state);
        assert(ctx != NULL);

        /* Populate EL3 state so that ERET jumps to the correct entry */
        state = get_el3state_ctx(ctx);
        write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
        write_ctx_reg(state, CTX_SPSR_EL3, spsr);
}

/*******************************************************************************
 * This function updates a single bit in the SCR_EL3 member of the
 * 'cpu_context' pertaining to the given security state using the value and
 * bit position specified in the parameters. It preserves all other bits.
 ******************************************************************************/
void cm_write_scr_el3_bit(uint32_t security_state,
                          uint32_t bit_pos,
                          uint32_t value)
{
        cpu_context_t *ctx;
        el3_state_t *state;
        u_register_t scr_el3;

        ctx = cm_get_context(security_state);
        assert(ctx != NULL);

        /* Ensure that the bit position is a valid one */
        assert(((1UL << bit_pos) & SCR_VALID_BIT_MASK) != 0U);

        /* Ensure that the 'value' is only a bit wide */
        assert(value <= 1U);

        /*
         * Get the SCR_EL3 value from the cpu context, clear the desired bit
         * and set it to its new value.
         */
        state = get_el3state_ctx(ctx);
        scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
        scr_el3 &= ~(1UL << bit_pos);
        scr_el3 |= (u_register_t)value << bit_pos;
        write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
}
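/*
 * Illustrative sketch (guarded out): how a runtime service might use
 * cm_write_scr_el3_bit() to change interrupt routing for the non-secure
 * world. The function name and the scenario are assumptions for
 * illustration only; the bit position used is the architectural position
 * of SCR_EL3.FIQ (bit 2).
 */
#if 0
static void example_route_ns_fiq_to_el3(void)
{
        /* Set SCR_EL3.FIQ (bit 2) so that FIQs are taken to EL3. */
        cm_write_scr_el3_bit(NON_SECURE, 2U, 1U);

        /* Clearing it again routes FIQs back to the lower exception levels. */
        cm_write_scr_el3_bit(NON_SECURE, 2U, 0U);
}
#endif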
/*******************************************************************************
 * This function retrieves the SCR_EL3 member of 'cpu_context' pertaining to
 * the given security state.
 ******************************************************************************/
u_register_t cm_get_scr_el3(uint32_t security_state)
{
        cpu_context_t *ctx;
        el3_state_t *state;

        ctx = cm_get_context(security_state);
        assert(ctx != NULL);

        /* Return the SCR_EL3 value stored in the cpu context */
        state = get_el3state_ctx(ctx);
        return read_ctx_reg(state, CTX_SCR_EL3);
}

/*******************************************************************************
 * This function is used to program the context that's used for exception
 * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
 * the required security state.
 ******************************************************************************/
void cm_set_next_eret_context(uint32_t security_state)
{
        cpu_context_t *ctx;

        ctx = cm_get_context(security_state);
        assert(ctx != NULL);

        cm_set_next_context(ctx);
}
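/*
 * Illustrative sketch (guarded out): a minimal world-switch sequence of the
 * kind a secure payload dispatcher might build from the APIs above. The
 * function name, the entrypoint argument and the chosen SPSR value are
 * assumptions for illustration; only the cm_* calls are real APIs from this
 * file.
 */
#if 0
static void example_switch_to_non_secure(uintptr_t ns_entrypoint)
{
        /* Save the outgoing secure world's EL1 system register state. */
        cm_el1_sysregs_context_save(SECURE);

        /* Program where, and in which mode, the normal world will resume. */
        cm_set_elr_spsr_el3(NON_SECURE, ns_entrypoint,
                            SPSR_64(MODE_EL1, MODE_SP_ELX,
                                    DISABLE_ALL_EXCEPTIONS));

        /* Restore the incoming world's EL1 state and select its context. */
        cm_el1_sysregs_context_restore(NON_SECURE);
        cm_set_next_eret_context(NON_SECURE);

        /* The caller would then ERET via the EL3 exit path. */
}
#endif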