/*
 * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2022, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <drivers/arm/gicv3.h>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/brbe.h>
#include <lib/extensions/cpa2.h>
#include <lib/extensions/debug_v8p9.h>
#include <lib/extensions/fgt2.h>
#include <lib/extensions/mpam.h>
#include <lib/extensions/pauth.h>
#include <lib/extensions/pmuv3.h>
#include <lib/extensions/sme.h>
#include <lib/extensions/spe.h>
#include <lib/extensions/sve.h>
#include <lib/extensions/sysreg128.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/extensions/tcr2.h>
#include <lib/extensions/trbe.h>
#include <lib/extensions/trf.h>
#include <lib/utils.h>

#if ENABLE_FEAT_TWED
/* Make sure the delay value fits within the range (0-15) */
CASSERT(((TWED_DELAY & ~SCR_TWEDEL_MASK) == 0U), assert_twed_delay_value_check);
#endif /* ENABLE_FEAT_TWED */

per_world_context_t per_world_context[CPU_CONTEXT_NUM];

static void manage_extensions_nonsecure(cpu_context_t *ctx);
static void manage_extensions_secure(cpu_context_t *ctx);

#if ((IMAGE_BL1) || (IMAGE_BL31 && (!CTX_INCLUDE_EL2_REGS)))
static void setup_el1_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t sctlr_elx, actlr_elx;

	/*
	 * Initialise SCTLR_EL1 to the reset value corresponding to the target
	 * execution state, setting all fields rather than relying on the hw.
	 * Some fields have architecturally UNKNOWN reset values and these are
	 * set to zero.
	 *
	 * SCTLR.EE: Endianness is taken from the entrypoint attributes.
	 *
	 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
	 * required by the PSCI specification).
	 */
	sctlr_elx = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0UL;
	if (GET_RW(ep->spsr) == MODE_RW_64) {
		sctlr_elx |= SCTLR_EL1_RES1;
	} else {
		/*
		 * If the target execution state is AArch32 then the following
		 * fields need to be set.
		 *
		 * SCTLR_EL1.nTWE: Set to one so that EL0 execution of WFE
		 * instructions is not trapped to EL1.
		 *
		 * SCTLR_EL1.nTWI: Set to one so that EL0 execution of WFI
		 * instructions is not trapped to EL1.
		 *
		 * SCTLR_EL1.CP15BEN: Set to one to enable EL0 execution of the
		 * CP15DMB, CP15DSB, and CP15ISB instructions.
		 */
		sctlr_elx |= SCTLR_AARCH32_EL1_RES1 | SCTLR_CP15BEN_BIT
					| SCTLR_NTWI_BIT | SCTLR_NTWE_BIT;
	}

	/*
	 * If the workaround for Cortex-A75 erratum 764081 is used then set
	 * SCTLR_EL1.IESB to enable the Implicit Error Synchronization Barrier.
	 */
	if (errata_a75_764081_applies()) {
		sctlr_elx |= SCTLR_IESB_BIT;
	}

	/* Store the initialised SCTLR_EL1 value in the cpu_context */
	write_ctx_sctlr_el1_reg_errata(ctx, sctlr_elx);

	/*
	 * Base the context ACTLR_EL1 on the current value, as it is
	 * implementation defined.
The context restore process will write 106 * the value from the context to the actual register and can cause 107 * problems for processor cores that don't expect certain bits to 108 * be zero. 109 */ 110 actlr_elx = read_actlr_el1(); 111 write_el1_ctx_common(get_el1_sysregs_ctx(ctx), actlr_el1, actlr_elx); 112 } 113 #endif /* (IMAGE_BL1) || (IMAGE_BL31 && (!CTX_INCLUDE_EL2_REGS)) */ 114 115 /****************************************************************************** 116 * This function performs initializations that are specific to SECURE state 117 * and updates the cpu context specified by 'ctx'. 118 *****************************************************************************/ 119 static void setup_secure_context(cpu_context_t *ctx, const struct entry_point_info *ep) 120 { 121 u_register_t scr_el3; 122 el3_state_t *state; 123 124 state = get_el3state_ctx(ctx); 125 scr_el3 = read_ctx_reg(state, CTX_SCR_EL3); 126 127 #if defined(IMAGE_BL31) && !defined(SPD_spmd) 128 /* 129 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as 130 * indicated by the interrupt routing model for BL31. 131 */ 132 scr_el3 |= get_scr_el3_from_routing_model(SECURE); 133 #endif 134 135 /* Allow access to Allocation Tags when FEAT_MTE2 is implemented and enabled. */ 136 if (is_feat_mte2_supported()) { 137 scr_el3 |= SCR_ATA_BIT; 138 } 139 140 write_ctx_reg(state, CTX_SCR_EL3, scr_el3); 141 142 /* 143 * Initialize EL1 context registers unless SPMC is running 144 * at S-EL2. 145 */ 146 #if !CTX_INCLUDE_EL2_REGS || IMAGE_BL1 147 setup_el1_context(ctx, ep); 148 #endif 149 150 manage_extensions_secure(ctx); 151 } 152 153 #if ENABLE_RME && IMAGE_BL31 154 /****************************************************************************** 155 * This function performs initializations that are specific to REALM state 156 * and updates the cpu context specified by 'ctx'. 157 * 158 * NOTE: any changes to this function must be verified by an RMMD maintainer. 159 *****************************************************************************/ 160 static void setup_realm_context(cpu_context_t *ctx, const struct entry_point_info *ep) 161 { 162 u_register_t scr_el3; 163 el3_state_t *state; 164 el2_sysregs_t *el2_ctx; 165 166 state = get_el3state_ctx(ctx); 167 scr_el3 = read_ctx_reg(state, CTX_SCR_EL3); 168 el2_ctx = get_el2_sysregs_ctx(ctx); 169 170 scr_el3 |= SCR_NS_BIT | SCR_NSE_BIT; 171 172 write_el2_ctx_common(el2_ctx, spsr_el2, SPSR_EL2_REALM); 173 174 /* CSV2 version 2 and above */ 175 if (is_feat_csv2_2_supported()) { 176 /* Enable access to the SCXTNUM_ELx registers. */ 177 scr_el3 |= SCR_EnSCXT_BIT; 178 } 179 180 if (is_feat_sctlr2_supported()) { 181 /* Set the SCTLR2En bit in SCR_EL3 to enable access to 182 * SCTLR2_ELx registers. 183 */ 184 scr_el3 |= SCR_SCTLR2En_BIT; 185 } 186 187 if (is_feat_d128_supported()) { 188 /* 189 * Set the D128En bit in SCR_EL3 to enable access to 128-bit 190 * versions of TTBR0_EL1, TTBR1_EL1, RCWMASK_EL1, RCWSMASK_EL1, 191 * PAR_EL1 and TTBR1_EL2, TTBR0_EL2 and VTTBR_EL2 registers. 192 */ 193 scr_el3 |= SCR_D128En_BIT; 194 } 195 196 write_ctx_reg(state, CTX_SCR_EL3, scr_el3); 197 198 if (is_feat_fgt2_supported()) { 199 fgt2_enable(ctx); 200 } 201 202 if (is_feat_debugv8p9_supported()) { 203 debugv8p9_extended_bp_wp_enable(ctx); 204 } 205 206 if (is_feat_brbe_supported()) { 207 brbe_enable(ctx); 208 } 209 210 /* 211 * Enable access to TPIDR2_EL0 if SME/SME2 is enabled for Non Secure world. 
212 */ 213 if (is_feat_sme_supported()) { 214 sme_enable(ctx); 215 } 216 217 if (is_feat_spe_supported()) { 218 spe_disable_realm(ctx); 219 } 220 221 if (is_feat_trbe_supported()) { 222 trbe_disable_realm(ctx); 223 } 224 } 225 #endif /* ENABLE_RME && IMAGE_BL31 */ 226 227 /****************************************************************************** 228 * This function performs initializations that are specific to NON-SECURE state 229 * and updates the cpu context specified by 'ctx'. 230 *****************************************************************************/ 231 static void setup_ns_context(cpu_context_t *ctx, const struct entry_point_info *ep) 232 { 233 u_register_t scr_el3; 234 el3_state_t *state; 235 236 state = get_el3state_ctx(ctx); 237 scr_el3 = read_ctx_reg(state, CTX_SCR_EL3); 238 239 /* SCR_NS: Set the NS bit */ 240 scr_el3 |= SCR_NS_BIT; 241 242 /* Allow access to Allocation Tags when FEAT_MTE2 is implemented and enabled. */ 243 if (is_feat_mte2_supported()) { 244 scr_el3 |= SCR_ATA_BIT; 245 } 246 247 /* 248 * Pointer Authentication feature, if present, is always enabled by 249 * default for Non secure lower exception levels. We do not have an 250 * explicit flag to set it. To prevent the leakage between the worlds 251 * during world switch, we enable it only for the non-secure world. 252 * 253 * CTX_INCLUDE_PAUTH_REGS flag, is explicitly used to enable for lower 254 * exception levels of secure and realm worlds. 255 * 256 * If the Secure/realm world wants to use pointer authentication, 257 * CTX_INCLUDE_PAUTH_REGS must be explicitly set to 1, in which case 258 * it will be enabled globally for all the contexts. 259 * 260 * SCR_EL3.API: Set to one to not trap any PAuth instructions at ELs 261 * other than EL3 262 * 263 * SCR_EL3.APK: Set to one to not trap any PAuth key values at ELs other 264 * than EL3 265 */ 266 if (!is_ctx_pauth_supported()) { 267 scr_el3 |= SCR_API_BIT | SCR_APK_BIT; 268 } 269 270 #if HANDLE_EA_EL3_FIRST_NS 271 /* SCR_EL3.EA: Route External Abort and SError Interrupt to EL3. */ 272 scr_el3 |= SCR_EA_BIT; 273 #endif 274 275 #if RAS_TRAP_NS_ERR_REC_ACCESS 276 /* 277 * SCR_EL3.TERR: Trap Error record accesses. Accesses to the RAS ERR 278 * and RAS ERX registers from EL1 and EL2(from any security state) 279 * are trapped to EL3. 280 * Set here to trap only for NS EL1/EL2 281 */ 282 scr_el3 |= SCR_TERR_BIT; 283 #endif 284 285 /* CSV2 version 2 and above */ 286 if (is_feat_csv2_2_supported()) { 287 /* Enable access to the SCXTNUM_ELx registers. */ 288 scr_el3 |= SCR_EnSCXT_BIT; 289 } 290 291 #ifdef IMAGE_BL31 292 /* 293 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as 294 * indicated by the interrupt routing model for BL31. 295 */ 296 scr_el3 |= get_scr_el3_from_routing_model(NON_SECURE); 297 #endif 298 299 if (is_feat_the_supported()) { 300 /* Set the RCWMASKEn bit in SCR_EL3 to enable access to 301 * RCWMASK_EL1 and RCWSMASK_EL1 registers. 302 */ 303 scr_el3 |= SCR_RCWMASKEn_BIT; 304 } 305 306 if (is_feat_sctlr2_supported()) { 307 /* Set the SCTLR2En bit in SCR_EL3 to enable access to 308 * SCTLR2_ELx registers. 309 */ 310 scr_el3 |= SCR_SCTLR2En_BIT; 311 } 312 313 if (is_feat_d128_supported()) { 314 /* Set the D128En bit in SCR_EL3 to enable access to 128-bit 315 * versions of TTBR0_EL1, TTBR1_EL1, RCWMASK_EL1, RCWSMASK_EL1, 316 * PAR_EL1 and TTBR1_EL2, TTBR0_EL2 and VTTBR_EL2 registers. 
317 */ 318 scr_el3 |= SCR_D128En_BIT; 319 } 320 321 if (is_feat_fpmr_supported()) { 322 /* Set the EnFPM bit in SCR_EL3 to enable access to FPMR 323 * register. 324 */ 325 scr_el3 |= SCR_EnFPM_BIT; 326 } 327 328 if (is_feat_aie_supported()) { 329 /* Set the AIEn bit in SCR_EL3 to enable access to (A)MAIR2 330 * system registers from NS world. 331 */ 332 scr_el3 |= SCR_AIEn_BIT; 333 } 334 335 if (is_feat_pfar_supported()) { 336 /* Set the PFAREn bit in SCR_EL3 to enable access to the PFAR 337 * system registers from NS world. 338 */ 339 scr_el3 |= SCR_PFAREn_BIT; 340 } 341 342 write_ctx_reg(state, CTX_SCR_EL3, scr_el3); 343 344 /* Initialize EL2 context registers */ 345 #if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) 346 if (is_feat_hcx_supported()) { 347 /* 348 * Initialize register HCRX_EL2 with its init value. 349 * As the value of HCRX_EL2 is UNKNOWN on reset, there is a 350 * chance that this can lead to unexpected behavior in lower 351 * ELs that have not been updated since the introduction of 352 * this feature if not properly initialized, especially when 353 * it comes to those bits that enable/disable traps. 354 */ 355 write_el2_ctx_hcx(get_el2_sysregs_ctx(ctx), hcrx_el2, 356 HCRX_EL2_INIT_VAL); 357 } 358 359 if (is_feat_fgt_supported()) { 360 /* 361 * Initialize HFG*_EL2 registers with a default value so legacy 362 * systems unaware of FEAT_FGT do not get trapped due to their lack 363 * of initialization for this feature. 364 */ 365 write_el2_ctx_fgt(get_el2_sysregs_ctx(ctx), hfgitr_el2, 366 HFGITR_EL2_INIT_VAL); 367 write_el2_ctx_fgt(get_el2_sysregs_ctx(ctx), hfgrtr_el2, 368 HFGRTR_EL2_INIT_VAL); 369 write_el2_ctx_fgt(get_el2_sysregs_ctx(ctx), hfgwtr_el2, 370 HFGWTR_EL2_INIT_VAL); 371 } 372 #else 373 /* Initialize EL1 context registers */ 374 setup_el1_context(ctx, ep); 375 #endif /* (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) */ 376 377 manage_extensions_nonsecure(ctx); 378 } 379 380 /******************************************************************************* 381 * The following function performs initialization of the cpu_context 'ctx' 382 * for first use that is common to all security states, and sets the 383 * initial entrypoint state as specified by the entry_point_info structure. 384 * 385 * The EE and ST attributes are used to configure the endianness and secure 386 * timer availability for the new execution context. 387 ******************************************************************************/ 388 static void setup_context_common(cpu_context_t *ctx, const entry_point_info_t *ep) 389 { 390 u_register_t scr_el3; 391 u_register_t mdcr_el3; 392 el3_state_t *state; 393 gp_regs_t *gp_regs; 394 395 state = get_el3state_ctx(ctx); 396 397 /* Clear any residual register values from the context */ 398 zeromem(ctx, sizeof(*ctx)); 399 400 /* 401 * The lower-EL context is zeroed so that no stale values leak to a world. 402 * It is assumed that an all-zero lower-EL context is good enough for it 403 * to boot correctly. However, there are very few registers where this 404 * is not true and some values need to be recreated. 405 */ 406 #if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) 407 el2_sysregs_t *el2_ctx = get_el2_sysregs_ctx(ctx); 408 409 /* 410 * These bits are set in the gicv3 driver. Losing them (especially the 411 * SRE bit) is problematic for all worlds. Henceforth recreate them. 
412 */ 413 u_register_t icc_sre_el2_val = ICC_SRE_DIB_BIT | ICC_SRE_DFB_BIT | 414 ICC_SRE_EN_BIT | ICC_SRE_SRE_BIT; 415 write_el2_ctx_common(el2_ctx, icc_sre_el2, icc_sre_el2_val); 416 417 /* 418 * The actlr_el2 register can be initialized in platform's reset handler 419 * and it may contain access control bits (e.g. CLUSTERPMUEN bit). 420 */ 421 write_el2_ctx_common(el2_ctx, actlr_el2, read_actlr_el2()); 422 #endif /* (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) */ 423 424 /* Start with a clean SCR_EL3 copy as all relevant values are set */ 425 scr_el3 = SCR_RESET_VAL; 426 427 /* 428 * SCR_EL3.TWE: Set to zero so that execution of WFE instructions at 429 * EL2, EL1 and EL0 are not trapped to EL3. 430 * 431 * SCR_EL3.TWI: Set to zero so that execution of WFI instructions at 432 * EL2, EL1 and EL0 are not trapped to EL3. 433 * 434 * SCR_EL3.SMD: Set to zero to enable SMC calls at EL1 and above, from 435 * both Security states and both Execution states. 436 * 437 * SCR_EL3.SIF: Set to one to disable secure instruction execution from 438 * Non-secure memory. 439 */ 440 scr_el3 &= ~(SCR_TWE_BIT | SCR_TWI_BIT | SCR_SMD_BIT); 441 442 scr_el3 |= SCR_SIF_BIT; 443 444 /* 445 * SCR_EL3.RW: Set the execution state, AArch32 or AArch64, for next 446 * Exception level as specified by SPSR. 447 */ 448 if (GET_RW(ep->spsr) == MODE_RW_64) { 449 scr_el3 |= SCR_RW_BIT; 450 } 451 452 /* 453 * SCR_EL3.ST: Traps Secure EL1 accesses to the Counter-timer Physical 454 * Secure timer registers to EL3, from AArch64 state only, if specified 455 * by the entrypoint attributes. If SEL2 is present and enabled, the ST 456 * bit always behaves as 1 (i.e. secure physical timer register access 457 * is not trapped) 458 */ 459 if (EP_GET_ST(ep->h.attr) != 0U) { 460 scr_el3 |= SCR_ST_BIT; 461 } 462 463 /* 464 * If FEAT_HCX is enabled, enable access to HCRX_EL2 by setting 465 * SCR_EL3.HXEn. 466 */ 467 if (is_feat_hcx_supported()) { 468 scr_el3 |= SCR_HXEn_BIT; 469 } 470 471 /* 472 * If FEAT_LS64_ACCDATA is enabled, enable access to ACCDATA_EL1 by 473 * setting SCR_EL3.ADEn and allow the ST64BV0 instruction by setting 474 * SCR_EL3.EnAS0. 475 */ 476 if (is_feat_ls64_accdata_supported()) { 477 scr_el3 |= SCR_ADEn_BIT | SCR_EnAS0_BIT; 478 } 479 480 /* 481 * If FEAT_RNG_TRAP is enabled, all reads of the RNDR and RNDRRS 482 * registers are trapped to EL3. 483 */ 484 if (is_feat_rng_trap_supported()) { 485 scr_el3 |= SCR_TRNDR_BIT; 486 } 487 488 #if FAULT_INJECTION_SUPPORT 489 /* Enable fault injection from lower ELs */ 490 scr_el3 |= SCR_FIEN_BIT; 491 #endif 492 493 /* 494 * Enable Pointer Authentication globally for all the worlds. 495 * 496 * SCR_EL3.API: Set to one to not trap any PAuth instructions at ELs 497 * other than EL3 498 * 499 * SCR_EL3.APK: Set to one to not trap any PAuth key values at ELs other 500 * than EL3 501 */ 502 if (is_ctx_pauth_supported()) { 503 scr_el3 |= SCR_API_BIT | SCR_APK_BIT; 504 } 505 506 /* 507 * SCR_EL3.PIEN: Enable permission indirection and overlay 508 * registers for AArch64 if present. 509 */ 510 if (is_feat_sxpie_supported() || is_feat_sxpoe_supported()) { 511 scr_el3 |= SCR_PIEN_BIT; 512 } 513 514 /* 515 * SCR_EL3.GCSEn: Enable GCS registers for AArch64 if present. 516 */ 517 if ((is_feat_gcs_supported()) && (GET_RW(ep->spsr) == MODE_RW_64)) { 518 scr_el3 |= SCR_GCSEn_BIT; 519 } 520 521 /* 522 * SCR_EL3.HCE: Enable HVC instructions if next execution state is 523 * AArch64 and next EL is EL2, or if next execution state is AArch32 and 524 * next mode is Hyp. 
	 * SCR_EL3.FGTEn: Enable Fine Grained Virtualization Traps under the
	 * same conditions as HVC instructions and when the processor supports
	 * ARMv8.6-FGT.
	 * SCR_EL3.ECVEn: Enable Enhanced Counter Virtualization (ECV)
	 * CNTPOFF_EL2 register under the same conditions as HVC instructions
	 * and when the processor supports ECV.
	 */
	if (((GET_RW(ep->spsr) == MODE_RW_64) && (GET_EL(ep->spsr) == MODE_EL2))
	    || ((GET_RW(ep->spsr) != MODE_RW_64)
		&& (GET_M32(ep->spsr) == MODE32_hyp))) {
		scr_el3 |= SCR_HCE_BIT;

		if (is_feat_fgt_supported()) {
			scr_el3 |= SCR_FGTEN_BIT;
		}

		if (is_feat_ecv_supported()) {
			scr_el3 |= SCR_ECVEN_BIT;
		}
	}

	/* Enable WFE trap delay in SCR_EL3 if supported and configured */
	if (is_feat_twed_supported()) {
		/* Set delay in SCR_EL3 */
		scr_el3 &= ~(SCR_TWEDEL_MASK << SCR_TWEDEL_SHIFT);
		scr_el3 |= ((TWED_DELAY & SCR_TWEDEL_MASK)
				<< SCR_TWEDEL_SHIFT);

		/* Enable WFE delay */
		scr_el3 |= SCR_TWEDEn_BIT;
	}

#if IMAGE_BL31 && defined(SPD_spmd) && SPMD_SPM_AT_SEL2
	/* Enable S-EL2 if FEAT_SEL2 is implemented for all the contexts. */
	if (is_feat_sel2_supported()) {
		scr_el3 |= SCR_EEL2_BIT;
	}
#endif /* (IMAGE_BL31 && defined(SPD_spmd) && SPMD_SPM_AT_SEL2) */

	if (is_feat_mec_supported()) {
		scr_el3 |= SCR_MECEn_BIT;
	}

	/*
	 * Populate the EL3 state so that we have the right context
	 * before doing ERET.
	 */
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
	write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
	write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr);

	/* Start with a clean MDCR_EL3 copy as all relevant values are set */
	mdcr_el3 = MDCR_EL3_RESET_VAL;

	/* ---------------------------------------------------------------------
	 * Initialise MDCR_EL3, setting all fields rather than relying on hw.
	 * Some fields are architecturally UNKNOWN on reset.
	 *
	 * MDCR_EL3.SDD: Set to one to disable AArch64 Secure self-hosted debug.
	 * Debug exceptions, other than Breakpoint Instruction exceptions, are
	 * disabled from all ELs in Secure state.
	 *
	 * MDCR_EL3.SPD32: Set to 0b10 to disable AArch32 Secure self-hosted
	 * privileged debug from S-EL1.
	 *
	 * MDCR_EL3.TDOSA: Set to zero so that EL1 and EL2 System register
	 * accesses to the powerdown debug registers do not trap to EL3.
	 *
	 * MDCR_EL3.TDA: Set to zero to allow EL0, EL1 and EL2 access to the
	 * debug registers, other than those registers that are controlled by
	 * MDCR_EL3.TDOSA.
	 */
	mdcr_el3 |= ((MDCR_SDD_BIT | MDCR_SPD32(MDCR_SPD32_DISABLE))
			& ~(MDCR_TDA_BIT | MDCR_TDOSA_BIT));
	write_ctx_reg(state, CTX_MDCR_EL3, mdcr_el3);

#if IMAGE_BL31
	/* Enable FEAT_TRF for Non-Secure and prohibit for Secure state. */
	if (is_feat_trf_supported()) {
		trf_enable(ctx);
	}

	if (is_feat_tcr2_supported()) {
		tcr2_enable(ctx);
	}

	pmuv3_enable(ctx);

#if CTX_INCLUDE_EL2_REGS && IMAGE_BL31
	/*
	 * Initialize SCTLR_EL2 context register with reset value.
616 */ 617 write_el2_ctx_common(get_el2_sysregs_ctx(ctx), sctlr_el2, SCTLR_EL2_RES1); 618 #endif /* CTX_INCLUDE_EL2_REGS */ 619 #endif /* IMAGE_BL31 */ 620 621 /* 622 * Store the X0-X7 value from the entrypoint into the context 623 * Use memcpy as we are in control of the layout of the structures 624 */ 625 gp_regs = get_gpregs_ctx(ctx); 626 memcpy((void *)gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t)); 627 } 628 629 /******************************************************************************* 630 * Context management library initialization routine. This library is used by 631 * runtime services to share pointers to 'cpu_context' structures for secure 632 * non-secure and realm states. Management of the structures and their associated 633 * memory is not done by the context management library e.g. the PSCI service 634 * manages the cpu context used for entry from and exit to the non-secure state. 635 * The Secure payload dispatcher service manages the context(s) corresponding to 636 * the secure state. It also uses this library to get access to the non-secure 637 * state cpu context pointers. 638 * Lastly, this library provides the API to make SP_EL3 point to the cpu context 639 * which will be used for programming an entry into a lower EL. The same context 640 * will be used to save state upon exception entry from that EL. 641 ******************************************************************************/ 642 void __init cm_init(void) 643 { 644 /* 645 * The context management library has only global data to initialize, but 646 * that will be done when the BSS is zeroed out. 647 */ 648 } 649 650 /******************************************************************************* 651 * This is the high-level function used to initialize the cpu_context 'ctx' for 652 * first use. It performs initializations that are common to all security states 653 * and initializations specific to the security state specified in 'ep' 654 ******************************************************************************/ 655 void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep) 656 { 657 size_t security_state; 658 659 assert(ctx != NULL); 660 661 /* 662 * Perform initializations that are common 663 * to all security states 664 */ 665 setup_context_common(ctx, ep); 666 667 security_state = GET_SECURITY_STATE(ep->h.attr); 668 669 /* Perform security state specific initializations */ 670 switch (security_state) { 671 case SECURE: 672 setup_secure_context(ctx, ep); 673 break; 674 #if ENABLE_RME && IMAGE_BL31 675 case REALM: 676 setup_realm_context(ctx, ep); 677 break; 678 #endif 679 case NON_SECURE: 680 setup_ns_context(ctx, ep); 681 break; 682 default: 683 ERROR("Invalid security state\n"); 684 panic(); 685 break; 686 } 687 } 688 689 /******************************************************************************* 690 * Enable architecture extensions for EL3 execution. This function only updates 691 * registers in-place which are expected to either never change or be 692 * overwritten by el3_exit. Expects the core_pos of the current core as argument. 
693 ******************************************************************************/ 694 void __no_pauth cm_manage_extensions_el3(unsigned int my_idx) 695 { 696 if (is_feat_pauth_supported()) { 697 pauth_init_enable_el3(); 698 } 699 700 #if IMAGE_BL31 701 if (is_feat_sve_supported()) { 702 sve_init_el3(); 703 } 704 705 if (is_feat_amu_supported()) { 706 amu_init_el3(my_idx); 707 } 708 709 if (is_feat_sme_supported()) { 710 sme_init_el3(); 711 } 712 713 if (is_feat_fgwte3_supported()) { 714 write_fgwte3_el3(FGWTE3_EL3_EARLY_INIT_VAL); 715 } 716 717 if (is_feat_mpam_supported()) { 718 mpam_init_el3(); 719 } 720 721 if (is_feat_cpa2_supported()) { 722 cpa2_enable_el3(); 723 } 724 725 pmuv3_init_el3(); 726 #endif /* IMAGE_BL31 */ 727 } 728 729 /****************************************************************************** 730 * Function to initialise the registers with the RESET values in the context 731 * memory, which are maintained per world. 732 ******************************************************************************/ 733 static void cm_el3_arch_init_per_world(per_world_context_t *per_world_ctx) 734 { 735 per_world_ctx->ctx_cptr_el3 = CPTR_EL3_RESET_VAL; 736 per_world_ctx->ctx_mpam3_el3 = MPAM3_EL3_RESET_VAL; 737 } 738 739 /******************************************************************************* 740 * Initialise per_world_context for Non-Secure world. 741 * This function enables the architecture extensions, which have same value 742 * across the cores for the non-secure world. 743 ******************************************************************************/ 744 static void manage_extensions_nonsecure_per_world(void) 745 { 746 cm_el3_arch_init_per_world(&per_world_context[CPU_CONTEXT_NS]); 747 748 #if IMAGE_BL31 749 if (is_feat_sme_supported()) { 750 sme_enable_per_world(&per_world_context[CPU_CONTEXT_NS]); 751 } 752 753 if (is_feat_sve_supported()) { 754 sve_enable_per_world(&per_world_context[CPU_CONTEXT_NS]); 755 } 756 757 if (is_feat_amu_supported()) { 758 amu_enable_per_world(&per_world_context[CPU_CONTEXT_NS]); 759 } 760 761 if (is_feat_sys_reg_trace_supported()) { 762 sys_reg_trace_enable_per_world(&per_world_context[CPU_CONTEXT_NS]); 763 } 764 765 if (is_feat_mpam_supported()) { 766 mpam_enable_per_world(&per_world_context[CPU_CONTEXT_NS]); 767 } 768 #endif /* IMAGE_BL31 */ 769 } 770 771 /******************************************************************************* 772 * Initialise per_world_context for Secure world. 773 * This function enables the architecture extensions, which have same value 774 * across the cores for the secure world. 775 ******************************************************************************/ 776 static void manage_extensions_secure_per_world(void) 777 { 778 cm_el3_arch_init_per_world(&per_world_context[CPU_CONTEXT_SECURE]); 779 780 #if IMAGE_BL31 781 if (is_feat_sme_supported()) { 782 783 if (ENABLE_SME_FOR_SWD) { 784 /* 785 * Enable SME, SVE, FPU/SIMD in secure context, SPM must ensure 786 * SME, SVE, and FPU/SIMD context properly managed. 787 */ 788 sme_enable_per_world(&per_world_context[CPU_CONTEXT_SECURE]); 789 } else { 790 /* 791 * Disable SME, SVE, FPU/SIMD in secure context so non-secure 792 * world can safely use the associated registers. 793 */ 794 sme_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]); 795 } 796 } 797 if (is_feat_sve_supported()) { 798 if (ENABLE_SVE_FOR_SWD) { 799 /* 800 * Enable SVE and FPU in secure context, SPM must ensure 801 * that the SVE and FPU register contexts are properly managed. 
802 */ 803 sve_enable_per_world(&per_world_context[CPU_CONTEXT_SECURE]); 804 } else { 805 /* 806 * Disable SVE and FPU in secure context so non-secure world 807 * can safely use them. 808 */ 809 sve_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]); 810 } 811 } 812 813 /* NS can access this but Secure shouldn't */ 814 if (is_feat_sys_reg_trace_supported()) { 815 sys_reg_trace_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]); 816 } 817 #endif /* IMAGE_BL31 */ 818 } 819 820 static void manage_extensions_realm_per_world(void) 821 { 822 #if ENABLE_RME && IMAGE_BL31 823 cm_el3_arch_init_per_world(&per_world_context[CPU_CONTEXT_REALM]); 824 825 if (is_feat_sve_supported()) { 826 /* 827 * Enable SVE and FPU in realm context when it is enabled for NS. 828 * Realm manager must ensure that the SVE and FPU register 829 * contexts are properly managed. 830 */ 831 sve_enable_per_world(&per_world_context[CPU_CONTEXT_REALM]); 832 } 833 834 /* NS can access this but Realm shouldn't */ 835 if (is_feat_sys_reg_trace_supported()) { 836 sys_reg_trace_disable_per_world(&per_world_context[CPU_CONTEXT_REALM]); 837 } 838 839 /* 840 * If SME/SME2 is supported and enabled for NS world, then disable trapping 841 * of SME instructions for Realm world. RMM will save/restore required 842 * registers that are shared with SVE/FPU so that Realm can use FPU or SVE. 843 */ 844 if (is_feat_sme_supported()) { 845 sme_enable_per_world(&per_world_context[CPU_CONTEXT_REALM]); 846 } 847 848 /* 849 * If FEAT_MPAM is supported and enabled, then disable trapping access 850 * to the MPAM registers for Realm world. Instead, RMM will configure 851 * the access to be trapped by itself so it can inject undefined aborts 852 * back to the Realm. 853 */ 854 if (is_feat_mpam_supported()) { 855 mpam_enable_per_world(&per_world_context[CPU_CONTEXT_REALM]); 856 } 857 #endif /* ENABLE_RME && IMAGE_BL31 */ 858 } 859 860 void cm_manage_extensions_per_world(void) 861 { 862 manage_extensions_nonsecure_per_world(); 863 manage_extensions_secure_per_world(); 864 manage_extensions_realm_per_world(); 865 } 866 867 /******************************************************************************* 868 * Enable architecture extensions on first entry to Non-secure world. 869 ******************************************************************************/ 870 static void manage_extensions_nonsecure(cpu_context_t *ctx) 871 { 872 #if IMAGE_BL31 873 /* NOTE: registers are not context switched */ 874 if (is_feat_amu_supported()) { 875 amu_enable(ctx); 876 } 877 878 if (is_feat_sme_supported()) { 879 sme_enable(ctx); 880 } 881 882 if (is_feat_fgt2_supported()) { 883 fgt2_enable(ctx); 884 } 885 886 if (is_feat_debugv8p9_supported()) { 887 debugv8p9_extended_bp_wp_enable(ctx); 888 } 889 890 if (is_feat_spe_supported()) { 891 spe_enable_ns(ctx); 892 } 893 894 if (is_feat_trbe_supported()) { 895 if (check_if_trbe_disable_affected_core()) { 896 trbe_disable_ns(ctx); 897 } else { 898 trbe_enable_ns(ctx); 899 } 900 } 901 902 if (is_feat_brbe_supported()) { 903 brbe_enable(ctx); 904 } 905 #endif /* IMAGE_BL31 */ 906 } 907 908 #if INIT_UNUSED_NS_EL2 909 /******************************************************************************* 910 * Enable architecture extensions in-place at EL2 on first entry to Non-secure 911 * world when EL2 is empty and unused. 
912 ******************************************************************************/ 913 static void manage_extensions_nonsecure_el2_unused(void) 914 { 915 #if IMAGE_BL31 916 if (is_feat_spe_supported()) { 917 spe_init_el2_unused(); 918 } 919 920 if (is_feat_amu_supported()) { 921 amu_init_el2_unused(); 922 } 923 924 if (is_feat_mpam_supported()) { 925 mpam_init_el2_unused(); 926 } 927 928 if (is_feat_trbe_supported()) { 929 trbe_init_el2_unused(); 930 } 931 932 if (is_feat_sys_reg_trace_supported()) { 933 sys_reg_trace_init_el2_unused(); 934 } 935 936 if (is_feat_trf_supported()) { 937 trf_init_el2_unused(); 938 } 939 940 pmuv3_init_el2_unused(); 941 942 if (is_feat_sve_supported()) { 943 sve_init_el2_unused(); 944 } 945 946 if (is_feat_sme_supported()) { 947 sme_init_el2_unused(); 948 } 949 950 if (is_feat_mops_supported() && is_feat_hcx_supported()) { 951 write_hcrx_el2(read_hcrx_el2() | HCRX_EL2_MSCEn_BIT); 952 } 953 954 if (is_feat_pauth_supported()) { 955 pauth_enable_el2(); 956 } 957 #endif /* IMAGE_BL31 */ 958 } 959 #endif /* INIT_UNUSED_NS_EL2 */ 960 961 /******************************************************************************* 962 * Enable architecture extensions on first entry to Secure world. 963 ******************************************************************************/ 964 static void manage_extensions_secure(cpu_context_t *ctx) 965 { 966 #if IMAGE_BL31 967 if (is_feat_sme_supported()) { 968 if (ENABLE_SME_FOR_SWD) { 969 /* 970 * Enable SME, SVE, FPU/SIMD in secure context, secure manager 971 * must ensure SME, SVE, and FPU/SIMD context properly managed. 972 */ 973 sme_init_el3(); 974 sme_enable(ctx); 975 } else { 976 /* 977 * Disable SME, SVE, FPU/SIMD in secure context so non-secure 978 * world can safely use the associated registers. 979 */ 980 sme_disable(ctx); 981 } 982 } 983 984 if (is_feat_spe_supported()) { 985 spe_disable_secure(ctx); 986 } 987 988 if (is_feat_trbe_supported()) { 989 trbe_disable_secure(ctx); 990 } 991 #endif /* IMAGE_BL31 */ 992 } 993 994 /******************************************************************************* 995 * The following function initializes the cpu_context for the current CPU 996 * for first use, and sets the initial entrypoint state as specified by the 997 * entry_point_info structure. 998 ******************************************************************************/ 999 void cm_init_my_context(const entry_point_info_t *ep) 1000 { 1001 cpu_context_t *ctx; 1002 ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr)); 1003 cm_setup_context(ctx, ep); 1004 } 1005 1006 /* EL2 present but unused, need to disable safely. SCTLR_EL2 can be ignored */ 1007 static void init_nonsecure_el2_unused(cpu_context_t *ctx) 1008 { 1009 #if INIT_UNUSED_NS_EL2 1010 u_register_t hcr_el2 = HCR_RESET_VAL; 1011 u_register_t mdcr_el2; 1012 u_register_t scr_el3; 1013 1014 scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3); 1015 1016 /* Set EL2 register width: Set HCR_EL2.RW to match SCR_EL3.RW */ 1017 if ((scr_el3 & SCR_RW_BIT) != 0U) { 1018 hcr_el2 |= HCR_RW_BIT; 1019 } 1020 1021 write_hcr_el2(hcr_el2); 1022 1023 /* 1024 * Initialise CPTR_EL2 setting all fields rather than relying on the hw. 1025 * All fields have architecturally UNKNOWN reset values. 1026 */ 1027 write_cptr_el2(CPTR_EL2_RESET_VAL); 1028 1029 /* 1030 * Initialise CNTHCTL_EL2. All fields are architecturally UNKNOWN on 1031 * reset and are set to zero except for field(s) listed below. 
1032 * 1033 * CNTHCTL_EL2.EL1PTEN: Set to one to disable traps to Hyp mode of 1034 * Non-secure EL0 and EL1 accesses to the physical timer registers. 1035 * 1036 * CNTHCTL_EL2.EL1PCTEN: Set to one to disable traps to Hyp mode of 1037 * Non-secure EL0 and EL1 accesses to the physical counter registers. 1038 */ 1039 write_cnthctl_el2(CNTHCTL_RESET_VAL | EL1PCEN_BIT | EL1PCTEN_BIT); 1040 1041 /* 1042 * Initialise CNTVOFF_EL2 to zero as it resets to an architecturally 1043 * UNKNOWN value. 1044 */ 1045 write_cntvoff_el2(0); 1046 1047 /* 1048 * Set VPIDR_EL2 and VMPIDR_EL2 to match MIDR_EL1 and MPIDR_EL1 1049 * respectively. 1050 */ 1051 write_vpidr_el2(read_midr_el1()); 1052 write_vmpidr_el2(read_mpidr_el1()); 1053 1054 /* 1055 * Initialise VTTBR_EL2. All fields are architecturally UNKNOWN on reset. 1056 * 1057 * VTTBR_EL2.VMID: Set to zero. Even though EL1&0 stage 2 address 1058 * translation is disabled, cache maintenance operations depend on the 1059 * VMID. 1060 * 1061 * VTTBR_EL2.BADDR: Set to zero as EL1&0 stage 2 address translation is 1062 * disabled. 1063 */ 1064 write_vttbr_el2(VTTBR_RESET_VAL & 1065 ~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT) | 1066 (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT))); 1067 1068 /* 1069 * Initialise MDCR_EL2, setting all fields rather than relying on hw. 1070 * Some fields are architecturally UNKNOWN on reset. 1071 * 1072 * MDCR_EL2.TDRA: Set to zero so that Non-secure EL0 and EL1 System 1073 * register accesses to the Debug ROM registers are not trapped to EL2. 1074 * 1075 * MDCR_EL2.TDOSA: Set to zero so that Non-secure EL1 System register 1076 * accesses to the powerdown debug registers are not trapped to EL2. 1077 * 1078 * MDCR_EL2.TDA: Set to zero so that System register accesses to the 1079 * debug registers do not trap to EL2. 1080 * 1081 * MDCR_EL2.TDE: Set to zero so that debug exceptions are not routed to 1082 * EL2. 1083 */ 1084 mdcr_el2 = MDCR_EL2_RESET_VAL & 1085 ~(MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT | MDCR_EL2_TDA_BIT | 1086 MDCR_EL2_TDE_BIT); 1087 1088 write_mdcr_el2(mdcr_el2); 1089 1090 /* 1091 * Initialise HSTR_EL2. All fields are architecturally UNKNOWN on reset. 1092 * 1093 * HSTR_EL2.T<n>: Set all these fields to zero so that Non-secure EL0 or 1094 * EL1 accesses to System registers do not trap to EL2. 1095 */ 1096 write_hstr_el2(HSTR_EL2_RESET_VAL & ~(HSTR_EL2_T_MASK)); 1097 1098 /* 1099 * Initialise CNTHP_CTL_EL2. All fields are architecturally UNKNOWN on 1100 * reset. 1101 * 1102 * CNTHP_CTL_EL2:ENABLE: Set to zero to disable the EL2 physical timer 1103 * and prevent timer interrupts. 1104 */ 1105 write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL & ~(CNTHP_CTL_ENABLE_BIT)); 1106 1107 manage_extensions_nonsecure_el2_unused(); 1108 #endif /* INIT_UNUSED_NS_EL2 */ 1109 } 1110 1111 /******************************************************************************* 1112 * Prepare the CPU system registers for first entry into realm, secure, or 1113 * normal world. 1114 * 1115 * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized 1116 * If execution is requested to non-secure EL1 or svc mode, and the CPU supports 1117 * EL2 then EL2 is disabled by configuring all necessary EL2 registers. 
 * For all entries, the EL1 registers are initialized from the cpu_context
 ******************************************************************************/
void cm_prepare_el3_exit(size_t security_state)
{
	u_register_t sctlr_el2, scr_el3;
	cpu_context_t *ctx = cm_get_context(security_state);

	assert(ctx != NULL);

	if (security_state == NON_SECURE) {
		uint64_t el2_implemented = el_implemented(2);

		scr_el3 = read_ctx_reg(get_el3state_ctx(ctx),
				       CTX_SCR_EL3);

		if (el2_implemented != EL_IMPL_NONE) {

			/*
			 * If the context is not being used for EL2, initialize
			 * HCRX_EL2 with its init value here.
			 */
			if (is_feat_hcx_supported()) {
				write_hcrx_el2(HCRX_EL2_INIT_VAL);
			}

			/*
			 * Initialize the fine-grained trap registers introduced
			 * by FEAT_FGT so all traps are initially disabled when
			 * switching to EL2 or a lower EL, preventing undesired
			 * behavior.
			 */
			if (is_feat_fgt_supported()) {
				/*
				 * Initialize HFG*_EL2 registers with a default
				 * value so legacy systems unaware of FEAT_FGT
				 * do not get trapped due to their lack of
				 * initialization for this feature.
				 */
				write_hfgitr_el2(HFGITR_EL2_INIT_VAL);
				write_hfgrtr_el2(HFGRTR_EL2_INIT_VAL);
				write_hfgwtr_el2(HFGWTR_EL2_INIT_VAL);
			}

			/* Condition to ensure EL2 is being used. */
			if ((scr_el3 & SCR_HCE_BIT) != 0U) {
				/* Initialize SCTLR_EL2 register with reset value. */
				sctlr_el2 = SCTLR_EL2_RES1;

				/*
				 * If the workaround for Cortex-A75 erratum
				 * 764081 is used then set SCTLR_EL2.IESB to
				 * enable the Implicit Error Synchronization
				 * Barrier.
				 */
				if (errata_a75_764081_applies()) {
					sctlr_el2 |= SCTLR_IESB_BIT;
				}

				write_sctlr_el2(sctlr_el2);
			} else {
				/*
				 * ((scr_el3 & SCR_HCE_BIT) == 0)
				 * EL2 implemented but unused.
				 */
				init_nonsecure_el2_unused(ctx);
			}
		}

		if (is_feat_fgwte3_supported()) {
			/*
			 * TCR_EL3 and ACTLR_EL3 could be overwritten
			 * by platforms and hence are locked a bit late.
1189 */ 1190 write_fgwte3_el3(FGWTE3_EL3_LATE_INIT_VAL); 1191 } 1192 } 1193 #if !CTX_INCLUDE_EL2_REGS || IMAGE_BL1 1194 /* Restore EL1 system registers, only when CTX_INCLUDE_EL2_REGS=0 */ 1195 cm_el1_sysregs_context_restore(security_state); 1196 #endif 1197 cm_set_next_eret_context(security_state); 1198 } 1199 1200 #if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) 1201 1202 static void el2_sysregs_context_save_fgt(el2_sysregs_t *ctx) 1203 { 1204 write_el2_ctx_fgt(ctx, hdfgrtr_el2, read_hdfgrtr_el2()); 1205 if (is_feat_amu_supported()) { 1206 write_el2_ctx_fgt(ctx, hafgrtr_el2, read_hafgrtr_el2()); 1207 } 1208 write_el2_ctx_fgt(ctx, hdfgwtr_el2, read_hdfgwtr_el2()); 1209 write_el2_ctx_fgt(ctx, hfgitr_el2, read_hfgitr_el2()); 1210 write_el2_ctx_fgt(ctx, hfgrtr_el2, read_hfgrtr_el2()); 1211 write_el2_ctx_fgt(ctx, hfgwtr_el2, read_hfgwtr_el2()); 1212 } 1213 1214 static void el2_sysregs_context_restore_fgt(el2_sysregs_t *ctx) 1215 { 1216 write_hdfgrtr_el2(read_el2_ctx_fgt(ctx, hdfgrtr_el2)); 1217 if (is_feat_amu_supported()) { 1218 write_hafgrtr_el2(read_el2_ctx_fgt(ctx, hafgrtr_el2)); 1219 } 1220 write_hdfgwtr_el2(read_el2_ctx_fgt(ctx, hdfgwtr_el2)); 1221 write_hfgitr_el2(read_el2_ctx_fgt(ctx, hfgitr_el2)); 1222 write_hfgrtr_el2(read_el2_ctx_fgt(ctx, hfgrtr_el2)); 1223 write_hfgwtr_el2(read_el2_ctx_fgt(ctx, hfgwtr_el2)); 1224 } 1225 1226 static void el2_sysregs_context_save_fgt2(el2_sysregs_t *ctx) 1227 { 1228 write_el2_ctx_fgt2(ctx, hdfgrtr2_el2, read_hdfgrtr2_el2()); 1229 write_el2_ctx_fgt2(ctx, hdfgwtr2_el2, read_hdfgwtr2_el2()); 1230 write_el2_ctx_fgt2(ctx, hfgitr2_el2, read_hfgitr2_el2()); 1231 write_el2_ctx_fgt2(ctx, hfgrtr2_el2, read_hfgrtr2_el2()); 1232 write_el2_ctx_fgt2(ctx, hfgwtr2_el2, read_hfgwtr2_el2()); 1233 } 1234 1235 static void el2_sysregs_context_restore_fgt2(el2_sysregs_t *ctx) 1236 { 1237 write_hdfgrtr2_el2(read_el2_ctx_fgt2(ctx, hdfgrtr2_el2)); 1238 write_hdfgwtr2_el2(read_el2_ctx_fgt2(ctx, hdfgwtr2_el2)); 1239 write_hfgitr2_el2(read_el2_ctx_fgt2(ctx, hfgitr2_el2)); 1240 write_hfgrtr2_el2(read_el2_ctx_fgt2(ctx, hfgrtr2_el2)); 1241 write_hfgwtr2_el2(read_el2_ctx_fgt2(ctx, hfgwtr2_el2)); 1242 } 1243 1244 static void el2_sysregs_context_save_mpam(el2_sysregs_t *ctx) 1245 { 1246 u_register_t mpam_idr = read_mpamidr_el1(); 1247 1248 write_el2_ctx_mpam(ctx, mpam2_el2, read_mpam2_el2()); 1249 1250 /* 1251 * The context registers that we intend to save would be part of the 1252 * PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1. 1253 */ 1254 if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) { 1255 return; 1256 } 1257 1258 /* 1259 * MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 are always present if 1260 * MPAMIDR_HAS_HCR_BIT == 1. 1261 */ 1262 write_el2_ctx_mpam(ctx, mpamhcr_el2, read_mpamhcr_el2()); 1263 write_el2_ctx_mpam(ctx, mpamvpm0_el2, read_mpamvpm0_el2()); 1264 write_el2_ctx_mpam(ctx, mpamvpmv_el2, read_mpamvpmv_el2()); 1265 1266 /* 1267 * The number of MPAMVPM registers is implementation defined, their 1268 * number is stored in the MPAMIDR_EL1 register. 
1269 */ 1270 switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) { 1271 case 7: 1272 write_el2_ctx_mpam(ctx, mpamvpm7_el2, read_mpamvpm7_el2()); 1273 __fallthrough; 1274 case 6: 1275 write_el2_ctx_mpam(ctx, mpamvpm6_el2, read_mpamvpm6_el2()); 1276 __fallthrough; 1277 case 5: 1278 write_el2_ctx_mpam(ctx, mpamvpm5_el2, read_mpamvpm5_el2()); 1279 __fallthrough; 1280 case 4: 1281 write_el2_ctx_mpam(ctx, mpamvpm4_el2, read_mpamvpm4_el2()); 1282 __fallthrough; 1283 case 3: 1284 write_el2_ctx_mpam(ctx, mpamvpm3_el2, read_mpamvpm3_el2()); 1285 __fallthrough; 1286 case 2: 1287 write_el2_ctx_mpam(ctx, mpamvpm2_el2, read_mpamvpm2_el2()); 1288 __fallthrough; 1289 case 1: 1290 write_el2_ctx_mpam(ctx, mpamvpm1_el2, read_mpamvpm1_el2()); 1291 break; 1292 } 1293 } 1294 1295 static void el2_sysregs_context_restore_mpam(el2_sysregs_t *ctx) 1296 { 1297 u_register_t mpam_idr = read_mpamidr_el1(); 1298 1299 write_mpam2_el2(read_el2_ctx_mpam(ctx, mpam2_el2)); 1300 1301 if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) { 1302 return; 1303 } 1304 1305 write_mpamhcr_el2(read_el2_ctx_mpam(ctx, mpamhcr_el2)); 1306 write_mpamvpm0_el2(read_el2_ctx_mpam(ctx, mpamvpm0_el2)); 1307 write_mpamvpmv_el2(read_el2_ctx_mpam(ctx, mpamvpmv_el2)); 1308 1309 switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) { 1310 case 7: 1311 write_mpamvpm7_el2(read_el2_ctx_mpam(ctx, mpamvpm7_el2)); 1312 __fallthrough; 1313 case 6: 1314 write_mpamvpm6_el2(read_el2_ctx_mpam(ctx, mpamvpm6_el2)); 1315 __fallthrough; 1316 case 5: 1317 write_mpamvpm5_el2(read_el2_ctx_mpam(ctx, mpamvpm5_el2)); 1318 __fallthrough; 1319 case 4: 1320 write_mpamvpm4_el2(read_el2_ctx_mpam(ctx, mpamvpm4_el2)); 1321 __fallthrough; 1322 case 3: 1323 write_mpamvpm3_el2(read_el2_ctx_mpam(ctx, mpamvpm3_el2)); 1324 __fallthrough; 1325 case 2: 1326 write_mpamvpm2_el2(read_el2_ctx_mpam(ctx, mpamvpm2_el2)); 1327 __fallthrough; 1328 case 1: 1329 write_mpamvpm1_el2(read_el2_ctx_mpam(ctx, mpamvpm1_el2)); 1330 break; 1331 } 1332 } 1333 1334 /* --------------------------------------------------------------------------- 1335 * The following registers are not added: 1336 * ICH_AP0R<n>_EL2 1337 * ICH_AP1R<n>_EL2 1338 * ICH_LR<n>_EL2 1339 * 1340 * NOTE: For a system with S-EL2 present but not enabled, accessing 1341 * ICC_SRE_EL2 is undefined from EL3. To workaround this change the 1342 * SCR_EL3.NS = 1 before accessing this register. 
1343 * --------------------------------------------------------------------------- 1344 */ 1345 static void el2_sysregs_context_save_gic(el2_sysregs_t *ctx, uint32_t security_state) 1346 { 1347 u_register_t scr_el3 = read_scr_el3(); 1348 1349 #if defined(SPD_spmd) && SPMD_SPM_AT_SEL2 1350 write_el2_ctx_common(ctx, icc_sre_el2, read_icc_sre_el2()); 1351 #else 1352 write_scr_el3(scr_el3 | SCR_NS_BIT); 1353 isb(); 1354 1355 write_el2_ctx_common(ctx, icc_sre_el2, read_icc_sre_el2()); 1356 1357 write_scr_el3(scr_el3); 1358 isb(); 1359 #endif 1360 write_el2_ctx_common(ctx, ich_hcr_el2, read_ich_hcr_el2()); 1361 1362 if (errata_ich_vmcr_el2_applies()) { 1363 if (security_state == SECURE) { 1364 write_scr_el3(scr_el3 & ~SCR_NS_BIT); 1365 } else { 1366 write_scr_el3(scr_el3 | SCR_NS_BIT); 1367 } 1368 isb(); 1369 } 1370 1371 write_el2_ctx_common(ctx, ich_vmcr_el2, read_ich_vmcr_el2()); 1372 1373 if (errata_ich_vmcr_el2_applies()) { 1374 write_scr_el3(scr_el3); 1375 isb(); 1376 } 1377 } 1378 1379 static void el2_sysregs_context_restore_gic(el2_sysregs_t *ctx, uint32_t security_state) 1380 { 1381 u_register_t scr_el3 = read_scr_el3(); 1382 1383 #if defined(SPD_spmd) && SPMD_SPM_AT_SEL2 1384 write_icc_sre_el2(read_el2_ctx_common(ctx, icc_sre_el2)); 1385 #else 1386 write_scr_el3(scr_el3 | SCR_NS_BIT); 1387 isb(); 1388 1389 write_icc_sre_el2(read_el2_ctx_common(ctx, icc_sre_el2)); 1390 1391 write_scr_el3(scr_el3); 1392 isb(); 1393 #endif 1394 write_ich_hcr_el2(read_el2_ctx_common(ctx, ich_hcr_el2)); 1395 1396 if (errata_ich_vmcr_el2_applies()) { 1397 if (security_state == SECURE) { 1398 write_scr_el3(scr_el3 & ~SCR_NS_BIT); 1399 } else { 1400 write_scr_el3(scr_el3 | SCR_NS_BIT); 1401 } 1402 isb(); 1403 } 1404 1405 write_ich_vmcr_el2(read_el2_ctx_common(ctx, ich_vmcr_el2)); 1406 1407 if (errata_ich_vmcr_el2_applies()) { 1408 write_scr_el3(scr_el3); 1409 isb(); 1410 } 1411 } 1412 1413 /* ----------------------------------------------------- 1414 * The following registers are not added: 1415 * AMEVCNTVOFF0<n>_EL2 1416 * AMEVCNTVOFF1<n>_EL2 1417 * ----------------------------------------------------- 1418 */ 1419 static void el2_sysregs_context_save_common(el2_sysregs_t *ctx) 1420 { 1421 write_el2_ctx_common(ctx, actlr_el2, read_actlr_el2()); 1422 write_el2_ctx_common(ctx, afsr0_el2, read_afsr0_el2()); 1423 write_el2_ctx_common(ctx, afsr1_el2, read_afsr1_el2()); 1424 write_el2_ctx_common(ctx, amair_el2, read_amair_el2()); 1425 write_el2_ctx_common(ctx, cnthctl_el2, read_cnthctl_el2()); 1426 write_el2_ctx_common(ctx, cntvoff_el2, read_cntvoff_el2()); 1427 write_el2_ctx_common(ctx, cptr_el2, read_cptr_el2()); 1428 if (CTX_INCLUDE_AARCH32_REGS) { 1429 write_el2_ctx_common(ctx, dbgvcr32_el2, read_dbgvcr32_el2()); 1430 } 1431 write_el2_ctx_common(ctx, elr_el2, read_elr_el2()); 1432 write_el2_ctx_common(ctx, esr_el2, read_esr_el2()); 1433 write_el2_ctx_common(ctx, far_el2, read_far_el2()); 1434 write_el2_ctx_common(ctx, hacr_el2, read_hacr_el2()); 1435 write_el2_ctx_common(ctx, hcr_el2, read_hcr_el2()); 1436 write_el2_ctx_common(ctx, hpfar_el2, read_hpfar_el2()); 1437 write_el2_ctx_common(ctx, hstr_el2, read_hstr_el2()); 1438 write_el2_ctx_common(ctx, mair_el2, read_mair_el2()); 1439 write_el2_ctx_common(ctx, mdcr_el2, read_mdcr_el2()); 1440 write_el2_ctx_common(ctx, sctlr_el2, read_sctlr_el2()); 1441 write_el2_ctx_common(ctx, spsr_el2, read_spsr_el2()); 1442 write_el2_ctx_common(ctx, sp_el2, read_sp_el2()); 1443 write_el2_ctx_common(ctx, tcr_el2, read_tcr_el2()); 1444 write_el2_ctx_common(ctx, tpidr_el2, 
read_tpidr_el2()); 1445 write_el2_ctx_common(ctx, vbar_el2, read_vbar_el2()); 1446 write_el2_ctx_common(ctx, vmpidr_el2, read_vmpidr_el2()); 1447 write_el2_ctx_common(ctx, vpidr_el2, read_vpidr_el2()); 1448 write_el2_ctx_common(ctx, vtcr_el2, read_vtcr_el2()); 1449 1450 write_el2_ctx_common_sysreg128(ctx, ttbr0_el2, read_ttbr0_el2()); 1451 write_el2_ctx_common_sysreg128(ctx, vttbr_el2, read_vttbr_el2()); 1452 } 1453 1454 static void el2_sysregs_context_restore_common(el2_sysregs_t *ctx) 1455 { 1456 write_actlr_el2(read_el2_ctx_common(ctx, actlr_el2)); 1457 write_afsr0_el2(read_el2_ctx_common(ctx, afsr0_el2)); 1458 write_afsr1_el2(read_el2_ctx_common(ctx, afsr1_el2)); 1459 write_amair_el2(read_el2_ctx_common(ctx, amair_el2)); 1460 write_cnthctl_el2(read_el2_ctx_common(ctx, cnthctl_el2)); 1461 write_cntvoff_el2(read_el2_ctx_common(ctx, cntvoff_el2)); 1462 write_cptr_el2(read_el2_ctx_common(ctx, cptr_el2)); 1463 if (CTX_INCLUDE_AARCH32_REGS) { 1464 write_dbgvcr32_el2(read_el2_ctx_common(ctx, dbgvcr32_el2)); 1465 } 1466 write_elr_el2(read_el2_ctx_common(ctx, elr_el2)); 1467 write_esr_el2(read_el2_ctx_common(ctx, esr_el2)); 1468 write_far_el2(read_el2_ctx_common(ctx, far_el2)); 1469 write_hacr_el2(read_el2_ctx_common(ctx, hacr_el2)); 1470 write_hcr_el2(read_el2_ctx_common(ctx, hcr_el2)); 1471 write_hpfar_el2(read_el2_ctx_common(ctx, hpfar_el2)); 1472 write_hstr_el2(read_el2_ctx_common(ctx, hstr_el2)); 1473 write_mair_el2(read_el2_ctx_common(ctx, mair_el2)); 1474 write_mdcr_el2(read_el2_ctx_common(ctx, mdcr_el2)); 1475 write_sctlr_el2(read_el2_ctx_common(ctx, sctlr_el2)); 1476 write_spsr_el2(read_el2_ctx_common(ctx, spsr_el2)); 1477 write_sp_el2(read_el2_ctx_common(ctx, sp_el2)); 1478 write_tcr_el2(read_el2_ctx_common(ctx, tcr_el2)); 1479 write_tpidr_el2(read_el2_ctx_common(ctx, tpidr_el2)); 1480 write_ttbr0_el2(read_el2_ctx_common(ctx, ttbr0_el2)); 1481 write_vbar_el2(read_el2_ctx_common(ctx, vbar_el2)); 1482 write_vmpidr_el2(read_el2_ctx_common(ctx, vmpidr_el2)); 1483 write_vpidr_el2(read_el2_ctx_common(ctx, vpidr_el2)); 1484 write_vtcr_el2(read_el2_ctx_common(ctx, vtcr_el2)); 1485 write_vttbr_el2(read_el2_ctx_common(ctx, vttbr_el2)); 1486 } 1487 1488 /******************************************************************************* 1489 * Save EL2 sysreg context 1490 ******************************************************************************/ 1491 void cm_el2_sysregs_context_save(uint32_t security_state) 1492 { 1493 cpu_context_t *ctx; 1494 el2_sysregs_t *el2_sysregs_ctx; 1495 1496 ctx = cm_get_context(security_state); 1497 assert(ctx != NULL); 1498 1499 el2_sysregs_ctx = get_el2_sysregs_ctx(ctx); 1500 1501 el2_sysregs_context_save_common(el2_sysregs_ctx); 1502 el2_sysregs_context_save_gic(el2_sysregs_ctx, security_state); 1503 1504 if (is_feat_mte2_supported()) { 1505 write_el2_ctx_mte2(el2_sysregs_ctx, tfsr_el2, read_tfsr_el2()); 1506 } 1507 1508 if (is_feat_mpam_supported()) { 1509 el2_sysregs_context_save_mpam(el2_sysregs_ctx); 1510 } 1511 1512 if (is_feat_fgt_supported()) { 1513 el2_sysregs_context_save_fgt(el2_sysregs_ctx); 1514 } 1515 1516 if (is_feat_fgt2_supported()) { 1517 el2_sysregs_context_save_fgt2(el2_sysregs_ctx); 1518 } 1519 1520 if (is_feat_ecv_v2_supported()) { 1521 write_el2_ctx_ecv(el2_sysregs_ctx, cntpoff_el2, read_cntpoff_el2()); 1522 } 1523 1524 if (is_feat_vhe_supported()) { 1525 write_el2_ctx_vhe(el2_sysregs_ctx, contextidr_el2, 1526 read_contextidr_el2()); 1527 write_el2_ctx_vhe_sysreg128(el2_sysregs_ctx, ttbr1_el2, read_ttbr1_el2()); 1528 } 1529 1530 if 
(is_feat_ras_supported()) { 1531 write_el2_ctx_ras(el2_sysregs_ctx, vdisr_el2, read_vdisr_el2()); 1532 write_el2_ctx_ras(el2_sysregs_ctx, vsesr_el2, read_vsesr_el2()); 1533 } 1534 1535 if (is_feat_nv2_supported()) { 1536 write_el2_ctx_neve(el2_sysregs_ctx, vncr_el2, read_vncr_el2()); 1537 } 1538 1539 if (is_feat_trf_supported()) { 1540 write_el2_ctx_trf(el2_sysregs_ctx, trfcr_el2, read_trfcr_el2()); 1541 } 1542 1543 if (is_feat_csv2_2_supported()) { 1544 write_el2_ctx_csv2_2(el2_sysregs_ctx, scxtnum_el2, 1545 read_scxtnum_el2()); 1546 } 1547 1548 if (is_feat_hcx_supported()) { 1549 write_el2_ctx_hcx(el2_sysregs_ctx, hcrx_el2, read_hcrx_el2()); 1550 } 1551 1552 if (is_feat_tcr2_supported()) { 1553 write_el2_ctx_tcr2(el2_sysregs_ctx, tcr2_el2, read_tcr2_el2()); 1554 } 1555 1556 if (is_feat_s1pie_supported()) { 1557 write_el2_ctx_sxpie(el2_sysregs_ctx, pire0_el2, read_pire0_el2()); 1558 write_el2_ctx_sxpie(el2_sysregs_ctx, pir_el2, read_pir_el2()); 1559 } 1560 1561 if (is_feat_s1poe_supported()) { 1562 write_el2_ctx_sxpoe(el2_sysregs_ctx, por_el2, read_por_el2()); 1563 } 1564 1565 if (is_feat_brbe_supported()) { 1566 write_el2_ctx_brbe(el2_sysregs_ctx, brbcr_el2, read_brbcr_el2()); 1567 } 1568 1569 if (is_feat_s2pie_supported()) { 1570 write_el2_ctx_s2pie(el2_sysregs_ctx, s2pir_el2, read_s2pir_el2()); 1571 } 1572 1573 if (is_feat_gcs_supported()) { 1574 write_el2_ctx_gcs(el2_sysregs_ctx, gcscr_el2, read_gcscr_el2()); 1575 write_el2_ctx_gcs(el2_sysregs_ctx, gcspr_el2, read_gcspr_el2()); 1576 } 1577 1578 if (is_feat_sctlr2_supported()) { 1579 write_el2_ctx_sctlr2(el2_sysregs_ctx, sctlr2_el2, read_sctlr2_el2()); 1580 } 1581 } 1582 1583 /******************************************************************************* 1584 * Restore EL2 sysreg context 1585 ******************************************************************************/ 1586 void cm_el2_sysregs_context_restore(uint32_t security_state) 1587 { 1588 cpu_context_t *ctx; 1589 el2_sysregs_t *el2_sysregs_ctx; 1590 1591 ctx = cm_get_context(security_state); 1592 assert(ctx != NULL); 1593 1594 el2_sysregs_ctx = get_el2_sysregs_ctx(ctx); 1595 1596 el2_sysregs_context_restore_common(el2_sysregs_ctx); 1597 el2_sysregs_context_restore_gic(el2_sysregs_ctx, security_state); 1598 1599 if (is_feat_mte2_supported()) { 1600 write_tfsr_el2(read_el2_ctx_mte2(el2_sysregs_ctx, tfsr_el2)); 1601 } 1602 1603 if (is_feat_mpam_supported()) { 1604 el2_sysregs_context_restore_mpam(el2_sysregs_ctx); 1605 } 1606 1607 if (is_feat_fgt_supported()) { 1608 el2_sysregs_context_restore_fgt(el2_sysregs_ctx); 1609 } 1610 1611 if (is_feat_fgt2_supported()) { 1612 el2_sysregs_context_restore_fgt2(el2_sysregs_ctx); 1613 } 1614 1615 if (is_feat_ecv_v2_supported()) { 1616 write_cntpoff_el2(read_el2_ctx_ecv(el2_sysregs_ctx, cntpoff_el2)); 1617 } 1618 1619 if (is_feat_vhe_supported()) { 1620 write_contextidr_el2(read_el2_ctx_vhe(el2_sysregs_ctx, 1621 contextidr_el2)); 1622 write_ttbr1_el2(read_el2_ctx_vhe(el2_sysregs_ctx, ttbr1_el2)); 1623 } 1624 1625 if (is_feat_ras_supported()) { 1626 write_vdisr_el2(read_el2_ctx_ras(el2_sysregs_ctx, vdisr_el2)); 1627 write_vsesr_el2(read_el2_ctx_ras(el2_sysregs_ctx, vsesr_el2)); 1628 } 1629 1630 if (is_feat_nv2_supported()) { 1631 write_vncr_el2(read_el2_ctx_neve(el2_sysregs_ctx, vncr_el2)); 1632 } 1633 1634 if (is_feat_trf_supported()) { 1635 write_trfcr_el2(read_el2_ctx_trf(el2_sysregs_ctx, trfcr_el2)); 1636 } 1637 1638 if (is_feat_csv2_2_supported()) { 1639 write_scxtnum_el2(read_el2_ctx_csv2_2(el2_sysregs_ctx, 1640 scxtnum_el2)); 1641 } 
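
	/*
	 * The feature-gated restores below mirror the corresponding saves in
	 * cm_el2_sysregs_context_save(): each register is written back only
	 * when the feature that defines it is supported.
	 */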
1642 1643 if (is_feat_hcx_supported()) { 1644 write_hcrx_el2(read_el2_ctx_hcx(el2_sysregs_ctx, hcrx_el2)); 1645 } 1646 1647 if (is_feat_tcr2_supported()) { 1648 write_tcr2_el2(read_el2_ctx_tcr2(el2_sysregs_ctx, tcr2_el2)); 1649 } 1650 1651 if (is_feat_s1pie_supported()) { 1652 write_pire0_el2(read_el2_ctx_sxpie(el2_sysregs_ctx, pire0_el2)); 1653 write_pir_el2(read_el2_ctx_sxpie(el2_sysregs_ctx, pir_el2)); 1654 } 1655 1656 if (is_feat_s1poe_supported()) { 1657 write_por_el2(read_el2_ctx_sxpoe(el2_sysregs_ctx, por_el2)); 1658 } 1659 1660 if (is_feat_s2pie_supported()) { 1661 write_s2pir_el2(read_el2_ctx_s2pie(el2_sysregs_ctx, s2pir_el2)); 1662 } 1663 1664 if (is_feat_gcs_supported()) { 1665 write_gcscr_el2(read_el2_ctx_gcs(el2_sysregs_ctx, gcscr_el2)); 1666 write_gcspr_el2(read_el2_ctx_gcs(el2_sysregs_ctx, gcspr_el2)); 1667 } 1668 1669 if (is_feat_sctlr2_supported()) { 1670 write_sctlr2_el2(read_el2_ctx_sctlr2(el2_sysregs_ctx, sctlr2_el2)); 1671 } 1672 1673 if (is_feat_brbe_supported()) { 1674 write_brbcr_el2(read_el2_ctx_brbe(el2_sysregs_ctx, brbcr_el2)); 1675 } 1676 } 1677 #endif /* (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) */ 1678 1679 /******************************************************************************* 1680 * This function is used to exit to Non-secure world. If CTX_INCLUDE_EL2_REGS 1681 * is enabled, it restores EL1 and EL2 sysreg contexts instead of directly 1682 * updating EL1 and EL2 registers. Otherwise, it calls the generic 1683 * cm_prepare_el3_exit function. 1684 ******************************************************************************/ 1685 void cm_prepare_el3_exit_ns(void) 1686 { 1687 #if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) 1688 #if ENABLE_ASSERTIONS 1689 cpu_context_t *ctx = cm_get_context(NON_SECURE); 1690 assert(ctx != NULL); 1691 1692 /* Assert that EL2 is used. */ 1693 u_register_t scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3); 1694 assert(((scr_el3 & SCR_HCE_BIT) != 0UL) && 1695 (el_implemented(2U) != EL_IMPL_NONE)); 1696 #endif /* ENABLE_ASSERTIONS */ 1697 1698 /* Restore EL2 sysreg contexts */ 1699 cm_el2_sysregs_context_restore(NON_SECURE); 1700 cm_set_next_eret_context(NON_SECURE); 1701 #else 1702 cm_prepare_el3_exit(NON_SECURE); 1703 #endif /* (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) */ 1704 } 1705 1706 #if ((IMAGE_BL1) || (IMAGE_BL31 && (!CTX_INCLUDE_EL2_REGS))) 1707 /******************************************************************************* 1708 * The next set of six functions are used by runtime services to save and restore 1709 * EL1 context on the 'cpu_context' structure for the specified security state. 
 ******************************************************************************/
static void el1_sysregs_context_save(el1_sysregs_t *ctx)
{
	write_el1_ctx_common(ctx, spsr_el1, read_spsr_el1());
	write_el1_ctx_common(ctx, elr_el1, read_elr_el1());

#if (!ERRATA_SPECULATIVE_AT)
	write_el1_ctx_common(ctx, sctlr_el1, read_sctlr_el1());
	write_el1_ctx_common(ctx, tcr_el1, read_tcr_el1());
#endif /* (!ERRATA_SPECULATIVE_AT) */

	write_el1_ctx_common(ctx, cpacr_el1, read_cpacr_el1());
	write_el1_ctx_common(ctx, csselr_el1, read_csselr_el1());
	write_el1_ctx_common(ctx, sp_el1, read_sp_el1());
	write_el1_ctx_common(ctx, esr_el1, read_esr_el1());
	write_el1_ctx_common(ctx, mair_el1, read_mair_el1());
	write_el1_ctx_common(ctx, amair_el1, read_amair_el1());
	write_el1_ctx_common(ctx, actlr_el1, read_actlr_el1());
	write_el1_ctx_common(ctx, tpidr_el1, read_tpidr_el1());
	write_el1_ctx_common(ctx, tpidr_el0, read_tpidr_el0());
	write_el1_ctx_common(ctx, tpidrro_el0, read_tpidrro_el0());
	write_el1_ctx_common(ctx, far_el1, read_far_el1());
	write_el1_ctx_common(ctx, afsr0_el1, read_afsr0_el1());
	write_el1_ctx_common(ctx, afsr1_el1, read_afsr1_el1());
	write_el1_ctx_common(ctx, contextidr_el1, read_contextidr_el1());
	write_el1_ctx_common(ctx, vbar_el1, read_vbar_el1());
	write_el1_ctx_common(ctx, mdccint_el1, read_mdccint_el1());
	write_el1_ctx_common(ctx, mdscr_el1, read_mdscr_el1());

	write_el1_ctx_common_sysreg128(ctx, par_el1, read_par_el1());
	write_el1_ctx_common_sysreg128(ctx, ttbr0_el1, read_ttbr0_el1());
	write_el1_ctx_common_sysreg128(ctx, ttbr1_el1, read_ttbr1_el1());

	if (CTX_INCLUDE_AARCH32_REGS) {
		/* Save Aarch32 registers */
		write_el1_ctx_aarch32(ctx, spsr_abt, read_spsr_abt());
		write_el1_ctx_aarch32(ctx, spsr_und, read_spsr_und());
		write_el1_ctx_aarch32(ctx, spsr_irq, read_spsr_irq());
		write_el1_ctx_aarch32(ctx, spsr_fiq, read_spsr_fiq());
		write_el1_ctx_aarch32(ctx, dacr32_el2, read_dacr32_el2());
		write_el1_ctx_aarch32(ctx, ifsr32_el2, read_ifsr32_el2());
	}

	/* Save counter-timer kernel control register */
	write_el1_ctx_arch_timer(ctx, cntkctl_el1, read_cntkctl_el1());
#if NS_TIMER_SWITCH
	/* Save NS Timer registers */
	write_el1_ctx_arch_timer(ctx, cntp_ctl_el0, read_cntp_ctl_el0());
	write_el1_ctx_arch_timer(ctx, cntp_cval_el0, read_cntp_cval_el0());
	write_el1_ctx_arch_timer(ctx, cntv_ctl_el0, read_cntv_ctl_el0());
	write_el1_ctx_arch_timer(ctx, cntv_cval_el0, read_cntv_cval_el0());
#endif

	if (is_feat_mte2_supported()) {
		write_el1_ctx_mte2(ctx, tfsre0_el1, read_tfsre0_el1());
		write_el1_ctx_mte2(ctx, tfsr_el1, read_tfsr_el1());
		write_el1_ctx_mte2(ctx, rgsr_el1, read_rgsr_el1());
		write_el1_ctx_mte2(ctx, gcr_el1, read_gcr_el1());
	}

	if (is_feat_ras_supported()) {
		write_el1_ctx_ras(ctx, disr_el1, read_disr_el1());
	}

	if (is_feat_s1pie_supported()) {
		write_el1_ctx_s1pie(ctx, pire0_el1, read_pire0_el1());
		write_el1_ctx_s1pie(ctx, pir_el1, read_pir_el1());
	}

	if (is_feat_s1poe_supported()) {
		write_el1_ctx_s1poe(ctx, por_el1, read_por_el1());
	}

	if (is_feat_s2poe_supported()) {
		write_el1_ctx_s2poe(ctx, s2por_el1, read_s2por_el1());
	}

	if (is_feat_tcr2_supported()) {
		write_el1_ctx_tcr2(ctx, tcr2_el1, read_tcr2_el1());
	}

	if (is_feat_trf_supported()) {
		write_el1_ctx_trf(ctx, trfcr_el1, read_trfcr_el1());
	}

	if (is_feat_csv2_2_supported()) {
		write_el1_ctx_csv2_2(ctx, scxtnum_el0, read_scxtnum_el0());
		write_el1_ctx_csv2_2(ctx, scxtnum_el1, read_scxtnum_el1());
	}

	if (is_feat_gcs_supported()) {
		write_el1_ctx_gcs(ctx, gcscr_el1, read_gcscr_el1());
		write_el1_ctx_gcs(ctx, gcscre0_el1, read_gcscre0_el1());
		write_el1_ctx_gcs(ctx, gcspr_el1, read_gcspr_el1());
		write_el1_ctx_gcs(ctx, gcspr_el0, read_gcspr_el0());
	}

	if (is_feat_the_supported()) {
		write_el1_ctx_the_sysreg128(ctx, rcwmask_el1, read_rcwmask_el1());
		write_el1_ctx_the_sysreg128(ctx, rcwsmask_el1, read_rcwsmask_el1());
	}

	if (is_feat_sctlr2_supported()) {
		write_el1_ctx_sctlr2(ctx, sctlr2_el1, read_sctlr2_el1());
	}

	if (is_feat_ls64_accdata_supported()) {
		write_el1_ctx_ls64(ctx, accdata_el1, read_accdata_el1());
	}
}

static void el1_sysregs_context_restore(el1_sysregs_t *ctx)
{
	write_spsr_el1(read_el1_ctx_common(ctx, spsr_el1));
	write_elr_el1(read_el1_ctx_common(ctx, elr_el1));

#if (!ERRATA_SPECULATIVE_AT)
	write_sctlr_el1(read_el1_ctx_common(ctx, sctlr_el1));
	write_tcr_el1(read_el1_ctx_common(ctx, tcr_el1));
#endif /* (!ERRATA_SPECULATIVE_AT) */

	write_cpacr_el1(read_el1_ctx_common(ctx, cpacr_el1));
	write_csselr_el1(read_el1_ctx_common(ctx, csselr_el1));
	write_sp_el1(read_el1_ctx_common(ctx, sp_el1));
	write_esr_el1(read_el1_ctx_common(ctx, esr_el1));
	write_ttbr0_el1(read_el1_ctx_common(ctx, ttbr0_el1));
	write_ttbr1_el1(read_el1_ctx_common(ctx, ttbr1_el1));
	write_mair_el1(read_el1_ctx_common(ctx, mair_el1));
	write_amair_el1(read_el1_ctx_common(ctx, amair_el1));
	write_actlr_el1(read_el1_ctx_common(ctx, actlr_el1));
	write_tpidr_el1(read_el1_ctx_common(ctx, tpidr_el1));
	write_tpidr_el0(read_el1_ctx_common(ctx, tpidr_el0));
	write_tpidrro_el0(read_el1_ctx_common(ctx, tpidrro_el0));
	write_par_el1(read_el1_ctx_common(ctx, par_el1));
	write_far_el1(read_el1_ctx_common(ctx, far_el1));
	write_afsr0_el1(read_el1_ctx_common(ctx, afsr0_el1));
	write_afsr1_el1(read_el1_ctx_common(ctx, afsr1_el1));
	write_contextidr_el1(read_el1_ctx_common(ctx, contextidr_el1));
	write_vbar_el1(read_el1_ctx_common(ctx, vbar_el1));
	write_mdccint_el1(read_el1_ctx_common(ctx, mdccint_el1));
	write_mdscr_el1(read_el1_ctx_common(ctx, mdscr_el1));

	if (CTX_INCLUDE_AARCH32_REGS) {
		/* Restore Aarch32 registers */
		write_spsr_abt(read_el1_ctx_aarch32(ctx, spsr_abt));
		write_spsr_und(read_el1_ctx_aarch32(ctx, spsr_und));
		write_spsr_irq(read_el1_ctx_aarch32(ctx, spsr_irq));
		write_spsr_fiq(read_el1_ctx_aarch32(ctx, spsr_fiq));
		write_dacr32_el2(read_el1_ctx_aarch32(ctx, dacr32_el2));
		write_ifsr32_el2(read_el1_ctx_aarch32(ctx, ifsr32_el2));
	}

	/* Restore counter-timer kernel control register */
	write_cntkctl_el1(read_el1_ctx_arch_timer(ctx, cntkctl_el1));
#if NS_TIMER_SWITCH
	/* Restore NS Timer registers */
	write_cntp_ctl_el0(read_el1_ctx_arch_timer(ctx, cntp_ctl_el0));
	write_cntp_cval_el0(read_el1_ctx_arch_timer(ctx, cntp_cval_el0));
	write_cntv_ctl_el0(read_el1_ctx_arch_timer(ctx, cntv_ctl_el0));
	write_cntv_cval_el0(read_el1_ctx_arch_timer(ctx, cntv_cval_el0));
#endif

	if (is_feat_mte2_supported()) {
		write_tfsre0_el1(read_el1_ctx_mte2(ctx, tfsre0_el1));
		write_tfsr_el1(read_el1_ctx_mte2(ctx, tfsr_el1));
		write_rgsr_el1(read_el1_ctx_mte2(ctx, rgsr_el1));
		write_gcr_el1(read_el1_ctx_mte2(ctx, gcr_el1));
	}

	if (is_feat_ras_supported()) {
		write_disr_el1(read_el1_ctx_ras(ctx, disr_el1));
	}

	if (is_feat_s1pie_supported()) {
		write_pire0_el1(read_el1_ctx_s1pie(ctx, pire0_el1));
		write_pir_el1(read_el1_ctx_s1pie(ctx, pir_el1));
	}

	if (is_feat_s1poe_supported()) {
		write_por_el1(read_el1_ctx_s1poe(ctx, por_el1));
	}

	if (is_feat_s2poe_supported()) {
		write_s2por_el1(read_el1_ctx_s2poe(ctx, s2por_el1));
	}

	if (is_feat_tcr2_supported()) {
		write_tcr2_el1(read_el1_ctx_tcr2(ctx, tcr2_el1));
	}

	if (is_feat_trf_supported()) {
		write_trfcr_el1(read_el1_ctx_trf(ctx, trfcr_el1));
	}

	if (is_feat_csv2_2_supported()) {
		write_scxtnum_el0(read_el1_ctx_csv2_2(ctx, scxtnum_el0));
		write_scxtnum_el1(read_el1_ctx_csv2_2(ctx, scxtnum_el1));
	}

	if (is_feat_gcs_supported()) {
		write_gcscr_el1(read_el1_ctx_gcs(ctx, gcscr_el1));
		write_gcscre0_el1(read_el1_ctx_gcs(ctx, gcscre0_el1));
		write_gcspr_el1(read_el1_ctx_gcs(ctx, gcspr_el1));
		write_gcspr_el0(read_el1_ctx_gcs(ctx, gcspr_el0));
	}

	if (is_feat_the_supported()) {
		write_rcwmask_el1(read_el1_ctx_the(ctx, rcwmask_el1));
		write_rcwsmask_el1(read_el1_ctx_the(ctx, rcwsmask_el1));
	}

	if (is_feat_sctlr2_supported()) {
		write_sctlr2_el1(read_el1_ctx_sctlr2(ctx, sctlr2_el1));
	}

	if (is_feat_ls64_accdata_supported()) {
		write_accdata_el1(read_el1_ctx_ls64(ctx, accdata_el1));
	}
}

/*******************************************************************************
 * The next couple of functions are used by runtime services to save and restore
 * EL1 context on the 'cpu_context' structure for the specified security state.
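 *
 * In BL31 builds they additionally publish the corresponding world-entry and
 * world-exit pubsub events around the context switch.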
 ******************************************************************************/
void cm_el1_sysregs_context_save(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	el1_sysregs_context_save(get_el1_sysregs_ctx(ctx));

#if IMAGE_BL31
	if (security_state == SECURE) {
		PUBLISH_EVENT(cm_exited_secure_world);
	} else {
		PUBLISH_EVENT(cm_exited_normal_world);
	}
#endif
}

void cm_el1_sysregs_context_restore(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	el1_sysregs_context_restore(get_el1_sysregs_ctx(ctx));

#if IMAGE_BL31
	if (security_state == SECURE) {
		PUBLISH_EVENT(cm_entering_secure_world);
	} else {
		PUBLISH_EVENT(cm_entering_normal_world);
	}
#endif
}

#endif /* ((IMAGE_BL1) || (IMAGE_BL31 && (!CTX_INCLUDE_EL2_REGS))) */

/*******************************************************************************
 * This function populates the ELR_EL3 member of 'cpu_context' pertaining to
 * the given security state with the given entrypoint.
 ******************************************************************************/
void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint)
{
	cpu_context_t *ctx;
	el3_state_t *state;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Populate EL3 state so that ERET jumps to the correct entry */
	state = get_el3state_ctx(ctx);
	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
}

/*******************************************************************************
 * This function populates the ELR_EL3 and SPSR_EL3 members of 'cpu_context'
 * pertaining to the given security state.
 ******************************************************************************/
void cm_set_elr_spsr_el3(uint32_t security_state,
			 uintptr_t entrypoint, uint32_t spsr)
{
	cpu_context_t *ctx;
	el3_state_t *state;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Populate EL3 state so that ERET jumps to the correct entry */
	state = get_el3state_ctx(ctx);
	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
	write_ctx_reg(state, CTX_SPSR_EL3, spsr);
}

/*******************************************************************************
 * This function updates a single bit in the SCR_EL3 member of the 'cpu_context'
 * pertaining to the given security state using the value and bit position
 * specified in the parameters. It preserves all other bits.
 ******************************************************************************/
void cm_write_scr_el3_bit(uint32_t security_state,
			  uint32_t bit_pos,
			  uint32_t value)
{
	cpu_context_t *ctx;
	el3_state_t *state;
	u_register_t scr_el3;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Ensure that the bit position is a valid one */
	assert(((1UL << bit_pos) & SCR_VALID_BIT_MASK) != 0U);

	/* Ensure that the 'value' is only a bit wide */
	assert(value <= 1U);

	/*
	 * Get the SCR_EL3 value from the cpu context, clear the desired bit
	 * and set it to its new value.
	 */
	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
	scr_el3 &= ~(1UL << bit_pos);
	scr_el3 |= (u_register_t)value << bit_pos;
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
}

/*******************************************************************************
 * This function retrieves the SCR_EL3 member of 'cpu_context' pertaining to
 * the given security state.
 ******************************************************************************/
u_register_t cm_get_scr_el3(uint32_t security_state)
{
	const cpu_context_t *ctx;
	const el3_state_t *state;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Read the SCR_EL3 value from the EL3 state in the cpu context */
	state = get_el3state_ctx(ctx);
	return read_ctx_reg(state, CTX_SCR_EL3);
}

/*******************************************************************************
 * This function is used to program the context that's used for exception
 * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
 * the required security state.
 ******************************************************************************/
void cm_set_next_eret_context(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	cm_set_next_context(ctx);
}