1 /* 2 * Copyright (c) 2013-2026, Arm Limited and Contributors. All rights reserved. 3 * Copyright (c) 2022, NVIDIA Corporation. All rights reserved. 4 * 5 * SPDX-License-Identifier: BSD-3-Clause 6 */ 7 8 #include <assert.h> 9 #include <stdbool.h> 10 #include <string.h> 11 12 #include <platform_def.h> 13 14 #include <arch.h> 15 #include <arch_helpers.h> 16 #include <arch_features.h> 17 #include <bl31/interrupt_mgmt.h> 18 #include <common/bl_common.h> 19 #include <common/debug.h> 20 #include <context.h> 21 #include <drivers/arm/gicv3.h> 22 #include <lib/cpus/cpu_ops.h> 23 #include <lib/cpus/errata.h> 24 #include <lib/el3_runtime/context_mgmt.h> 25 #include <lib/el3_runtime/cpu_data.h> 26 #include <lib/el3_runtime/pubsub_events.h> 27 #include <lib/extensions/amu.h> 28 #include <lib/extensions/brbe.h> 29 #include <lib/extensions/cpa2.h> 30 #include <lib/extensions/debug_v8p9.h> 31 #include <lib/extensions/fgt2.h> 32 #include <lib/extensions/idte3.h> 33 #include <lib/extensions/mpam.h> 34 #include <lib/extensions/pauth.h> 35 #include <lib/extensions/pmuv3.h> 36 #include <lib/extensions/sme.h> 37 #include <lib/extensions/spe.h> 38 #include <lib/extensions/sve.h> 39 #include <lib/extensions/sysreg128.h> 40 #include <lib/extensions/sys_reg_trace.h> 41 #include <lib/extensions/tcr2.h> 42 #include <lib/extensions/trbe.h> 43 #include <lib/extensions/trf.h> 44 #include <lib/utils.h> 45 46 #if ENABLE_FEAT_TWED 47 /* Make sure delay value fits within the range(0-15) */ 48 CASSERT(((TWED_DELAY & ~SCR_TWEDEL_MASK) == 0U), assert_twed_delay_value_check); 49 #endif /* ENABLE_FEAT_TWED */ 50 51 per_world_context_t per_world_context[CPU_CONTEXT_NUM]; 52 PER_CPU_DEFINE(world_amu_regs_t, world_amu_ctx[CPU_CONTEXT_NUM]); 53 54 static void manage_extensions_nonsecure(cpu_context_t *ctx); 55 static void manage_extensions_secure(cpu_context_t *ctx); 56 57 /* 58 * Set up EL1 context unless there is something running at EL2 and we must 59 * context switch. 60 */ 61 static void setup_el1_context(cpu_context_t *ctx, const struct entry_point_info *ep) 62 { 63 #if ((IMAGE_BL1) || (IMAGE_BL31 && (!CTX_INCLUDE_EL2_REGS))) 64 u_register_t sctlr_elx, actlr_elx; 65 66 /* 67 * Initialise SCTLR_EL1 to the reset value corresponding to the target 68 * execution state setting all fields rather than relying on the hw. 69 * Some fields have architecturally UNKNOWN reset values and these are 70 * set to zero. 71 * 72 * SCTLR.EE: Endianness is taken from the entrypoint attributes. 73 * 74 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as 75 * required by PSCI specification) 76 */ 77 sctlr_elx = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0UL; 78 if (GET_RW(ep->spsr) == MODE_RW_64) { 79 sctlr_elx |= SCTLR_EL1_RES1; 80 } else { 81 /* 82 * If the target execution state is AArch32 then the following 83 * fields need to be set. 84 * 85 * SCTRL_EL1.nTWE: Set to one so that EL0 execution of WFE 86 * instructions are not trapped to EL1. 87 * 88 * SCTLR_EL1.nTWI: Set to one so that EL0 execution of WFI 89 * instructions are not trapped to EL1. 90 * 91 * SCTLR_EL1.CP15BEN: Set to one to enable EL0 execution of the 92 * CP15DMB, CP15DSB, and CP15ISB instructions. 93 */ 94 sctlr_elx |= SCTLR_AARCH32_EL1_RES1 | SCTLR_CP15BEN_BIT 95 | SCTLR_NTWI_BIT | SCTLR_NTWE_BIT; 96 } 97 98 /* 99 * If workaround of errata 764081 for Cortex-A75 is used then set 100 * SCTLR_EL1.IESB to enable Implicit Error Synchronization Barrier. 101 */ 102 if (errata_a75_764081_applies()) { 103 sctlr_elx |= SCTLR_IESB_BIT; 104 } 105 106 /* Store the initialised SCTLR_EL1 value in the cpu_context */ 107 write_ctx_sctlr_el1_reg_errata(ctx, sctlr_elx); 108 109 /* 110 * Base the context ACTLR_EL1 on the current value, as it is 111 * implementation defined. The context restore process will write 112 * the value from the context to the actual register and can cause 113 * problems for processor cores that don't expect certain bits to 114 * be zero. 115 */ 116 actlr_elx = read_actlr_el1(); 117 write_el1_ctx_common(get_el1_sysregs_ctx(ctx), actlr_el1, actlr_elx); 118 #endif /* (IMAGE_BL1) || (IMAGE_BL31 && (!CTX_INCLUDE_EL2_REGS)) */ 119 } 120 121 /* 122 * The lower-EL context is zeroed so that no stale values leak to a world. 123 * It is assumed that an all-zero lower-EL context is good enough for it 124 * to boot correctly. However, there are very few registers where this 125 * is not true and some values need to be (re)created. 126 */ 127 static void setup_el2_context(cpu_context_t *ctx) 128 { 129 #if CTX_INCLUDE_EL2_REGS && IMAGE_BL31 130 el2_sysregs_t *el2_ctx = get_el2_sysregs_ctx(ctx); 131 132 /* 133 * These bits are set in the gicv3 driver. Losing them (especially the 134 * SRE bit) is problematic for all worlds. Henceforth recreate them. 135 */ 136 u_register_t icc_sre_el2_val = ICC_SRE_DIB_BIT | ICC_SRE_DFB_BIT | 137 ICC_SRE_EN_BIT | ICC_SRE_SRE_BIT; 138 write_el2_ctx_common(el2_ctx, icc_sre_el2, icc_sre_el2_val); 139 140 /* 141 * The actlr_el2 register can be initialized in platform's reset handler 142 * and it may contain access control bits (e.g. CLUSTERPMUEN bit). 143 */ 144 write_el2_ctx_common(el2_ctx, actlr_el2, read_actlr_el2()); 145 146 write_el2_ctx_common(el2_ctx, sctlr_el2, SCTLR_EL2_RES1); 147 148 /* 149 * Initialize registers with known disabled init values. 150 * 151 * As their value is zeroed at init, there is a chance that this can 152 * lead to unexpected behavior in lower ELs that do not initialise these 153 * registers themselves. 154 * 155 * NOTE: this is duplicate, and mutually exclusive, of the same writes 156 * to setup_el2_regs(). They must be kept in sync. 157 * 158 * Some registers' disabled init value is all zeroes which is carried 159 * forward from init. These are: 160 * * HCRX_EL2 161 * * TCR2_EL2 162 * * HAFGRTR_EL2 163 */ 164 if (is_feat_fgt_supported()) { 165 write_el2_ctx_fgt(el2_ctx, hfgitr_el2, HFGITR_EL2_INIT_VAL); 166 write_el2_ctx_fgt(el2_ctx, hfgrtr_el2, HFGRTR_EL2_INIT_VAL); 167 write_el2_ctx_fgt(el2_ctx, hfgwtr_el2, HFGWTR_EL2_INIT_VAL); 168 } 169 #endif 170 } 171 172 /* 173 * Write safe values into EL2 registers that reset into an UNKNOWN state. 174 * 175 * As their value is UNKNOWN at init, there is a chance that this can lead to 176 * unexpected behavior in lower ELs that do not initialise these registers 177 * themselves. 178 * 179 * NOTE: this is duplicate, and mutually exclusive, of the same writes to 180 * setup_el2_context(). They must be kept in sync. 181 */ 182 static void setup_el2_regs(void) 183 { 184 #if !(CTX_INCLUDE_EL2_REGS && IMAGE_BL31) 185 if (is_feat_hcx_supported()) { 186 write_hcrx_el2(HCRX_EL2_INIT_VAL); 187 } 188 189 if (is_feat_fgt_supported()) { 190 write_hfgitr_el2(HFGITR_EL2_INIT_VAL); 191 write_hfgrtr_el2(HFGRTR_EL2_INIT_VAL); 192 write_hfgwtr_el2(HFGWTR_EL2_INIT_VAL); 193 } 194 195 if (is_feat_tcr2_supported()) { 196 write_tcr2_el2(TCR2_EL2_INIT_VAL); 197 } 198 199 if (is_feat_fgt_supported() && is_feat_amu_supported()) { 200 write_hafgrtr_el2(HAFGRTR_EL2_INIT_VAL); 201 } 202 #endif 203 } 204 205 /****************************************************************************** 206 * This function performs initializations that are specific to SECURE state 207 * and updates the cpu context specified by 'ctx'. 208 *****************************************************************************/ 209 static void setup_secure_context(cpu_context_t *ctx, const struct entry_point_info *ep) 210 { 211 u_register_t scr_el3; 212 el3_state_t *state; 213 214 state = get_el3state_ctx(ctx); 215 scr_el3 = read_ctx_reg(state, CTX_SCR_EL3); 216 217 #if defined(IMAGE_BL31) && !defined(SPD_spmd) 218 /* 219 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as 220 * indicated by the interrupt routing model for BL31. 221 */ 222 scr_el3 |= get_scr_el3_from_routing_model(SECURE); 223 #endif 224 225 /* Allow access to Allocation Tags when FEAT_MTE2 is implemented and enabled. */ 226 if (is_feat_mte2_supported()) { 227 scr_el3 |= SCR_ATA_BIT; 228 } 229 230 write_ctx_reg(state, CTX_SCR_EL3, scr_el3); 231 232 manage_extensions_secure(ctx); 233 } 234 235 #if ENABLE_RME && IMAGE_BL31 236 /****************************************************************************** 237 * This function performs initializations that are specific to REALM state 238 * and updates the cpu context specified by 'ctx'. 239 * 240 * NOTE: any changes to this function must be verified by an RMMD maintainer. 241 *****************************************************************************/ 242 static void setup_realm_context(cpu_context_t *ctx, const struct entry_point_info *ep) 243 { 244 u_register_t scr_el3; 245 el3_state_t *state; 246 el2_sysregs_t *el2_ctx; 247 248 state = get_el3state_ctx(ctx); 249 scr_el3 = read_ctx_reg(state, CTX_SCR_EL3); 250 el2_ctx = get_el2_sysregs_ctx(ctx); 251 252 scr_el3 |= SCR_NS_BIT | SCR_NSE_BIT; 253 254 write_el2_ctx_common(el2_ctx, spsr_el2, SPSR_EL2_REALM); 255 256 /* CSV2 version 2 and above */ 257 if (is_feat_csv2_2_supported()) { 258 /* Enable access to the SCXTNUM_ELx registers. */ 259 scr_el3 |= SCR_EnSCXT_BIT; 260 } 261 262 if (is_feat_sctlr2_supported()) { 263 /* Set the SCTLR2En bit in SCR_EL3 to enable access to 264 * SCTLR2_ELx registers. 265 */ 266 scr_el3 |= SCR_SCTLR2En_BIT; 267 } 268 269 if (is_feat_d128_supported()) { 270 /* 271 * Set the D128En bit in SCR_EL3 to enable access to 128-bit 272 * versions of TTBR0_EL1, TTBR1_EL1, RCWMASK_EL1, RCWSMASK_EL1, 273 * PAR_EL1 and TTBR1_EL2, TTBR0_EL2 and VTTBR_EL2 registers. 274 */ 275 scr_el3 |= SCR_D128En_BIT; 276 } 277 278 write_ctx_reg(state, CTX_SCR_EL3, scr_el3); 279 280 if (is_feat_fgt2_supported()) { 281 fgt2_enable(ctx); 282 } 283 284 if (is_feat_debugv8p9_supported()) { 285 debugv8p9_extended_bp_wp_enable(ctx); 286 } 287 288 if (is_feat_brbe_supported()) { 289 brbe_enable(ctx); 290 } 291 292 /* 293 * Enable access to TPIDR2_EL0 if SME/SME2 is enabled for Non Secure world. 294 */ 295 if (is_feat_sme_supported()) { 296 sme_enable(ctx); 297 } 298 299 if (is_feat_spe_supported()) { 300 spe_disable_realm(ctx); 301 } 302 303 if (is_feat_trbe_supported()) { 304 trbe_disable_realm(ctx); 305 } 306 } 307 #endif /* ENABLE_RME && IMAGE_BL31 */ 308 309 /****************************************************************************** 310 * This function performs initializations that are specific to NON-SECURE state 311 * and updates the cpu context specified by 'ctx'. 312 *****************************************************************************/ 313 static void setup_ns_context(cpu_context_t *ctx, const struct entry_point_info *ep) 314 { 315 u_register_t scr_el3; 316 el3_state_t *state; 317 318 state = get_el3state_ctx(ctx); 319 scr_el3 = read_ctx_reg(state, CTX_SCR_EL3); 320 321 /* SCR_NS: Set the NS bit */ 322 scr_el3 |= SCR_NS_BIT; 323 324 /* Allow access to Allocation Tags when FEAT_MTE2 is implemented and enabled. */ 325 if (is_feat_mte2_supported()) { 326 scr_el3 |= SCR_ATA_BIT; 327 } 328 329 /* 330 * Pointer Authentication feature, if present, is always enabled by 331 * default for Non secure lower exception levels. We do not have an 332 * explicit flag to set it. To prevent the leakage between the worlds 333 * during world switch, we enable it only for the non-secure world. 334 * 335 * CTX_INCLUDE_PAUTH_REGS flag, is explicitly used to enable for lower 336 * exception levels of secure and realm worlds. 337 * 338 * If the Secure/realm world wants to use pointer authentication, 339 * CTX_INCLUDE_PAUTH_REGS must be explicitly set to 1, in which case 340 * it will be enabled globally for all the contexts. 341 * 342 * SCR_EL3.API: Set to one to not trap any PAuth instructions at ELs 343 * other than EL3 344 * 345 * SCR_EL3.APK: Set to one to not trap any PAuth key values at ELs other 346 * than EL3 347 */ 348 if (!is_ctx_pauth_supported()) { 349 scr_el3 |= SCR_API_BIT | SCR_APK_BIT; 350 } 351 352 #if HANDLE_EA_EL3_FIRST_NS 353 /* SCR_EL3.EA: Route External Abort and SError Interrupt to EL3. */ 354 scr_el3 |= SCR_EA_BIT; 355 #endif 356 357 #if RAS_TRAP_NS_ERR_REC_ACCESS 358 /* 359 * SCR_EL3.TERR: Trap Error record accesses. Accesses to the RAS ERR 360 * and RAS ERX registers from EL1 and EL2(from any security state) 361 * are trapped to EL3. 362 * Set here to trap only for NS EL1/EL2 363 */ 364 scr_el3 |= SCR_TERR_BIT; 365 #endif 366 367 /* CSV2 version 2 and above */ 368 if (is_feat_csv2_2_supported()) { 369 /* Enable access to the SCXTNUM_ELx registers. */ 370 scr_el3 |= SCR_EnSCXT_BIT; 371 } 372 373 #ifdef IMAGE_BL31 374 /* 375 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as 376 * indicated by the interrupt routing model for BL31. 377 */ 378 scr_el3 |= get_scr_el3_from_routing_model(NON_SECURE); 379 #endif 380 381 if (is_feat_the_supported()) { 382 /* Set the RCWMASKEn bit in SCR_EL3 to enable access to 383 * RCWMASK_EL1 and RCWSMASK_EL1 registers. 384 */ 385 scr_el3 |= SCR_RCWMASKEn_BIT; 386 } 387 388 if (is_feat_sctlr2_supported()) { 389 /* Set the SCTLR2En bit in SCR_EL3 to enable access to 390 * SCTLR2_ELx registers. 391 */ 392 scr_el3 |= SCR_SCTLR2En_BIT; 393 } 394 395 if (is_feat_d128_supported()) { 396 /* Set the D128En bit in SCR_EL3 to enable access to 128-bit 397 * versions of TTBR0_EL1, TTBR1_EL1, RCWMASK_EL1, RCWSMASK_EL1, 398 * PAR_EL1 and TTBR1_EL2, TTBR0_EL2 and VTTBR_EL2 registers. 399 */ 400 scr_el3 |= SCR_D128En_BIT; 401 } 402 403 if (is_feat_fpmr_supported()) { 404 /* Set the EnFPM bit in SCR_EL3 to enable access to FPMR 405 * register. 406 */ 407 scr_el3 |= SCR_EnFPM_BIT; 408 } 409 410 if (is_feat_aie_supported()) { 411 /* Set the AIEn bit in SCR_EL3 to enable access to (A)MAIR2 412 * system registers from NS world. 413 */ 414 scr_el3 |= SCR_AIEn_BIT; 415 } 416 417 if (is_feat_pfar_supported()) { 418 /* Set the PFAREn bit in SCR_EL3 to enable access to the PFAR 419 * system registers from NS world. 420 */ 421 scr_el3 |= SCR_PFAREn_BIT; 422 } 423 424 if (is_feat_hdbss_supported()) { 425 /* Set the HDBSSEn bit to enable access to hdbssbr_el2 and 426 * hdbssprod_el2 427 */ 428 scr_el3 |= SCR_HDBSSEn_BIT; 429 } 430 431 if (is_feat_hacdbs_supported()) { 432 /* Set the HACDBSEn bit to enable access to hacdbsbr_el2 and 433 * hacdbscons_el2 434 */ 435 scr_el3 |= SCR_HACDBSEn_BIT; 436 } 437 438 write_ctx_reg(state, CTX_SCR_EL3, scr_el3); 439 440 manage_extensions_nonsecure(ctx); 441 } 442 443 static inline ddc_cap_t read_ddc_el0 (void) 444 { 445 ddc_cap_t val = NULL; 446 #if ENABLE_FEAT_MORELLO 447 __asm__ volatile ("msr spsel, #1 \n" 448 "mrs %0, ddc \n" 449 "msr spsel, #0 \n" 450 : "=C"(val) 451 : 452 : "memory" 453 ); 454 #endif 455 return val; 456 } 457 458 /******************************************************************************* 459 * The following function performs initialization of the cpu_context 'ctx' 460 * for first use that is common to all security states, and sets the 461 * initial entrypoint state as specified by the entry_point_info structure. 462 * 463 * The EE and ST attributes are used to configure the endianness and secure 464 * timer availability for the new execution context. 465 ******************************************************************************/ 466 static void setup_context_common(cpu_context_t *ctx, const entry_point_info_t *ep) 467 { 468 u_register_t scr_el3; 469 u_register_t mdcr_el3; 470 el3_state_t *state; 471 gp_regs_t *gp_regs; 472 473 state = get_el3state_ctx(ctx); 474 475 /* Clear any residual register values from the context */ 476 zeromem(ctx, sizeof(*ctx)); 477 478 /* Start with a clean SCR_EL3 copy as all relevant values are set */ 479 scr_el3 = SCR_RESET_VAL; 480 481 /* 482 * SCR_EL3.TWE: Set to zero so that execution of WFE instructions at 483 * EL2, EL1 and EL0 are not trapped to EL3. 484 * 485 * SCR_EL3.TWI: Set to zero so that execution of WFI instructions at 486 * EL2, EL1 and EL0 are not trapped to EL3. 487 * 488 * SCR_EL3.SMD: Set to zero to enable SMC calls at EL1 and above, from 489 * both Security states and both Execution states. 490 * 491 * SCR_EL3.SIF: Set to one to disable secure instruction execution from 492 * Non-secure memory. 493 */ 494 scr_el3 &= ~(SCR_TWE_BIT | SCR_TWI_BIT | SCR_SMD_BIT); 495 496 scr_el3 |= SCR_SIF_BIT; 497 498 /* 499 * SCR_EL3.RW: Set the execution state, AArch32 or AArch64, for next 500 * Exception level as specified by SPSR. 501 */ 502 if (GET_RW(ep->spsr) == MODE_RW_64) { 503 scr_el3 |= SCR_RW_BIT; 504 } 505 506 /* 507 * SCR_EL3.HCE: Enable HVC instructions if next execution state is 508 * AArch64 and next EL is EL2, or if next execution state is AArch32 and 509 * next mode is Hyp. 510 */ 511 if (((GET_RW(ep->spsr) == MODE_RW_64) && (GET_EL(ep->spsr) == MODE_EL2)) 512 || ((GET_RW(ep->spsr) != MODE_RW_64) 513 && (GET_M32(ep->spsr) == MODE32_hyp))) { 514 scr_el3 |= SCR_HCE_BIT; 515 } 516 517 /* 518 * SCR_EL3.ST: Traps Secure EL1 accesses to the Counter-timer Physical 519 * Secure timer registers to EL3, from AArch64 state only, if specified 520 * by the entrypoint attributes. If SEL2 is present and enabled, the ST 521 * bit always behaves as 1 (i.e. secure physical timer register access 522 * is not trapped) 523 */ 524 if (EP_GET_ST(ep->h.attr) != 0U) { 525 scr_el3 |= SCR_ST_BIT; 526 } 527 528 /* 529 * If FEAT_HCX is enabled, enable access to HCRX_EL2 by setting 530 * SCR_EL3.HXEn. 531 */ 532 if (is_feat_hcx_supported()) { 533 scr_el3 |= SCR_HXEn_BIT; 534 } 535 536 /* 537 * If FEAT_LS64_ACCDATA is enabled, enable access to ACCDATA_EL1 by 538 * setting SCR_EL3.ADEn and allow the ST64BV0 instruction by setting 539 * SCR_EL3.EnAS0. 540 */ 541 if (is_feat_ls64_accdata_supported()) { 542 scr_el3 |= SCR_ADEn_BIT | SCR_EnAS0_BIT; 543 } 544 545 /* 546 * If FEAT_RNG_TRAP is enabled, all reads of the RNDR and RNDRRS 547 * registers are trapped to EL3. 548 */ 549 if (is_feat_rng_trap_supported()) { 550 scr_el3 |= SCR_TRNDR_BIT; 551 } 552 553 #if FAULT_INJECTION_SUPPORT 554 /* Enable fault injection from lower ELs */ 555 scr_el3 |= SCR_FIEN_BIT; 556 #endif 557 558 /* 559 * Enable Pointer Authentication globally for all the worlds. 560 * 561 * SCR_EL3.API: Set to one to not trap any PAuth instructions at ELs 562 * other than EL3 563 * 564 * SCR_EL3.APK: Set to one to not trap any PAuth key values at ELs other 565 * than EL3 566 */ 567 if (is_ctx_pauth_supported()) { 568 scr_el3 |= SCR_API_BIT | SCR_APK_BIT; 569 } 570 571 /* 572 * SCR_EL3.PIEN: Enable permission indirection and overlay 573 * registers for AArch64 if present. 574 */ 575 if (is_feat_sxpie_supported() || is_feat_sxpoe_supported()) { 576 scr_el3 |= SCR_PIEN_BIT; 577 } 578 579 /* SCR_EL3.GCSEn: Enable GCS registers. */ 580 if (is_feat_gcs_supported()) { 581 scr_el3 |= SCR_GCSEn_BIT; 582 } 583 584 /* SCR_EL3.FGTEn: Enable Fine Grained Virtualization Traps */ 585 if (is_feat_fgt_supported()) { 586 scr_el3 |= SCR_FGTEN_BIT; 587 } 588 589 /* SCR_EL3.ECVEn: Do not trap the CNTPOFF_EL2 register */ 590 if (is_feat_ecv_supported()) { 591 scr_el3 |= SCR_ECVEN_BIT; 592 } 593 594 /* Enable WFE trap delay in SCR_EL3 if supported and configured */ 595 if (is_feat_twed_supported()) { 596 /* Set delay in SCR_EL3 */ 597 scr_el3 &= ~(SCR_TWEDEL_MASK << SCR_TWEDEL_SHIFT); 598 scr_el3 |= ((TWED_DELAY & SCR_TWEDEL_MASK) 599 << SCR_TWEDEL_SHIFT); 600 601 /* Enable WFE delay */ 602 scr_el3 |= SCR_TWEDEn_BIT; 603 } 604 605 #if IMAGE_BL31 && defined(SPD_spmd) && SPMD_SPM_AT_SEL2 606 /* Enable S-EL2 if FEAT_SEL2 is implemented for all the contexts. */ 607 if (is_feat_sel2_supported()) { 608 scr_el3 |= SCR_EEL2_BIT; 609 } 610 #endif /* (IMAGE_BL31 && defined(SPD_spmd) && SPMD_SPM_AT_SEL2) */ 611 612 if (is_feat_mec_supported()) { 613 scr_el3 |= SCR_MECEn_BIT; 614 } 615 616 /* 617 * Populate EL3 state so that we've the right context 618 * before doing ERET 619 */ 620 write_ctx_reg(state, CTX_SCR_EL3, scr_el3); 621 write_ctx_reg(state, CTX_ELR_EL3, ep->pc); 622 write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr); 623 624 /* Start with a clean MDCR_EL3 copy as all relevant values are set */ 625 mdcr_el3 = MDCR_EL3_RESET_VAL; 626 627 /* --------------------------------------------------------------------- 628 * Initialise MDCR_EL3, setting all fields rather than relying on hw. 629 * Some fields are architecturally UNKNOWN on reset. 630 * 631 * MDCR_EL3.SDD: Set to one to disable AArch64 Secure self-hosted debug. 632 * Debug exceptions, other than Breakpoint Instruction exceptions, are 633 * disabled from all ELs in Secure state. 634 * 635 * MDCR_EL3.SPD32: Set to 0b10 to disable AArch32 Secure self-hosted 636 * privileged debug from S-EL1. 637 * 638 * MDCR_EL3.TDOSA: Set to zero so that EL2 and EL2 System register 639 * access to the powerdown debug registers do not trap to EL3. 640 * 641 * MDCR_EL3.TDA: Set to zero to allow EL0, EL1 and EL2 access to the 642 * debug registers, other than those registers that are controlled by 643 * MDCR_EL3.TDOSA. 644 */ 645 mdcr_el3 |= MDCR_SDD_BIT | MDCR_SPD32(MDCR_SPD32_DISABLE); 646 mdcr_el3 &= ~(MDCR_TDA_BIT | MDCR_TDOSA_BIT); 647 648 /* MDCR_EL3.EnSTEPOP: allow access to MDSTEPOP_EL1 */ 649 if (is_feat_step2_supported()) { 650 mdcr_el3 |= MDCR_EnSTEPOP_BIT; 651 } 652 653 write_ctx_reg(state, CTX_MDCR_EL3, mdcr_el3); 654 655 #if IMAGE_BL31 656 /* Enable FEAT_TRF for Non-Secure and prohibit for Secure state. */ 657 if (is_feat_trf_supported()) { 658 trf_enable(ctx); 659 } 660 661 if (is_feat_tcr2_supported()) { 662 tcr2_enable(ctx); 663 } 664 665 pmuv3_enable(ctx); 666 667 if (is_feat_idte3_supported()) { 668 idte3_enable(ctx); 669 } 670 #endif /* IMAGE_BL31 */ 671 672 setup_el2_context(ctx); 673 674 setup_el1_context(ctx, ep); 675 676 if (is_feat_morello_supported()) { 677 ctx->ddc_el0 = read_ddc_el0(); 678 } 679 680 /* 681 * Store the X0-X7 value from the entrypoint into the context 682 * Use memcpy as we are in control of the layout of the structures 683 */ 684 gp_regs = get_gpregs_ctx(ctx); 685 memcpy((void *)gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t)); 686 } 687 688 /******************************************************************************* 689 * Context management library initialization routine. This library is used by 690 * runtime services to share pointers to 'cpu_context' structures for secure 691 * non-secure and realm states. Management of the structures and their associated 692 * memory is not done by the context management library e.g. the PSCI service 693 * manages the cpu context used for entry from and exit to the non-secure state. 694 * The Secure payload dispatcher service manages the context(s) corresponding to 695 * the secure state. It also uses this library to get access to the non-secure 696 * state cpu context pointers. 697 * Lastly, this library provides the API to make SP_EL3 point to the cpu context 698 * which will be used for programming an entry into a lower EL. The same context 699 * will be used to save state upon exception entry from that EL. 700 ******************************************************************************/ 701 void __init cm_init(void) 702 { 703 /* 704 * The context management library has only global data to initialize, but 705 * that will be done when the BSS is zeroed out. 706 */ 707 } 708 709 /******************************************************************************* 710 * This is the high-level function used to initialize the cpu_context 'ctx' for 711 * first use. It performs initializations that are common to all security states 712 * and initializations specific to the security state specified in 'ep' 713 ******************************************************************************/ 714 void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep) 715 { 716 size_t security_state; 717 718 assert(ctx != NULL); 719 720 /* 721 * Perform initializations that are common 722 * to all security states 723 */ 724 setup_context_common(ctx, ep); 725 726 security_state = GET_SECURITY_STATE(ep->h.attr); 727 728 /* Perform security state specific initializations */ 729 switch (security_state) { 730 case SECURE: 731 setup_secure_context(ctx, ep); 732 break; 733 #if ENABLE_RME && IMAGE_BL31 734 case REALM: 735 setup_realm_context(ctx, ep); 736 break; 737 #endif 738 case NON_SECURE: 739 setup_ns_context(ctx, ep); 740 break; 741 default: 742 ERROR("Invalid security state\n"); 743 panic(); 744 break; 745 } 746 } 747 748 /******************************************************************************* 749 * Enable architecture extensions for EL3 execution. This function only updates 750 * registers in-place which are expected to either never change or be 751 * overwritten by el3_exit. Expects the core_pos of the current core as argument. 752 ******************************************************************************/ 753 void __no_pauth cm_manage_extensions_el3(unsigned int my_idx) 754 { 755 if (is_feat_pauth_supported()) { 756 pauth_init_enable_el3(); 757 } 758 759 #if IMAGE_BL31 760 if (is_feat_sve_supported()) { 761 sve_init_el3(); 762 } 763 764 if (is_feat_amu_supported()) { 765 amu_init_el3(my_idx); 766 } 767 768 if (is_feat_sme_supported()) { 769 sme_init_el3(); 770 } 771 772 if (is_feat_mpam_supported()) { 773 mpam_init_el3(); 774 } 775 776 if (is_feat_cpa2_supported()) { 777 cpa2_enable_el3(); 778 } 779 780 pmuv3_init_el3(); 781 782 /* NOTE: must be done last, makes the configuration immutable */ 783 if (is_feat_fgwte3_supported()) { 784 write_fgwte3_el3(FGWTE3_EL3_EARLY_INIT_VAL); 785 } 786 #endif /* IMAGE_BL31 */ 787 } 788 789 /****************************************************************************** 790 * Function to initialise the registers with the RESET values in the context 791 * memory, which are maintained per world. 792 ******************************************************************************/ 793 static void cm_el3_arch_init_per_world(per_world_context_t *per_world_ctx) 794 { 795 per_world_ctx->ctx_cptr_el3 = CPTR_EL3_RESET_VAL; 796 per_world_ctx->ctx_mpam3_el3 = MPAM3_EL3_RESET_VAL; 797 } 798 799 /******************************************************************************* 800 * Initialise per_world_context for Non-Secure world. 801 * This function enables the architecture extensions, which have same value 802 * across the cores for the non-secure world. 803 ******************************************************************************/ 804 static void manage_extensions_nonsecure_per_world(void) 805 { 806 cm_el3_arch_init_per_world(&per_world_context[CPU_CONTEXT_NS]); 807 808 #if IMAGE_BL31 809 if (is_feat_sme_supported()) { 810 sme_enable_per_world(&per_world_context[CPU_CONTEXT_NS]); 811 } 812 813 if (is_feat_sve_supported()) { 814 sve_enable_per_world(&per_world_context[CPU_CONTEXT_NS]); 815 } 816 817 if (is_feat_amu_supported()) { 818 amu_enable_per_world(&per_world_context[CPU_CONTEXT_NS]); 819 } 820 821 if (is_feat_sys_reg_trace_supported()) { 822 sys_reg_trace_enable_per_world(&per_world_context[CPU_CONTEXT_NS]); 823 } 824 825 if (is_feat_mpam_supported()) { 826 mpam_enable_per_world(&per_world_context[CPU_CONTEXT_NS]); 827 } 828 829 if (is_feat_idte3_supported()) { 830 idte3_init_cached_idregs_per_world(CPU_CONTEXT_NS); 831 } 832 #endif /* IMAGE_BL31 */ 833 } 834 835 /******************************************************************************* 836 * Initialise per_world_context for Secure world. 837 * This function enables the architecture extensions, which have same value 838 * across the cores for the secure world. 839 ******************************************************************************/ 840 static void manage_extensions_secure_per_world(void) 841 { 842 cm_el3_arch_init_per_world(&per_world_context[CPU_CONTEXT_SECURE]); 843 844 #if IMAGE_BL31 845 if (is_feat_sme_supported()) { 846 847 if (ENABLE_SME_FOR_SWD) { 848 /* 849 * Enable SME, SVE, FPU/SIMD in secure context, SPM must ensure 850 * SME, SVE, and FPU/SIMD context properly managed. 851 */ 852 sme_enable_per_world(&per_world_context[CPU_CONTEXT_SECURE]); 853 } else { 854 /* 855 * Disable SME, SVE, FPU/SIMD in secure context so non-secure 856 * world can safely use the associated registers. 857 */ 858 sme_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]); 859 } 860 } 861 if (is_feat_sve_supported()) { 862 if (ENABLE_SVE_FOR_SWD) { 863 /* 864 * Enable SVE and FPU in secure context, SPM must ensure 865 * that the SVE and FPU register contexts are properly managed. 866 */ 867 sve_enable_per_world(&per_world_context[CPU_CONTEXT_SECURE]); 868 } else { 869 /* 870 * Disable SVE and FPU in secure context so non-secure world 871 * can safely use them. 872 */ 873 sve_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]); 874 } 875 } 876 877 /* NS can access this but Secure shouldn't */ 878 if (is_feat_sys_reg_trace_supported()) { 879 sys_reg_trace_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]); 880 } 881 882 if (is_feat_idte3_supported()) { 883 idte3_init_cached_idregs_per_world(CPU_CONTEXT_SECURE); 884 } 885 #endif /* IMAGE_BL31 */ 886 } 887 888 static void manage_extensions_realm_per_world(void) 889 { 890 #if ENABLE_RME && IMAGE_BL31 891 cm_el3_arch_init_per_world(&per_world_context[CPU_CONTEXT_REALM]); 892 893 if (is_feat_sve_supported()) { 894 /* 895 * Enable SVE and FPU in realm context when it is enabled for NS. 896 * Realm manager must ensure that the SVE and FPU register 897 * contexts are properly managed. 898 */ 899 sve_enable_per_world(&per_world_context[CPU_CONTEXT_REALM]); 900 } 901 902 /* NS can access this but Realm shouldn't */ 903 if (is_feat_sys_reg_trace_supported()) { 904 sys_reg_trace_disable_per_world(&per_world_context[CPU_CONTEXT_REALM]); 905 } 906 907 /* 908 * If SME/SME2 is supported and enabled for NS world, then disable trapping 909 * of SME instructions for Realm world. RMM will save/restore required 910 * registers that are shared with SVE/FPU so that Realm can use FPU or SVE. 911 */ 912 if (is_feat_sme_supported()) { 913 sme_enable_per_world(&per_world_context[CPU_CONTEXT_REALM]); 914 } 915 916 /* 917 * If FEAT_MPAM is supported and enabled, then disable trapping access 918 * to the MPAM registers for Realm world. Instead, RMM will configure 919 * the access to be trapped by itself so it can inject undefined aborts 920 * back to the Realm. 921 */ 922 if (is_feat_mpam_supported()) { 923 mpam_enable_per_world(&per_world_context[CPU_CONTEXT_REALM]); 924 } 925 926 if (is_feat_idte3_supported()) { 927 idte3_init_cached_idregs_per_world(CPU_CONTEXT_REALM); 928 } 929 #endif /* ENABLE_RME && IMAGE_BL31 */ 930 } 931 932 void cm_manage_extensions_per_world(void) 933 { 934 manage_extensions_nonsecure_per_world(); 935 manage_extensions_secure_per_world(); 936 manage_extensions_realm_per_world(); 937 } 938 939 void cm_init_percpu_once_regs(void) 940 { 941 #if IMAGE_BL31 942 if (is_feat_idte3_supported()) { 943 idte3_init_percpu_once_regs(CPU_CONTEXT_NS); 944 idte3_init_percpu_once_regs(CPU_CONTEXT_SECURE); 945 #if ENABLE_RME 946 idte3_init_percpu_once_regs(CPU_CONTEXT_REALM); 947 #endif /* ENABLE_RME */ 948 } 949 #endif /* IMAGE_BL31 */ 950 } 951 952 /******************************************************************************* 953 * Enable architecture extensions on first entry to Non-secure world. 954 ******************************************************************************/ 955 static void manage_extensions_nonsecure(cpu_context_t *ctx) 956 { 957 #if IMAGE_BL31 958 /* NOTE: registers are not context switched */ 959 if (is_feat_amu_supported()) { 960 amu_enable(ctx); 961 } 962 963 if (is_feat_sme_supported()) { 964 sme_enable(ctx); 965 } 966 967 if (is_feat_fgt2_supported()) { 968 fgt2_enable(ctx); 969 } 970 971 if (is_feat_debugv8p9_supported()) { 972 debugv8p9_extended_bp_wp_enable(ctx); 973 } 974 975 if (is_feat_spe_supported()) { 976 spe_enable_ns(ctx); 977 } 978 979 if (is_feat_trbe_supported()) { 980 if (check_if_trbe_disable_affected_core()) { 981 trbe_disable_ns(ctx); 982 } else { 983 trbe_enable_ns(ctx); 984 } 985 } 986 987 if (is_feat_brbe_supported()) { 988 brbe_enable(ctx); 989 } 990 #endif /* IMAGE_BL31 */ 991 } 992 993 #if INIT_UNUSED_NS_EL2 994 /******************************************************************************* 995 * Enable architecture extensions in-place at EL2 on first entry to Non-secure 996 * world when EL2 is empty and unused. 997 ******************************************************************************/ 998 static void manage_extensions_nonsecure_el2_unused(void) 999 { 1000 #if IMAGE_BL31 1001 if (is_feat_spe_supported()) { 1002 spe_init_el2_unused(); 1003 } 1004 1005 if (is_feat_amu_supported()) { 1006 amu_init_el2_unused(); 1007 } 1008 1009 if (is_feat_mpam_supported()) { 1010 mpam_init_el2_unused(); 1011 } 1012 1013 if (is_feat_trbe_supported()) { 1014 trbe_init_el2_unused(); 1015 } 1016 1017 if (is_feat_sys_reg_trace_supported()) { 1018 sys_reg_trace_init_el2_unused(); 1019 } 1020 1021 if (is_feat_trf_supported()) { 1022 trf_init_el2_unused(); 1023 } 1024 1025 pmuv3_init_el2_unused(); 1026 1027 if (is_feat_sve_supported()) { 1028 sve_init_el2_unused(); 1029 } 1030 1031 if (is_feat_sme_supported()) { 1032 sme_init_el2_unused(); 1033 } 1034 1035 if (is_feat_mops_supported() && is_feat_hcx_supported()) { 1036 write_hcrx_el2(read_hcrx_el2() | HCRX_EL2_MSCEn_BIT); 1037 } 1038 1039 if (is_feat_pauth_supported()) { 1040 pauth_enable_el2(); 1041 } 1042 #endif /* IMAGE_BL31 */ 1043 } 1044 #endif /* INIT_UNUSED_NS_EL2 */ 1045 1046 /******************************************************************************* 1047 * Enable architecture extensions on first entry to Secure world. 1048 ******************************************************************************/ 1049 static void manage_extensions_secure(cpu_context_t *ctx) 1050 { 1051 #if IMAGE_BL31 1052 if (is_feat_sme_supported()) { 1053 if (ENABLE_SME_FOR_SWD) { 1054 /* 1055 * Enable SME, SVE, FPU/SIMD in secure context, secure manager 1056 * must ensure SME, SVE, and FPU/SIMD context properly managed. 1057 */ 1058 sme_init_el3(); 1059 sme_enable(ctx); 1060 } else { 1061 /* 1062 * Disable SME, SVE, FPU/SIMD in secure context so non-secure 1063 * world can safely use the associated registers. 1064 */ 1065 sme_disable(ctx); 1066 } 1067 } 1068 1069 if (is_feat_spe_supported()) { 1070 spe_disable_secure(ctx); 1071 } 1072 1073 if (is_feat_trbe_supported()) { 1074 trbe_disable_secure(ctx); 1075 } 1076 #endif /* IMAGE_BL31 */ 1077 } 1078 1079 /******************************************************************************* 1080 * The following function initializes the cpu_context for the current CPU 1081 * for first use, and sets the initial entrypoint state as specified by the 1082 * entry_point_info structure. 1083 ******************************************************************************/ 1084 void cm_init_my_context(const entry_point_info_t *ep) 1085 { 1086 cpu_context_t *ctx; 1087 ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr)); 1088 cm_setup_context(ctx, ep); 1089 } 1090 1091 /* EL2 present but unused, need to disable safely. SCTLR_EL2 can be ignored */ 1092 static void init_nonsecure_el2_unused(cpu_context_t *ctx) 1093 { 1094 #if INIT_UNUSED_NS_EL2 1095 u_register_t hcr_el2 = HCR_RESET_VAL; 1096 u_register_t mdcr_el2; 1097 u_register_t scr_el3; 1098 1099 scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3); 1100 1101 /* Set EL2 register width: Set HCR_EL2.RW to match SCR_EL3.RW */ 1102 if ((scr_el3 & SCR_RW_BIT) != 0U) { 1103 hcr_el2 |= HCR_RW_BIT; 1104 } 1105 1106 write_hcr_el2(hcr_el2); 1107 1108 /* 1109 * Initialise CPTR_EL2 setting all fields rather than relying on the hw. 1110 * All fields have architecturally UNKNOWN reset values. 1111 */ 1112 write_cptr_el2(CPTR_EL2_RESET_VAL); 1113 1114 /* 1115 * Initialise CNTHCTL_EL2. All fields are architecturally UNKNOWN on 1116 * reset and are set to zero except for field(s) listed below. 1117 * 1118 * CNTHCTL_EL2.EL1PTEN: Set to one to disable traps to Hyp mode of 1119 * Non-secure EL0 and EL1 accesses to the physical timer registers. 1120 * 1121 * CNTHCTL_EL2.EL1PCTEN: Set to one to disable traps to Hyp mode of 1122 * Non-secure EL0 and EL1 accesses to the physical counter registers. 1123 */ 1124 write_cnthctl_el2(CNTHCTL_RESET_VAL | EL1PCEN_BIT | EL1PCTEN_BIT); 1125 1126 /* 1127 * Initialise CNTVOFF_EL2 to zero as it resets to an architecturally 1128 * UNKNOWN value. 1129 */ 1130 write_cntvoff_el2(0); 1131 1132 /* 1133 * Set VPIDR_EL2 and VMPIDR_EL2 to match MIDR_EL1 and MPIDR_EL1 1134 * respectively. 1135 */ 1136 write_vpidr_el2(read_midr_el1()); 1137 write_vmpidr_el2(read_mpidr_el1()); 1138 1139 /* 1140 * Initialise VTTBR_EL2. All fields are architecturally UNKNOWN on reset. 1141 * 1142 * VTTBR_EL2.VMID: Set to zero. Even though EL1&0 stage 2 address 1143 * translation is disabled, cache maintenance operations depend on the 1144 * VMID. 1145 * 1146 * VTTBR_EL2.BADDR: Set to zero as EL1&0 stage 2 address translation is 1147 * disabled. 1148 */ 1149 write_vttbr_el2(VTTBR_RESET_VAL & 1150 ~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT) | 1151 (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT))); 1152 1153 /* 1154 * Initialise MDCR_EL2, setting all fields rather than relying on hw. 1155 * Some fields are architecturally UNKNOWN on reset. 1156 * 1157 * MDCR_EL2.TDRA: Set to zero so that Non-secure EL0 and EL1 System 1158 * register accesses to the Debug ROM registers are not trapped to EL2. 1159 * 1160 * MDCR_EL2.TDOSA: Set to zero so that Non-secure EL1 System register 1161 * accesses to the powerdown debug registers are not trapped to EL2. 1162 * 1163 * MDCR_EL2.TDA: Set to zero so that System register accesses to the 1164 * debug registers do not trap to EL2. 1165 * 1166 * MDCR_EL2.TDE: Set to zero so that debug exceptions are not routed to 1167 * EL2. 1168 */ 1169 mdcr_el2 = MDCR_EL2_RESET_VAL & 1170 ~(MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT | MDCR_EL2_TDA_BIT | 1171 MDCR_EL2_TDE_BIT); 1172 1173 write_mdcr_el2(mdcr_el2); 1174 1175 /* 1176 * Initialise HSTR_EL2. All fields are architecturally UNKNOWN on reset. 1177 * 1178 * HSTR_EL2.T<n>: Set all these fields to zero so that Non-secure EL0 or 1179 * EL1 accesses to System registers do not trap to EL2. 1180 */ 1181 write_hstr_el2(HSTR_EL2_RESET_VAL & ~(HSTR_EL2_T_MASK)); 1182 1183 /* 1184 * Initialise CNTHP_CTL_EL2. All fields are architecturally UNKNOWN on 1185 * reset. 1186 * 1187 * CNTHP_CTL_EL2:ENABLE: Set to zero to disable the EL2 physical timer 1188 * and prevent timer interrupts. 1189 */ 1190 write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL & ~(CNTHP_CTL_ENABLE_BIT)); 1191 1192 manage_extensions_nonsecure_el2_unused(); 1193 #endif /* INIT_UNUSED_NS_EL2 */ 1194 } 1195 1196 /******************************************************************************* 1197 * Prepare the CPU system registers for first entry into realm, secure, or 1198 * normal world. 1199 * 1200 * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized 1201 * If execution is requested to non-secure EL1 or svc mode, and the CPU supports 1202 * EL2 then EL2 is disabled by configuring all necessary EL2 registers. 1203 * For all entries, the EL1 registers are initialized from the cpu_context 1204 ******************************************************************************/ 1205 void cm_prepare_el3_exit(size_t security_state) 1206 { 1207 u_register_t sctlr_el2, scr_el3; 1208 cpu_context_t *ctx = cm_get_context(security_state); 1209 1210 assert(ctx != NULL); 1211 1212 if (security_state == NON_SECURE) { 1213 uint64_t el2_implemented = el_implemented(2); 1214 1215 scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), 1216 CTX_SCR_EL3); 1217 1218 if (el2_implemented != EL_IMPL_NONE) { 1219 setup_el2_regs(); 1220 1221 /* Condition to ensure EL2 is being used. */ 1222 if ((scr_el3 & SCR_HCE_BIT) != 0U) { 1223 /* Initialize SCTLR_EL2 register with reset value. */ 1224 sctlr_el2 = SCTLR_EL2_RES1; 1225 1226 /* 1227 * If workaround of errata 764081 for Cortex-A75 1228 * is used then set SCTLR_EL2.IESB to enable 1229 * Implicit Error Synchronization Barrier. 1230 */ 1231 if (errata_a75_764081_applies()) { 1232 sctlr_el2 |= SCTLR_IESB_BIT; 1233 } 1234 1235 write_sctlr_el2(sctlr_el2); 1236 } else { 1237 /* 1238 * (scr_el3 & SCR_HCE_BIT==0) 1239 * EL2 implemented but unused. 1240 */ 1241 init_nonsecure_el2_unused(ctx); 1242 } 1243 } 1244 1245 if (is_feat_fgwte3_supported()) { 1246 /* 1247 * TCR_EL3 and ACTLR_EL3 could be overwritten 1248 * by platforms and hence is locked a bit late. 1249 */ 1250 write_fgwte3_el3(FGWTE3_EL3_LATE_INIT_VAL); 1251 } 1252 } 1253 #if !CTX_INCLUDE_EL2_REGS || IMAGE_BL1 1254 /* Restore EL1 system registers, only when CTX_INCLUDE_EL2_REGS=0 */ 1255 cm_el1_sysregs_context_restore(security_state); 1256 #endif 1257 cm_set_next_eret_context(security_state); 1258 } 1259 1260 /* Assumes prepare_el3_entry() has disabled counters 2 and 3 */ 1261 void cm_sysregs_context_save_amu(unsigned int security_state) 1262 { 1263 world_amu_regs_t *ctx = PER_CPU_CUR(world_amu_ctx[get_cpu_context_index(security_state)]); 1264 1265 ctx->amevcntr02_el0 = read_amevcntr02_el0(); 1266 ctx->amevcntr03_el0 = read_amevcntr03_el0(); 1267 } 1268 1269 void cm_sysregs_context_restore_amu(unsigned int security_state) 1270 { 1271 world_amu_regs_t *ctx = PER_CPU_CUR(world_amu_ctx[get_cpu_context_index(security_state)]); 1272 1273 write_amevcntr02_el0(ctx->amevcntr02_el0); 1274 write_amevcntr03_el0(ctx->amevcntr03_el0); 1275 } 1276 1277 #if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) 1278 1279 static void el2_sysregs_context_save_fgt(el2_sysregs_t *ctx) 1280 { 1281 write_el2_ctx_fgt(ctx, hdfgrtr_el2, read_hdfgrtr_el2()); 1282 if (is_feat_amu_supported()) { 1283 write_el2_ctx_fgt(ctx, hafgrtr_el2, read_hafgrtr_el2()); 1284 } 1285 write_el2_ctx_fgt(ctx, hdfgwtr_el2, read_hdfgwtr_el2()); 1286 write_el2_ctx_fgt(ctx, hfgitr_el2, read_hfgitr_el2()); 1287 write_el2_ctx_fgt(ctx, hfgrtr_el2, read_hfgrtr_el2()); 1288 write_el2_ctx_fgt(ctx, hfgwtr_el2, read_hfgwtr_el2()); 1289 } 1290 1291 static void el2_sysregs_context_restore_fgt(el2_sysregs_t *ctx) 1292 { 1293 write_hdfgrtr_el2(read_el2_ctx_fgt(ctx, hdfgrtr_el2)); 1294 if (is_feat_amu_supported()) { 1295 write_hafgrtr_el2(read_el2_ctx_fgt(ctx, hafgrtr_el2)); 1296 } 1297 write_hdfgwtr_el2(read_el2_ctx_fgt(ctx, hdfgwtr_el2)); 1298 write_hfgitr_el2(read_el2_ctx_fgt(ctx, hfgitr_el2)); 1299 write_hfgrtr_el2(read_el2_ctx_fgt(ctx, hfgrtr_el2)); 1300 write_hfgwtr_el2(read_el2_ctx_fgt(ctx, hfgwtr_el2)); 1301 } 1302 1303 static void el2_sysregs_context_save_fgt2(el2_sysregs_t *ctx) 1304 { 1305 write_el2_ctx_fgt2(ctx, hdfgrtr2_el2, read_hdfgrtr2_el2()); 1306 write_el2_ctx_fgt2(ctx, hdfgwtr2_el2, read_hdfgwtr2_el2()); 1307 write_el2_ctx_fgt2(ctx, hfgitr2_el2, read_hfgitr2_el2()); 1308 write_el2_ctx_fgt2(ctx, hfgrtr2_el2, read_hfgrtr2_el2()); 1309 write_el2_ctx_fgt2(ctx, hfgwtr2_el2, read_hfgwtr2_el2()); 1310 } 1311 1312 static void el2_sysregs_context_restore_fgt2(el2_sysregs_t *ctx) 1313 { 1314 write_hdfgrtr2_el2(read_el2_ctx_fgt2(ctx, hdfgrtr2_el2)); 1315 write_hdfgwtr2_el2(read_el2_ctx_fgt2(ctx, hdfgwtr2_el2)); 1316 write_hfgitr2_el2(read_el2_ctx_fgt2(ctx, hfgitr2_el2)); 1317 write_hfgrtr2_el2(read_el2_ctx_fgt2(ctx, hfgrtr2_el2)); 1318 write_hfgwtr2_el2(read_el2_ctx_fgt2(ctx, hfgwtr2_el2)); 1319 } 1320 1321 static void el2_sysregs_context_save_mpam(el2_sysregs_t *ctx) 1322 { 1323 u_register_t mpam_idr = read_mpamidr_el1(); 1324 1325 write_el2_ctx_mpam(ctx, mpam2_el2, read_mpam2_el2()); 1326 1327 /* 1328 * The context registers that we intend to save would be part of the 1329 * PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1. 1330 */ 1331 if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) { 1332 return; 1333 } 1334 1335 /* 1336 * MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 are always present if 1337 * MPAMIDR_HAS_HCR_BIT == 1. 1338 */ 1339 write_el2_ctx_mpam(ctx, mpamhcr_el2, read_mpamhcr_el2()); 1340 write_el2_ctx_mpam(ctx, mpamvpm0_el2, read_mpamvpm0_el2()); 1341 write_el2_ctx_mpam(ctx, mpamvpmv_el2, read_mpamvpmv_el2()); 1342 1343 /* 1344 * The number of MPAMVPM registers is implementation defined, their 1345 * number is stored in the MPAMIDR_EL1 register. 1346 */ 1347 switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) { 1348 case 7: 1349 write_el2_ctx_mpam(ctx, mpamvpm7_el2, read_mpamvpm7_el2()); 1350 __fallthrough; 1351 case 6: 1352 write_el2_ctx_mpam(ctx, mpamvpm6_el2, read_mpamvpm6_el2()); 1353 __fallthrough; 1354 case 5: 1355 write_el2_ctx_mpam(ctx, mpamvpm5_el2, read_mpamvpm5_el2()); 1356 __fallthrough; 1357 case 4: 1358 write_el2_ctx_mpam(ctx, mpamvpm4_el2, read_mpamvpm4_el2()); 1359 __fallthrough; 1360 case 3: 1361 write_el2_ctx_mpam(ctx, mpamvpm3_el2, read_mpamvpm3_el2()); 1362 __fallthrough; 1363 case 2: 1364 write_el2_ctx_mpam(ctx, mpamvpm2_el2, read_mpamvpm2_el2()); 1365 __fallthrough; 1366 case 1: 1367 write_el2_ctx_mpam(ctx, mpamvpm1_el2, read_mpamvpm1_el2()); 1368 break; 1369 } 1370 } 1371 1372 static void el2_sysregs_context_restore_mpam(el2_sysregs_t *ctx) 1373 { 1374 u_register_t mpam_idr = read_mpamidr_el1(); 1375 1376 write_mpam2_el2(read_el2_ctx_mpam(ctx, mpam2_el2)); 1377 1378 if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) { 1379 return; 1380 } 1381 1382 write_mpamhcr_el2(read_el2_ctx_mpam(ctx, mpamhcr_el2)); 1383 write_mpamvpm0_el2(read_el2_ctx_mpam(ctx, mpamvpm0_el2)); 1384 write_mpamvpmv_el2(read_el2_ctx_mpam(ctx, mpamvpmv_el2)); 1385 1386 switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) { 1387 case 7: 1388 write_mpamvpm7_el2(read_el2_ctx_mpam(ctx, mpamvpm7_el2)); 1389 __fallthrough; 1390 case 6: 1391 write_mpamvpm6_el2(read_el2_ctx_mpam(ctx, mpamvpm6_el2)); 1392 __fallthrough; 1393 case 5: 1394 write_mpamvpm5_el2(read_el2_ctx_mpam(ctx, mpamvpm5_el2)); 1395 __fallthrough; 1396 case 4: 1397 write_mpamvpm4_el2(read_el2_ctx_mpam(ctx, mpamvpm4_el2)); 1398 __fallthrough; 1399 case 3: 1400 write_mpamvpm3_el2(read_el2_ctx_mpam(ctx, mpamvpm3_el2)); 1401 __fallthrough; 1402 case 2: 1403 write_mpamvpm2_el2(read_el2_ctx_mpam(ctx, mpamvpm2_el2)); 1404 __fallthrough; 1405 case 1: 1406 write_mpamvpm1_el2(read_el2_ctx_mpam(ctx, mpamvpm1_el2)); 1407 break; 1408 } 1409 } 1410 1411 /* --------------------------------------------------------------------------- 1412 * The following registers are not added: 1413 * ICH_AP0R<n>_EL2 1414 * ICH_AP1R<n>_EL2 1415 * ICH_LR<n>_EL2 1416 * 1417 * NOTE: For a system with S-EL2 present but not enabled, accessing 1418 * ICC_SRE_EL2 is undefined from EL3. To workaround this change the 1419 * SCR_EL3.NS = 1 before accessing this register. 1420 * --------------------------------------------------------------------------- 1421 */ 1422 void cm_el2_sysregs_context_save_gic(uint32_t security_state) 1423 { 1424 el2_sysregs_t *ctx = get_el2_sysregs_ctx(cm_get_context(security_state)); 1425 1426 u_register_t scr_el3 = read_scr_el3(); 1427 1428 #if defined(SPD_spmd) && SPMD_SPM_AT_SEL2 1429 write_el2_ctx_common(ctx, icc_sre_el2, read_icc_sre_el2()); 1430 #else 1431 write_scr_el3(scr_el3 | SCR_NS_BIT); 1432 isb(); 1433 1434 write_el2_ctx_common(ctx, icc_sre_el2, read_icc_sre_el2()); 1435 1436 write_scr_el3(scr_el3); 1437 isb(); 1438 #endif 1439 write_el2_ctx_common(ctx, ich_hcr_el2, read_ich_hcr_el2()); 1440 1441 if (errata_ich_vmcr_el2_applies()) { 1442 if (security_state == SECURE) { 1443 write_scr_el3(scr_el3 & ~SCR_NS_BIT); 1444 } else { 1445 write_scr_el3(scr_el3 | SCR_NS_BIT); 1446 } 1447 isb(); 1448 } 1449 1450 write_el2_ctx_common(ctx, ich_vmcr_el2, read_ich_vmcr_el2()); 1451 1452 if (errata_ich_vmcr_el2_applies()) { 1453 write_scr_el3(scr_el3); 1454 isb(); 1455 } 1456 } 1457 1458 void cm_el2_sysregs_context_restore_gic(uint32_t security_state) 1459 { 1460 el2_sysregs_t *ctx = get_el2_sysregs_ctx(cm_get_context(security_state)); 1461 1462 u_register_t scr_el3 = read_scr_el3(); 1463 1464 #if defined(SPD_spmd) && SPMD_SPM_AT_SEL2 1465 write_icc_sre_el2(read_el2_ctx_common(ctx, icc_sre_el2)); 1466 #else 1467 write_scr_el3(scr_el3 | SCR_NS_BIT); 1468 isb(); 1469 1470 write_icc_sre_el2(read_el2_ctx_common(ctx, icc_sre_el2)); 1471 1472 write_scr_el3(scr_el3); 1473 isb(); 1474 #endif 1475 write_ich_hcr_el2(read_el2_ctx_common(ctx, ich_hcr_el2)); 1476 1477 if (errata_ich_vmcr_el2_applies()) { 1478 if (security_state == SECURE) { 1479 write_scr_el3(scr_el3 & ~SCR_NS_BIT); 1480 } else { 1481 write_scr_el3(scr_el3 | SCR_NS_BIT); 1482 } 1483 isb(); 1484 } 1485 1486 write_ich_vmcr_el2(read_el2_ctx_common(ctx, ich_vmcr_el2)); 1487 1488 if (errata_ich_vmcr_el2_applies()) { 1489 write_scr_el3(scr_el3); 1490 isb(); 1491 } 1492 } 1493 1494 /* ----------------------------------------------------- 1495 * The following registers are not added: 1496 * AMEVCNTVOFF0<n>_EL2 1497 * AMEVCNTVOFF1<n>_EL2 1498 * ----------------------------------------------------- 1499 */ 1500 static void el2_sysregs_context_save_common(el2_sysregs_t *ctx) 1501 { 1502 write_el2_ctx_common(ctx, actlr_el2, read_actlr_el2()); 1503 write_el2_ctx_common(ctx, afsr0_el2, read_afsr0_el2()); 1504 write_el2_ctx_common(ctx, afsr1_el2, read_afsr1_el2()); 1505 write_el2_ctx_common(ctx, amair_el2, read_amair_el2()); 1506 write_el2_ctx_common(ctx, cnthctl_el2, read_cnthctl_el2()); 1507 write_el2_ctx_common(ctx, cntvoff_el2, read_cntvoff_el2()); 1508 write_el2_ctx_common(ctx, cptr_el2, read_cptr_el2()); 1509 if (CTX_INCLUDE_AARCH32_REGS) { 1510 write_el2_ctx_common(ctx, dbgvcr32_el2, read_dbgvcr32_el2()); 1511 } 1512 write_el2_ctx_common(ctx, elr_el2, read_elr_el2()); 1513 write_el2_ctx_common(ctx, esr_el2, read_esr_el2()); 1514 write_el2_ctx_common(ctx, far_el2, read_far_el2()); 1515 write_el2_ctx_common(ctx, hacr_el2, read_hacr_el2()); 1516 write_el2_ctx_common(ctx, hcr_el2, read_hcr_el2()); 1517 write_el2_ctx_common(ctx, hpfar_el2, read_hpfar_el2()); 1518 write_el2_ctx_common(ctx, hstr_el2, read_hstr_el2()); 1519 write_el2_ctx_common(ctx, mair_el2, read_mair_el2()); 1520 write_el2_ctx_common(ctx, mdcr_el2, read_mdcr_el2()); 1521 write_el2_ctx_common(ctx, sctlr_el2, read_sctlr_el2()); 1522 write_el2_ctx_common(ctx, spsr_el2, read_spsr_el2()); 1523 write_el2_ctx_common(ctx, sp_el2, read_sp_el2()); 1524 write_el2_ctx_common(ctx, tcr_el2, read_tcr_el2()); 1525 write_el2_ctx_common(ctx, tpidr_el2, read_tpidr_el2()); 1526 write_el2_ctx_common(ctx, vbar_el2, read_vbar_el2()); 1527 write_el2_ctx_common(ctx, vmpidr_el2, read_vmpidr_el2()); 1528 write_el2_ctx_common(ctx, vpidr_el2, read_vpidr_el2()); 1529 write_el2_ctx_common(ctx, vtcr_el2, read_vtcr_el2()); 1530 1531 write_el2_ctx_common_sysreg128(ctx, ttbr0_el2, read_ttbr0_el2()); 1532 write_el2_ctx_common_sysreg128(ctx, vttbr_el2, read_vttbr_el2()); 1533 } 1534 1535 static void el2_sysregs_context_restore_common(el2_sysregs_t *ctx) 1536 { 1537 write_actlr_el2(read_el2_ctx_common(ctx, actlr_el2)); 1538 write_afsr0_el2(read_el2_ctx_common(ctx, afsr0_el2)); 1539 write_afsr1_el2(read_el2_ctx_common(ctx, afsr1_el2)); 1540 write_amair_el2(read_el2_ctx_common(ctx, amair_el2)); 1541 write_cnthctl_el2(read_el2_ctx_common(ctx, cnthctl_el2)); 1542 write_cntvoff_el2(read_el2_ctx_common(ctx, cntvoff_el2)); 1543 write_cptr_el2(read_el2_ctx_common(ctx, cptr_el2)); 1544 if (CTX_INCLUDE_AARCH32_REGS) { 1545 write_dbgvcr32_el2(read_el2_ctx_common(ctx, dbgvcr32_el2)); 1546 } 1547 write_elr_el2(read_el2_ctx_common(ctx, elr_el2)); 1548 write_esr_el2(read_el2_ctx_common(ctx, esr_el2)); 1549 write_far_el2(read_el2_ctx_common(ctx, far_el2)); 1550 write_hacr_el2(read_el2_ctx_common(ctx, hacr_el2)); 1551 write_hcr_el2(read_el2_ctx_common(ctx, hcr_el2)); 1552 write_hpfar_el2(read_el2_ctx_common(ctx, hpfar_el2)); 1553 write_hstr_el2(read_el2_ctx_common(ctx, hstr_el2)); 1554 write_mair_el2(read_el2_ctx_common(ctx, mair_el2)); 1555 write_mdcr_el2(read_el2_ctx_common(ctx, mdcr_el2)); 1556 write_sctlr_el2(read_el2_ctx_common(ctx, sctlr_el2)); 1557 write_spsr_el2(read_el2_ctx_common(ctx, spsr_el2)); 1558 write_sp_el2(read_el2_ctx_common(ctx, sp_el2)); 1559 write_tcr_el2(read_el2_ctx_common(ctx, tcr_el2)); 1560 write_tpidr_el2(read_el2_ctx_common(ctx, tpidr_el2)); 1561 write_ttbr0_el2(read_el2_ctx_common(ctx, ttbr0_el2)); 1562 write_vbar_el2(read_el2_ctx_common(ctx, vbar_el2)); 1563 write_vmpidr_el2(read_el2_ctx_common(ctx, vmpidr_el2)); 1564 write_vpidr_el2(read_el2_ctx_common(ctx, vpidr_el2)); 1565 write_vtcr_el2(read_el2_ctx_common(ctx, vtcr_el2)); 1566 write_vttbr_el2(read_el2_ctx_common(ctx, vttbr_el2)); 1567 } 1568 1569 /******************************************************************************* 1570 * Save EL2 sysreg context 1571 ******************************************************************************/ 1572 void cm_el2_sysregs_context_save(uint32_t security_state) 1573 { 1574 cpu_context_t *ctx; 1575 el2_sysregs_t *el2_sysregs_ctx; 1576 1577 ctx = cm_get_context(security_state); 1578 assert(ctx != NULL); 1579 1580 el2_sysregs_ctx = get_el2_sysregs_ctx(ctx); 1581 1582 el2_sysregs_context_save_common(el2_sysregs_ctx); 1583 1584 if (is_feat_mte2_supported()) { 1585 write_el2_ctx_mte2(el2_sysregs_ctx, tfsr_el2, read_tfsr_el2()); 1586 } 1587 1588 if (is_feat_mpam_supported()) { 1589 el2_sysregs_context_save_mpam(el2_sysregs_ctx); 1590 } 1591 1592 if (is_feat_fgt_supported()) { 1593 el2_sysregs_context_save_fgt(el2_sysregs_ctx); 1594 } 1595 1596 if (is_feat_fgt2_supported()) { 1597 el2_sysregs_context_save_fgt2(el2_sysregs_ctx); 1598 } 1599 1600 if (is_feat_ecv_v2_supported()) { 1601 write_el2_ctx_ecv(el2_sysregs_ctx, cntpoff_el2, read_cntpoff_el2()); 1602 } 1603 1604 if (is_feat_vhe_supported()) { 1605 write_el2_ctx_vhe(el2_sysregs_ctx, contextidr_el2, 1606 read_contextidr_el2()); 1607 write_el2_ctx_vhe_sysreg128(el2_sysregs_ctx, ttbr1_el2, read_ttbr1_el2()); 1608 } 1609 1610 if (is_feat_ras_supported()) { 1611 write_el2_ctx_ras(el2_sysregs_ctx, vdisr_el2, read_vdisr_el2()); 1612 write_el2_ctx_ras(el2_sysregs_ctx, vsesr_el2, read_vsesr_el2()); 1613 } 1614 1615 if (is_feat_nv2_supported()) { 1616 write_el2_ctx_neve(el2_sysregs_ctx, vncr_el2, read_vncr_el2()); 1617 } 1618 1619 if (is_feat_trf_supported()) { 1620 write_el2_ctx_trf(el2_sysregs_ctx, trfcr_el2, read_trfcr_el2()); 1621 } 1622 1623 if (is_feat_csv2_2_supported()) { 1624 write_el2_ctx_csv2_2(el2_sysregs_ctx, scxtnum_el2, 1625 read_scxtnum_el2()); 1626 } 1627 1628 if (is_feat_hcx_supported()) { 1629 write_el2_ctx_hcx(el2_sysregs_ctx, hcrx_el2, read_hcrx_el2()); 1630 } 1631 1632 if (is_feat_tcr2_supported()) { 1633 write_el2_ctx_tcr2(el2_sysregs_ctx, tcr2_el2, read_tcr2_el2()); 1634 } 1635 1636 if (is_feat_s1pie_supported()) { 1637 write_el2_ctx_sxpie(el2_sysregs_ctx, pire0_el2, read_pire0_el2()); 1638 write_el2_ctx_sxpie(el2_sysregs_ctx, pir_el2, read_pir_el2()); 1639 } 1640 1641 if (is_feat_s1poe_supported()) { 1642 write_el2_ctx_sxpoe(el2_sysregs_ctx, por_el2, read_por_el2()); 1643 } 1644 1645 if (is_feat_brbe_supported()) { 1646 write_el2_ctx_brbe(el2_sysregs_ctx, brbcr_el2, read_brbcr_el2()); 1647 } 1648 1649 if (is_feat_s2pie_supported()) { 1650 write_el2_ctx_s2pie(el2_sysregs_ctx, s2pir_el2, read_s2pir_el2()); 1651 } 1652 1653 if (is_feat_gcs_supported()) { 1654 write_el2_ctx_gcs(el2_sysregs_ctx, gcscr_el2, read_gcscr_el2()); 1655 write_el2_ctx_gcs(el2_sysregs_ctx, gcspr_el2, read_gcspr_el2()); 1656 } 1657 1658 if (is_feat_sctlr2_supported()) { 1659 write_el2_ctx_sctlr2(el2_sysregs_ctx, sctlr2_el2, read_sctlr2_el2()); 1660 } 1661 1662 if (is_feat_amu_supported()) { 1663 cm_sysregs_context_save_amu(security_state); 1664 } 1665 } 1666 1667 /******************************************************************************* 1668 * Restore EL2 sysreg context 1669 ******************************************************************************/ 1670 void cm_el2_sysregs_context_restore(uint32_t security_state) 1671 { 1672 cpu_context_t *ctx; 1673 el2_sysregs_t *el2_sysregs_ctx; 1674 1675 ctx = cm_get_context(security_state); 1676 assert(ctx != NULL); 1677 1678 el2_sysregs_ctx = get_el2_sysregs_ctx(ctx); 1679 1680 el2_sysregs_context_restore_common(el2_sysregs_ctx); 1681 1682 if (is_feat_mte2_supported()) { 1683 write_tfsr_el2(read_el2_ctx_mte2(el2_sysregs_ctx, tfsr_el2)); 1684 } 1685 1686 if (is_feat_mpam_supported()) { 1687 el2_sysregs_context_restore_mpam(el2_sysregs_ctx); 1688 } 1689 1690 if (is_feat_fgt_supported()) { 1691 el2_sysregs_context_restore_fgt(el2_sysregs_ctx); 1692 } 1693 1694 if (is_feat_fgt2_supported()) { 1695 el2_sysregs_context_restore_fgt2(el2_sysregs_ctx); 1696 } 1697 1698 if (is_feat_ecv_v2_supported()) { 1699 write_cntpoff_el2(read_el2_ctx_ecv(el2_sysregs_ctx, cntpoff_el2)); 1700 } 1701 1702 if (is_feat_vhe_supported()) { 1703 write_contextidr_el2(read_el2_ctx_vhe(el2_sysregs_ctx, 1704 contextidr_el2)); 1705 write_ttbr1_el2(read_el2_ctx_vhe(el2_sysregs_ctx, ttbr1_el2)); 1706 } 1707 1708 if (is_feat_ras_supported()) { 1709 write_vdisr_el2(read_el2_ctx_ras(el2_sysregs_ctx, vdisr_el2)); 1710 write_vsesr_el2(read_el2_ctx_ras(el2_sysregs_ctx, vsesr_el2)); 1711 } 1712 1713 if (is_feat_nv2_supported()) { 1714 write_vncr_el2(read_el2_ctx_neve(el2_sysregs_ctx, vncr_el2)); 1715 } 1716 1717 if (is_feat_trf_supported()) { 1718 write_trfcr_el2(read_el2_ctx_trf(el2_sysregs_ctx, trfcr_el2)); 1719 } 1720 1721 if (is_feat_csv2_2_supported()) { 1722 write_scxtnum_el2(read_el2_ctx_csv2_2(el2_sysregs_ctx, 1723 scxtnum_el2)); 1724 } 1725 1726 if (is_feat_hcx_supported()) { 1727 write_hcrx_el2(read_el2_ctx_hcx(el2_sysregs_ctx, hcrx_el2)); 1728 } 1729 1730 if (is_feat_tcr2_supported()) { 1731 write_tcr2_el2(read_el2_ctx_tcr2(el2_sysregs_ctx, tcr2_el2)); 1732 } 1733 1734 if (is_feat_s1pie_supported()) { 1735 write_pire0_el2(read_el2_ctx_sxpie(el2_sysregs_ctx, pire0_el2)); 1736 write_pir_el2(read_el2_ctx_sxpie(el2_sysregs_ctx, pir_el2)); 1737 } 1738 1739 if (is_feat_s1poe_supported()) { 1740 write_por_el2(read_el2_ctx_sxpoe(el2_sysregs_ctx, por_el2)); 1741 } 1742 1743 if (is_feat_s2pie_supported()) { 1744 write_s2pir_el2(read_el2_ctx_s2pie(el2_sysregs_ctx, s2pir_el2)); 1745 } 1746 1747 if (is_feat_gcs_supported()) { 1748 write_gcscr_el2(read_el2_ctx_gcs(el2_sysregs_ctx, gcscr_el2)); 1749 write_gcspr_el2(read_el2_ctx_gcs(el2_sysregs_ctx, gcspr_el2)); 1750 } 1751 1752 if (is_feat_sctlr2_supported()) { 1753 write_sctlr2_el2(read_el2_ctx_sctlr2(el2_sysregs_ctx, sctlr2_el2)); 1754 } 1755 1756 if (is_feat_brbe_supported()) { 1757 write_brbcr_el2(read_el2_ctx_brbe(el2_sysregs_ctx, brbcr_el2)); 1758 } 1759 1760 if (is_feat_amu_supported()) { 1761 cm_sysregs_context_restore_amu(security_state); 1762 } 1763 } 1764 #endif /* (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) */ 1765 1766 /******************************************************************************* 1767 * This function is used to exit to Non-secure world. If CTX_INCLUDE_EL2_REGS 1768 * is enabled, it restores EL1 and EL2 sysreg contexts instead of directly 1769 * updating EL1 and EL2 registers. Otherwise, it calls the generic 1770 * cm_prepare_el3_exit function. 1771 ******************************************************************************/ 1772 void cm_prepare_el3_exit_ns(void) 1773 { 1774 #if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) 1775 #if ENABLE_ASSERTIONS 1776 cpu_context_t *ctx = cm_get_context(NON_SECURE); 1777 assert(ctx != NULL); 1778 1779 /* Assert that EL2 is used. */ 1780 u_register_t scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3); 1781 assert(((scr_el3 & SCR_HCE_BIT) != 0UL) && 1782 (el_implemented(2U) != EL_IMPL_NONE)); 1783 #endif /* ENABLE_ASSERTIONS */ 1784 1785 /* Restore EL2 sysreg contexts */ 1786 cm_el2_sysregs_context_restore(NON_SECURE); 1787 cm_el2_sysregs_context_restore_gic(NON_SECURE); 1788 cm_set_next_eret_context(NON_SECURE); 1789 #else 1790 cm_prepare_el3_exit(NON_SECURE); 1791 #endif /* (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) */ 1792 1793 if (is_feat_amu_supported()) { 1794 cm_sysregs_context_restore_amu(NON_SECURE); 1795 } 1796 } 1797 1798 #if ((IMAGE_BL1) || (IMAGE_BL31 && (!CTX_INCLUDE_EL2_REGS))) 1799 /******************************************************************************* 1800 * The next set of six functions are used by runtime services to save and restore 1801 * EL1 context on the 'cpu_context' structure for the specified security state. 1802 ******************************************************************************/ 1803 static void el1_sysregs_context_save(el1_sysregs_t *ctx) 1804 { 1805 write_el1_ctx_common(ctx, spsr_el1, read_spsr_el1()); 1806 write_el1_ctx_common(ctx, elr_el1, read_elr_el1()); 1807 1808 #if (!ERRATA_SPECULATIVE_AT) 1809 write_el1_ctx_common(ctx, sctlr_el1, read_sctlr_el1()); 1810 write_el1_ctx_common(ctx, tcr_el1, read_tcr_el1()); 1811 #endif /* (!ERRATA_SPECULATIVE_AT) */ 1812 1813 write_el1_ctx_common(ctx, cpacr_el1, read_cpacr_el1()); 1814 write_el1_ctx_common(ctx, csselr_el1, read_csselr_el1()); 1815 write_el1_ctx_common(ctx, sp_el1, read_sp_el1()); 1816 write_el1_ctx_common(ctx, esr_el1, read_esr_el1()); 1817 write_el1_ctx_common(ctx, mair_el1, read_mair_el1()); 1818 write_el1_ctx_common(ctx, amair_el1, read_amair_el1()); 1819 write_el1_ctx_common(ctx, actlr_el1, read_actlr_el1()); 1820 write_el1_ctx_common(ctx, tpidr_el1, read_tpidr_el1()); 1821 write_el1_ctx_common(ctx, tpidr_el0, read_tpidr_el0()); 1822 write_el1_ctx_common(ctx, tpidrro_el0, read_tpidrro_el0()); 1823 write_el1_ctx_common(ctx, far_el1, read_far_el1()); 1824 write_el1_ctx_common(ctx, afsr0_el1, read_afsr0_el1()); 1825 write_el1_ctx_common(ctx, afsr1_el1, read_afsr1_el1()); 1826 write_el1_ctx_common(ctx, contextidr_el1, read_contextidr_el1()); 1827 write_el1_ctx_common(ctx, vbar_el1, read_vbar_el1()); 1828 write_el1_ctx_common(ctx, mdccint_el1, read_mdccint_el1()); 1829 write_el1_ctx_common(ctx, mdscr_el1, read_mdscr_el1()); 1830 1831 write_el1_ctx_common_sysreg128(ctx, par_el1, read_par_el1()); 1832 write_el1_ctx_common_sysreg128(ctx, ttbr0_el1, read_ttbr0_el1()); 1833 write_el1_ctx_common_sysreg128(ctx, ttbr1_el1, read_ttbr1_el1()); 1834 1835 if (CTX_INCLUDE_AARCH32_REGS) { 1836 /* Save Aarch32 registers */ 1837 write_el1_ctx_aarch32(ctx, spsr_abt, read_spsr_abt()); 1838 write_el1_ctx_aarch32(ctx, spsr_und, read_spsr_und()); 1839 write_el1_ctx_aarch32(ctx, spsr_irq, read_spsr_irq()); 1840 write_el1_ctx_aarch32(ctx, spsr_fiq, read_spsr_fiq()); 1841 write_el1_ctx_aarch32(ctx, dacr32_el2, read_dacr32_el2()); 1842 write_el1_ctx_aarch32(ctx, ifsr32_el2, read_ifsr32_el2()); 1843 } 1844 1845 /* Save counter-timer kernel control register */ 1846 write_el1_ctx_arch_timer(ctx, cntkctl_el1, read_cntkctl_el1()); 1847 #if NS_TIMER_SWITCH 1848 /* Save NS Timer registers */ 1849 write_el1_ctx_arch_timer(ctx, cntp_ctl_el0, read_cntp_ctl_el0()); 1850 write_el1_ctx_arch_timer(ctx, cntp_cval_el0, read_cntp_cval_el0()); 1851 write_el1_ctx_arch_timer(ctx, cntv_ctl_el0, read_cntv_ctl_el0()); 1852 write_el1_ctx_arch_timer(ctx, cntv_cval_el0, read_cntv_cval_el0()); 1853 #endif 1854 1855 if (is_feat_mte2_supported()) { 1856 write_el1_ctx_mte2(ctx, tfsre0_el1, read_tfsre0_el1()); 1857 write_el1_ctx_mte2(ctx, tfsr_el1, read_tfsr_el1()); 1858 write_el1_ctx_mte2(ctx, rgsr_el1, read_rgsr_el1()); 1859 write_el1_ctx_mte2(ctx, gcr_el1, read_gcr_el1()); 1860 } 1861 1862 if (is_feat_ras_supported()) { 1863 write_el1_ctx_ras(ctx, disr_el1, read_disr_el1()); 1864 } 1865 1866 if (is_feat_s1pie_supported()) { 1867 write_el1_ctx_s1pie(ctx, pire0_el1, read_pire0_el1()); 1868 write_el1_ctx_s1pie(ctx, pir_el1, read_pir_el1()); 1869 } 1870 1871 if (is_feat_s1poe_supported()) { 1872 write_el1_ctx_s1poe(ctx, por_el1, read_por_el1()); 1873 } 1874 1875 if (is_feat_s2poe_supported()) { 1876 write_el1_ctx_s2poe(ctx, s2por_el1, read_s2por_el1()); 1877 } 1878 1879 if (is_feat_tcr2_supported()) { 1880 write_el1_ctx_tcr2(ctx, tcr2_el1, read_tcr2_el1()); 1881 } 1882 1883 if (is_feat_trf_supported()) { 1884 write_el1_ctx_trf(ctx, trfcr_el1, read_trfcr_el1()); 1885 } 1886 1887 if (is_feat_csv2_2_supported()) { 1888 write_el1_ctx_csv2_2(ctx, scxtnum_el0, read_scxtnum_el0()); 1889 write_el1_ctx_csv2_2(ctx, scxtnum_el1, read_scxtnum_el1()); 1890 } 1891 1892 if (is_feat_gcs_supported()) { 1893 write_el1_ctx_gcs(ctx, gcscr_el1, read_gcscr_el1()); 1894 write_el1_ctx_gcs(ctx, gcscre0_el1, read_gcscre0_el1()); 1895 write_el1_ctx_gcs(ctx, gcspr_el1, read_gcspr_el1()); 1896 write_el1_ctx_gcs(ctx, gcspr_el0, read_gcspr_el0()); 1897 } 1898 1899 if (is_feat_the_supported()) { 1900 write_el1_ctx_the_sysreg128(ctx, rcwmask_el1, read_rcwmask_el1()); 1901 write_el1_ctx_the_sysreg128(ctx, rcwsmask_el1, read_rcwsmask_el1()); 1902 } 1903 1904 if (is_feat_sctlr2_supported()) { 1905 write_el1_ctx_sctlr2(ctx, sctlr2_el1, read_sctlr2_el1()); 1906 } 1907 1908 if (is_feat_ls64_accdata_supported()) { 1909 write_el1_ctx_ls64(ctx, accdata_el1, read_accdata_el1()); 1910 } 1911 1912 if (is_feat_step2_supported()) { 1913 write_el1_ctx_step2(ctx, mdstepop_el1, read_mdstepop_el1()); 1914 } 1915 } 1916 1917 static void el1_sysregs_context_restore(el1_sysregs_t *ctx) 1918 { 1919 write_spsr_el1(read_el1_ctx_common(ctx, spsr_el1)); 1920 write_elr_el1(read_el1_ctx_common(ctx, elr_el1)); 1921 1922 #if (!ERRATA_SPECULATIVE_AT) 1923 write_sctlr_el1(read_el1_ctx_common(ctx, sctlr_el1)); 1924 write_tcr_el1(read_el1_ctx_common(ctx, tcr_el1)); 1925 #endif /* (!ERRATA_SPECULATIVE_AT) */ 1926 1927 write_cpacr_el1(read_el1_ctx_common(ctx, cpacr_el1)); 1928 write_csselr_el1(read_el1_ctx_common(ctx, csselr_el1)); 1929 write_sp_el1(read_el1_ctx_common(ctx, sp_el1)); 1930 write_esr_el1(read_el1_ctx_common(ctx, esr_el1)); 1931 write_ttbr0_el1(read_el1_ctx_common(ctx, ttbr0_el1)); 1932 write_ttbr1_el1(read_el1_ctx_common(ctx, ttbr1_el1)); 1933 write_mair_el1(read_el1_ctx_common(ctx, mair_el1)); 1934 write_amair_el1(read_el1_ctx_common(ctx, amair_el1)); 1935 write_actlr_el1(read_el1_ctx_common(ctx, actlr_el1)); 1936 write_tpidr_el1(read_el1_ctx_common(ctx, tpidr_el1)); 1937 write_tpidr_el0(read_el1_ctx_common(ctx, tpidr_el0)); 1938 write_tpidrro_el0(read_el1_ctx_common(ctx, tpidrro_el0)); 1939 write_par_el1(read_el1_ctx_common(ctx, par_el1)); 1940 write_far_el1(read_el1_ctx_common(ctx, far_el1)); 1941 write_afsr0_el1(read_el1_ctx_common(ctx, afsr0_el1)); 1942 write_afsr1_el1(read_el1_ctx_common(ctx, afsr1_el1)); 1943 write_contextidr_el1(read_el1_ctx_common(ctx, contextidr_el1)); 1944 write_vbar_el1(read_el1_ctx_common(ctx, vbar_el1)); 1945 write_mdccint_el1(read_el1_ctx_common(ctx, mdccint_el1)); 1946 write_mdscr_el1(read_el1_ctx_common(ctx, mdscr_el1)); 1947 1948 if (CTX_INCLUDE_AARCH32_REGS) { 1949 /* Restore Aarch32 registers */ 1950 write_spsr_abt(read_el1_ctx_aarch32(ctx, spsr_abt)); 1951 write_spsr_und(read_el1_ctx_aarch32(ctx, spsr_und)); 1952 write_spsr_irq(read_el1_ctx_aarch32(ctx, spsr_irq)); 1953 write_spsr_fiq(read_el1_ctx_aarch32(ctx, spsr_fiq)); 1954 write_dacr32_el2(read_el1_ctx_aarch32(ctx, dacr32_el2)); 1955 write_ifsr32_el2(read_el1_ctx_aarch32(ctx, ifsr32_el2)); 1956 } 1957 1958 /* Restore counter-timer kernel control register */ 1959 write_cntkctl_el1(read_el1_ctx_arch_timer(ctx, cntkctl_el1)); 1960 #if NS_TIMER_SWITCH 1961 /* Restore NS Timer registers */ 1962 write_cntp_ctl_el0(read_el1_ctx_arch_timer(ctx, cntp_ctl_el0)); 1963 write_cntp_cval_el0(read_el1_ctx_arch_timer(ctx, cntp_cval_el0)); 1964 write_cntv_ctl_el0(read_el1_ctx_arch_timer(ctx, cntv_ctl_el0)); 1965 write_cntv_cval_el0(read_el1_ctx_arch_timer(ctx, cntv_cval_el0)); 1966 #endif 1967 1968 if (is_feat_mte2_supported()) { 1969 write_tfsre0_el1(read_el1_ctx_mte2(ctx, tfsre0_el1)); 1970 write_tfsr_el1(read_el1_ctx_mte2(ctx, tfsr_el1)); 1971 write_rgsr_el1(read_el1_ctx_mte2(ctx, rgsr_el1)); 1972 write_gcr_el1(read_el1_ctx_mte2(ctx, gcr_el1)); 1973 } 1974 1975 if (is_feat_ras_supported()) { 1976 write_disr_el1(read_el1_ctx_ras(ctx, disr_el1)); 1977 } 1978 1979 if (is_feat_s1pie_supported()) { 1980 write_pire0_el1(read_el1_ctx_s1pie(ctx, pire0_el1)); 1981 write_pir_el1(read_el1_ctx_s1pie(ctx, pir_el1)); 1982 } 1983 1984 if (is_feat_s1poe_supported()) { 1985 write_por_el1(read_el1_ctx_s1poe(ctx, por_el1)); 1986 } 1987 1988 if (is_feat_s2poe_supported()) { 1989 write_s2por_el1(read_el1_ctx_s2poe(ctx, s2por_el1)); 1990 } 1991 1992 if (is_feat_tcr2_supported()) { 1993 write_tcr2_el1(read_el1_ctx_tcr2(ctx, tcr2_el1)); 1994 } 1995 1996 if (is_feat_trf_supported()) { 1997 write_trfcr_el1(read_el1_ctx_trf(ctx, trfcr_el1)); 1998 } 1999 2000 if (is_feat_csv2_2_supported()) { 2001 write_scxtnum_el0(read_el1_ctx_csv2_2(ctx, scxtnum_el0)); 2002 write_scxtnum_el1(read_el1_ctx_csv2_2(ctx, scxtnum_el1)); 2003 } 2004 2005 if (is_feat_gcs_supported()) { 2006 write_gcscr_el1(read_el1_ctx_gcs(ctx, gcscr_el1)); 2007 write_gcscre0_el1(read_el1_ctx_gcs(ctx, gcscre0_el1)); 2008 write_gcspr_el1(read_el1_ctx_gcs(ctx, gcspr_el1)); 2009 write_gcspr_el0(read_el1_ctx_gcs(ctx, gcspr_el0)); 2010 } 2011 2012 if (is_feat_the_supported()) { 2013 write_rcwmask_el1(read_el1_ctx_the(ctx, rcwmask_el1)); 2014 write_rcwsmask_el1(read_el1_ctx_the(ctx, rcwsmask_el1)); 2015 } 2016 2017 if (is_feat_sctlr2_supported()) { 2018 write_sctlr2_el1(read_el1_ctx_sctlr2(ctx, sctlr2_el1)); 2019 } 2020 2021 if (is_feat_ls64_accdata_supported()) { 2022 write_accdata_el1(read_el1_ctx_ls64(ctx, accdata_el1)); 2023 } 2024 2025 if (is_feat_step2_supported()) { 2026 write_mdstepop_el1(read_el1_ctx_step2(ctx, mdstepop_el1)); 2027 } 2028 } 2029 2030 /******************************************************************************* 2031 * The next couple of functions are used by runtime services to save and restore 2032 * EL1 context on the 'cpu_context' structure for the specified security state. 2033 ******************************************************************************/ 2034 void cm_el1_sysregs_context_save(uint32_t security_state) 2035 { 2036 cpu_context_t *ctx; 2037 2038 ctx = cm_get_context(security_state); 2039 assert(ctx != NULL); 2040 2041 el1_sysregs_context_save(get_el1_sysregs_ctx(ctx)); 2042 2043 #if IMAGE_BL31 2044 if (is_feat_amu_supported()) { 2045 cm_sysregs_context_save_amu(security_state); 2046 } 2047 2048 if (security_state == SECURE) { 2049 PUBLISH_EVENT(cm_exited_secure_world); 2050 } else { 2051 PUBLISH_EVENT(cm_exited_normal_world); 2052 } 2053 #endif 2054 } 2055 2056 void cm_el1_sysregs_context_restore(uint32_t security_state) 2057 { 2058 cpu_context_t *ctx; 2059 2060 ctx = cm_get_context(security_state); 2061 assert(ctx != NULL); 2062 2063 el1_sysregs_context_restore(get_el1_sysregs_ctx(ctx)); 2064 2065 #if IMAGE_BL31 2066 if (is_feat_amu_supported()) { 2067 cm_sysregs_context_restore_amu(security_state); 2068 } 2069 2070 if (security_state == SECURE) { 2071 PUBLISH_EVENT(cm_entering_secure_world); 2072 } else { 2073 PUBLISH_EVENT(cm_entering_normal_world); 2074 } 2075 #endif 2076 } 2077 2078 #endif /* ((IMAGE_BL1) || (IMAGE_BL31 && (!CTX_INCLUDE_EL2_REGS))) */ 2079 2080 /******************************************************************************* 2081 * This function populates ELR_EL3 member of 'cpu_context' pertaining to the 2082 * given security state with the given entrypoint 2083 ******************************************************************************/ 2084 void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint) 2085 { 2086 cpu_context_t *ctx; 2087 el3_state_t *state; 2088 2089 ctx = cm_get_context(security_state); 2090 assert(ctx != NULL); 2091 2092 /* Populate EL3 state so that ERET jumps to the correct entry */ 2093 state = get_el3state_ctx(ctx); 2094 write_ctx_reg(state, CTX_ELR_EL3, entrypoint); 2095 } 2096 2097 /******************************************************************************* 2098 * This function populates ELR_EL3 and SPSR_EL3 members of 'cpu_context' 2099 * pertaining to the given security state 2100 ******************************************************************************/ 2101 void cm_set_elr_spsr_el3(uint32_t security_state, 2102 uintptr_t entrypoint, uint32_t spsr) 2103 { 2104 cpu_context_t *ctx; 2105 el3_state_t *state; 2106 2107 ctx = cm_get_context(security_state); 2108 assert(ctx != NULL); 2109 2110 /* Populate EL3 state so that ERET jumps to the correct entry */ 2111 state = get_el3state_ctx(ctx); 2112 write_ctx_reg(state, CTX_ELR_EL3, entrypoint); 2113 write_ctx_reg(state, CTX_SPSR_EL3, spsr); 2114 } 2115 2116 /******************************************************************************* 2117 * This function updates a single bit in the SCR_EL3 member of the 'cpu_context' 2118 * pertaining to the given security state using the value and bit position 2119 * specified in the parameters. It preserves all other bits. 2120 ******************************************************************************/ 2121 void cm_write_scr_el3_bit(uint32_t security_state, 2122 uint32_t bit_pos, 2123 uint32_t value) 2124 { 2125 cpu_context_t *ctx; 2126 el3_state_t *state; 2127 u_register_t scr_el3; 2128 2129 ctx = cm_get_context(security_state); 2130 assert(ctx != NULL); 2131 2132 /* Ensure that the bit position is a valid one */ 2133 assert(((1UL << bit_pos) & SCR_VALID_BIT_MASK) != 0U); 2134 2135 /* Ensure that the 'value' is only a bit wide */ 2136 assert(value <= 1U); 2137 2138 /* 2139 * Get the SCR_EL3 value from the cpu context, clear the desired bit 2140 * and set it to its new value. 2141 */ 2142 state = get_el3state_ctx(ctx); 2143 scr_el3 = read_ctx_reg(state, CTX_SCR_EL3); 2144 scr_el3 &= ~(1UL << bit_pos); 2145 scr_el3 |= (u_register_t)value << bit_pos; 2146 write_ctx_reg(state, CTX_SCR_EL3, scr_el3); 2147 } 2148 2149 /******************************************************************************* 2150 * This function retrieves SCR_EL3 member of 'cpu_context' pertaining to the 2151 * given security state. 2152 ******************************************************************************/ 2153 u_register_t cm_get_scr_el3(uint32_t security_state) 2154 { 2155 const cpu_context_t *ctx; 2156 const el3_state_t *state; 2157 2158 ctx = cm_get_context(security_state); 2159 assert(ctx != NULL); 2160 2161 /* Populate EL3 state so that ERET jumps to the correct entry */ 2162 state = get_el3state_ctx(ctx); 2163 return read_ctx_reg(state, CTX_SCR_EL3); 2164 } 2165 2166 /******************************************************************************* 2167 * This function is used to program the context that's used for exception 2168 * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for 2169 * the required security state 2170 ******************************************************************************/ 2171 void cm_set_next_eret_context(uint32_t security_state) 2172 { 2173 cpu_context_t *ctx; 2174 2175 ctx = cm_get_context(security_state); 2176 assert(ctx != NULL); 2177 2178 cm_set_next_context(ctx); 2179 } 2180