/*
 * Copyright (c) 2013-2026, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <context.h>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/per_cpu/per_cpu_macros.S>
#include <platform_def.h>

#if CTX_INCLUDE_FPREGS
	.global	fpregs_context_save
	.global	fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */

#if CTX_INCLUDE_SVE_REGS
	.global	sve_context_save
	.global	sve_context_restore
#endif /* CTX_INCLUDE_SVE_REGS */

#if ERRATA_SPECULATIVE_AT
	.global	save_and_update_ptw_el1_sys_regs
#endif /* ERRATA_SPECULATIVE_AT */

	.global	prepare_el3_entry
	.global	restore_el3_runtime_regs
	.global	el3_exit

/* The following macros are used if either CTX_INCLUDE_FPREGS or CTX_INCLUDE_SVE_REGS is enabled */
#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
.macro fpregs_state_save base:req hold:req
	mrs	\hold, fpsr
	str	\hold, [\base, #CTX_SIMD_FPSR]

	mrs	\hold, fpcr
	str	\hold, [\base, #CTX_SIMD_FPCR]

#if CTX_INCLUDE_AARCH32_REGS && CTX_INCLUDE_FPREGS
	mrs	\hold, fpexc32_el2
	str	\hold, [\base, #CTX_SIMD_FPEXC32]
#endif
.endm

.macro fpregs_state_restore base:req hold:req
	ldr	\hold, [\base, #CTX_SIMD_FPSR]
	msr	fpsr, \hold

	ldr	\hold, [\base, #CTX_SIMD_FPCR]
	msr	fpcr, \hold

#if CTX_INCLUDE_AARCH32_REGS && CTX_INCLUDE_FPREGS
	ldr	\hold, [\base, #CTX_SIMD_FPEXC32]
	msr	fpexc32_el2, \hold
#endif
.endm

#endif /* CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS */

/* ------------------------------------------------------------------
 * The following function follows the aapcs_64 strictly, using only
 * x9-x17 (temporary caller-saved registers according to the AArch64
 * PCS) to save the floating point register context. It assumes that
 * 'x0' points to a 'fp_regs' structure where the register context
 * will be saved.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is set.
 * However, Trusted Firmware currently neither uses VFP registers
 * nor sets this trap, so the bit is assumed to be clear.
 *
 * TODO: Revisit when VFP is used in the secure world
 * ------------------------------------------------------------------
 */
#if CTX_INCLUDE_FPREGS
func fpregs_context_save
.arch_extension fp
	/* Temporarily enable floating point */

	/* Save x0 and pass its original value to fpregs_state_save */
	mov	x1, x0

	stp	q0, q1, [x0], #32
	stp	q2, q3, [x0], #32
	stp	q4, q5, [x0], #32
	stp	q6, q7, [x0], #32
	stp	q8, q9, [x0], #32
	stp	q10, q11, [x0], #32
	stp	q12, q13, [x0], #32
	stp	q14, q15, [x0], #32
	stp	q16, q17, [x0], #32
	stp	q18, q19, [x0], #32
	stp	q20, q21, [x0], #32
	stp	q22, q23, [x0], #32
	stp	q24, q25, [x0], #32
	stp	q26, q27, [x0], #32
	stp	q28, q29, [x0], #32
	stp	q30, q31, [x0], #32

	fpregs_state_save x1, x9

.arch_extension nofp
	ret
endfunc fpregs_context_save

/* ------------------------------------------------------------------
 * The following function follows the aapcs_64 strictly, using only
 * x9-x17 (temporary caller-saved registers according to the AArch64
 * PCS) to restore the floating point register context. It assumes
 * that 'x0' points to a 'fp_regs' structure from where the register
 * context will be restored.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is set.
 * However, Trusted Firmware currently neither uses VFP registers
 * nor sets this trap, so the bit is assumed to be clear.
 *
 * TODO: Revisit when VFP is used in the secure world
 * ------------------------------------------------------------------
 */
func fpregs_context_restore
.arch_extension fp
	/* Temporarily enable floating point */

	/* Save x0 and pass its original value to fpregs_state_restore */
	mov	x1, x0

	ldp	q0, q1, [x0], #32
	ldp	q2, q3, [x0], #32
	ldp	q4, q5, [x0], #32
	ldp	q6, q7, [x0], #32
	ldp	q8, q9, [x0], #32
	ldp	q10, q11, [x0], #32
	ldp	q12, q13, [x0], #32
	ldp	q14, q15, [x0], #32
	ldp	q16, q17, [x0], #32
	ldp	q18, q19, [x0], #32
	ldp	q20, q21, [x0], #32
	ldp	q22, q23, [x0], #32
	ldp	q24, q25, [x0], #32
	ldp	q26, q27, [x0], #32
	ldp	q28, q29, [x0], #32
	ldp	q30, q31, [x0], #32

	fpregs_state_restore x1, x9

.arch_extension nofp
	ret
endfunc fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */

#if CTX_INCLUDE_SVE_REGS
/*
 * Helper macros for SVE register save/restore operations.
 */
.macro sve_predicate_op op:req reg:req
	\op	p0, [\reg, #0, MUL VL]
	\op	p1, [\reg, #1, MUL VL]
	\op	p2, [\reg, #2, MUL VL]
	\op	p3, [\reg, #3, MUL VL]
	\op	p4, [\reg, #4, MUL VL]
	\op	p5, [\reg, #5, MUL VL]
	\op	p6, [\reg, #6, MUL VL]
	\op	p7, [\reg, #7, MUL VL]
	\op	p8, [\reg, #8, MUL VL]
	\op	p9, [\reg, #9, MUL VL]
	\op	p10, [\reg, #10, MUL VL]
	\op	p11, [\reg, #11, MUL VL]
	\op	p12, [\reg, #12, MUL VL]
	\op	p13, [\reg, #13, MUL VL]
	\op	p14, [\reg, #14, MUL VL]
	\op	p15, [\reg, #15, MUL VL]
.endm

.macro sve_vectors_op op:req reg:req
	\op	z0, [\reg, #0, MUL VL]
	\op	z1, [\reg, #1, MUL VL]
	\op	z2, [\reg, #2, MUL VL]
	\op	z3, [\reg, #3, MUL VL]
	\op	z4, [\reg, #4, MUL VL]
	\op	z5, [\reg, #5, MUL VL]
	\op	z6, [\reg, #6, MUL VL]
	\op	z7, [\reg, #7, MUL VL]
	\op	z8, [\reg, #8, MUL VL]
	\op	z9, [\reg, #9, MUL VL]
	\op	z10, [\reg, #10, MUL VL]
	\op	z11, [\reg, #11, MUL VL]
	\op	z12, [\reg, #12, MUL VL]
	\op	z13, [\reg, #13, MUL VL]
	\op	z14, [\reg, #14, MUL VL]
	\op	z15, [\reg, #15, MUL VL]
	\op	z16, [\reg, #16, MUL VL]
	\op	z17, [\reg, #17, MUL VL]
	\op	z18, [\reg, #18, MUL VL]
	\op	z19, [\reg, #19, MUL VL]
	\op	z20, [\reg, #20, MUL VL]
	\op	z21, [\reg, #21, MUL VL]
	\op	z22, [\reg, #22, MUL VL]
	\op	z23, [\reg, #23, MUL VL]
	\op	z24, [\reg, #24, MUL VL]
	\op	z25, [\reg, #25, MUL VL]
	\op	z26, [\reg, #26, MUL VL]
	\op	z27, [\reg, #27, MUL VL]
	\op	z28, [\reg, #28, MUL VL]
	\op	z29, [\reg, #29, MUL VL]
	\op	z30, [\reg, #30, MUL VL]
	\op	z31, [\reg, #31, MUL VL]
.endm

/* ------------------------------------------------------------------
 * The following function follows the aapcs_64 strictly, using only
 * x9-x17 (temporary caller-saved registers according to the AArch64
 * PCS) to save the SVE register context. It assumes that 'x0' points
 * to a 'sve_regs_t' structure to which the register context will be
 * saved.
 * ------------------------------------------------------------------
 */
func sve_context_save
.arch_extension sve
	/* Predicate registers */
	mov	x13, #CTX_SIMD_PREDICATES
	add	x9, x0, x13
	sve_predicate_op str, x9

	/* Save FFR after predicates */
	mov	x13, #CTX_SIMD_FFR
	add	x9, x0, x13
	rdffr	p0.b
	str	p0, [x9]

	/* Save vector registers */
	mov	x13, #CTX_SIMD_VECTORS
	add	x9, x0, x13
	sve_vectors_op str, x9
.arch_extension nosve

	/* Save FPSR, FPCR and FPEXC32 */
	fpregs_state_save x0, x9

	ret
endfunc sve_context_save

/* ------------------------------------------------------------------
 * The following function follows the aapcs_64 strictly, using only
 * x9-x17 (temporary caller-saved registers according to the AArch64
 * PCS) to restore the SVE register context. It assumes that 'x0'
 * points to a 'sve_regs_t' structure from where the register context
 * will be restored.
 * ------------------------------------------------------------------
 */
func sve_context_restore
.arch_extension sve
	/* Restore FFR register before predicates */
	mov	x13, #CTX_SIMD_FFR
	add	x9, x0, x13
	ldr	p0, [x9]
	wrffr	p0.b

	/* Restore predicate registers */
	mov	x13, #CTX_SIMD_PREDICATES
	add	x9, x0, x13
	sve_predicate_op ldr, x9

	/* Restore vector registers */
	mov	x13, #CTX_SIMD_VECTORS
	add	x9, x0, x13
	sve_vectors_op ldr, x9
.arch_extension nosve

	/* Restore FPSR, FPCR and FPEXC32 */
	fpregs_state_restore x0, x9
	ret
endfunc sve_context_restore
#endif /* CTX_INCLUDE_SVE_REGS */

	/*
	 * Set the PSTATE bits not set when the exception was taken as
	 * described in the AArch64.TakeException() pseudocode function
	 * in ARM DDI 0487F.c page J1-7635 to a default value.
	 */
	.macro set_unset_pstate_bits
	/*
	 * If Data Independent Timing (DIT) functionality is implemented,
	 * always enable DIT in EL3.
	 */
#if ENABLE_FEAT_DIT
#if ENABLE_FEAT_DIT >= 2
	mrs	x8, id_aa64pfr0_el1
	and	x8, x8, #(ID_AA64PFR0_DIT_MASK << ID_AA64PFR0_DIT_SHIFT)
	cbz	x8, 1f
#endif
	mov	x8, #DIT_BIT
	msr	DIT, x8
1:
#endif /* ENABLE_FEAT_DIT */
	.endm /* set_unset_pstate_bits */

/*-------------------------------------------------------------------------
 * This macro checks the ENABLE_FEAT_MPAM state, performs an ID register
 * check to see whether the platform supports the MPAM extension, and
 * restores the MPAM3_EL3 register value if the feature state is
 * FEAT_STATE_ENABLED/FEAT_STATE_CHECKED.
 *
 * This is more complicated than for other extensions because we cannot
 * check whether the platform supports MPAM by looking at the status of a
 * particular bit in the MDCR_EL3 or CPTR_EL3 register.
 * ------------------------------------------------------------------------
 */

	.macro restore_mpam3_el3
#if ENABLE_FEAT_MPAM
#if ENABLE_FEAT_MPAM >= 2
	mrs	x8, id_aa64pfr0_el1
	lsr	x8, x8, #(ID_AA64PFR0_MPAM_SHIFT)
	and	x8, x8, #(ID_AA64PFR0_MPAM_MASK)
	mrs	x7, id_aa64pfr1_el1
	lsr	x7, x7, #(ID_AA64PFR1_MPAM_FRAC_SHIFT)
	and	x7, x7, #(ID_AA64PFR1_MPAM_FRAC_MASK)
	orr	x7, x7, x8
	cbz	x7, no_mpam
#endif
	/* -----------------------------------------------------------
	 * Restore the MPAM3_EL3 register as per the context state.
	 * Currently we only enable MPAM for the NS world and trap to
	 * EL3 on MPAM accesses from lower ELs of the Secure and Realm
	 * worlds.
	 * x9 holds the address of the per_world context.
	 * -----------------------------------------------------------
	 */

	ldr	x17, [x9, #CTX_MPAM3_EL3]
	msr	S3_6_C10_C5_0, x17 /* mpam3_el3 */

no_mpam:
#endif
	.endm /* restore_mpam3_el3 */

/* ------------------------------------------------------------------
 * The following macro is used to save all the general purpose
 * registers and swap the FEAT_PAUTH keys with BL31's keys in
 * cpu_data. It also checks whether the Secure Cycle Counter
 * (PMCCNTR_EL0) is disabled in EL3/Secure (ARMv8.5-PMU), in which
 * case PMCCNTR_EL0 need not be saved/restored during a world switch.
 * It also disables the INST_RETIRED and STALL_BACKEND_MEM counters.
 * They are not used for critical performance modelling but may
 * provide a side channel for sensitive timing information,
 * especially in the secure world. CPU_CYCLES and CNT_CYCLES are
 * essential for performance; fiddling with them could introduce
 * system jitter. They do not provide any novel timing information,
 * so they are always kept on.
 *
 * Ideally we would only save and restore the callee-saved registers
 * when a world switch occurs, but that type of implementation is
 * more complex. So currently we always save and restore these
 * registers on entry to and exit from EL3.
 * clobbers: x18
 * ------------------------------------------------------------------
 */
	.macro save_el3_runtime_regs
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	mrs	x18, sp_el0
	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]

#if ENABLE_FEAT_AMU
#if ENABLE_FEAT_AMU == 2
	is_feat_amu_present_asm x9
	beq	no_amu_save_\@
#endif
	/* Rely on the isb in setup_el3_execution_context. */
	mov_imm	x9, AMCNTENCLR0_EL0_Pn_CONTEXTED
	msr	AMCNTENCLR0_EL0, x9
no_amu_save_\@:
#endif

	/* PMUv3 is presumed to be always present */
	mrs	x9, pmcr_el0
	str	x9, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
#if CTX_INCLUDE_PAUTH_REGS
#if CTX_INCLUDE_PAUTH_REGS == 2
	/* Skip if not present in hardware */
	is_feat_pauth_present_asm x9, x10
	beq	no_pauth_\@
#endif
	/* ----------------------------------------------------------
	 * Save the ARMv8.3-PAuth keys as they are not banked
	 * by exception level
	 * ----------------------------------------------------------
	 */
	add	x19, sp, #CTX_PAUTH_REGS_OFFSET

	mrs	x20, APIAKeyLo_EL1	/* x21:x20 = APIAKey */
	mrs	x21, APIAKeyHi_EL1
	mrs	x22, APIBKeyLo_EL1	/* x23:x22 = APIBKey */
	mrs	x23, APIBKeyHi_EL1
	mrs	x24, APDAKeyLo_EL1	/* x25:x24 = APDAKey */
	mrs	x25, APDAKeyHi_EL1
	mrs	x26, APDBKeyLo_EL1	/* x27:x26 = APDBKey */
	mrs	x27, APDBKeyHi_EL1
	mrs	x28, APGAKeyLo_EL1	/* x29:x28 = APGAKey */
	mrs	x29, APGAKeyHi_EL1

	stp	x20, x21, [x19, #CTX_PACIAKEY_LO]
	stp	x22, x23, [x19, #CTX_PACIBKEY_LO]
	stp	x24, x25, [x19, #CTX_PACDAKEY_LO]
	stp	x26, x27, [x19, #CTX_PACDBKEY_LO]
	stp	x28, x29, [x19, #CTX_PACGAKEY_LO]
#if ENABLE_PAUTH
#if IMAGE_BL31
	/* tpidr_el3 contains the address of the cpu_data structure */
	per_cpu_cur percpu_data, x9, x10
	/* Load APIAKey from cpu_data */
	ldp	x10, x11, [x9, #CPU_DATA_APIAKEY]
#endif /* IMAGE_BL31 */

#if IMAGE_BL1
	/* BL1 does not use cpu_data and has dedicated storage */
	adr_l	x9, bl1_apiakey
	ldp	x10, x11, [x9]
#endif /* IMAGE_BL1 */

	/* Program instruction key A */
	msr	APIAKeyLo_EL1, x10
	msr	APIAKeyHi_EL1, x11
#endif /* ENABLE_PAUTH */
	/* Label defined whenever CTX_INCLUDE_PAUTH_REGS is set, as the branch above may be assembled without ENABLE_PAUTH */
no_pauth_\@:
#endif /* CTX_INCLUDE_PAUTH_REGS */

	/* Write ddc_el3 to ddc_el0 so it can be used with sp_el0 */
#if ENABLE_FEAT_MORELLO
#if ENABLE_FEAT_MORELLO == 2
	is_feat_morello_present_asm x10
	cbz	x10, 1f
#endif /* ENABLE_FEAT_MORELLO == 2 */
	/* Save DDC_EL0 */
	mrs	c24, ddc_el0
	str	c24, [sp, #CTX_DDC_OFFSET + CTX_DDC_EL0]
	mrs	c26, ddc	/* ddc_el3 */
	msr	ddc_el0, c26
1:
#endif /* ENABLE_FEAT_MORELLO */
	.endm /* save_el3_runtime_regs */

/* -----------------------------------------------------------------
 * This function saves the context and sets the PSTATE to a known
 * state, preparing for entry to EL3.
 * Save all the general purpose and ARMv8.3-PAuth (if enabled)
 * registers.
 * Then set any of the PSTATE bits that are not set by hardware
 * according to the AArch64.TakeException pseudocode in the Arm
 * Architecture Reference Manual to a default value for EL3.
 * clobbers: all registers. Will save lower EL's register contents.
 * returns: a pointer to the current context structure (from sp_el3)
 * -----------------------------------------------------------------
 */
func prepare_el3_entry
	/*
	 * The context is about to mutate, so make sure we don't affect any
	 * still in-flight profiling operations. We don't care when they
	 * actually finish; that can still be later.
	 * The barriers below are NOPs if the corresponding feature is not
	 * present.
	 */
#if ENABLE_SPE_FOR_NS
	psb_csync
#endif
#if ENABLE_TRBE_FOR_NS
	tsb_csync
#endif
	isb
	save_el3_runtime_regs
	setup_el3_execution_context

	/* x0 will point to the context structure (SP_EL3) */
	mov	x0, sp

	/* Save the SPSR_EL3 and ELR_EL3 so that el3_exit works */
	mrs	x28, spsr_el3
	mrs	x29, elr_el3
	stp	x28, x29, [x0, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/*
	 * Restore the saved C runtime stack value which will become the new
	 * SP_EL0 i.e. the EL3 runtime stack. It was saved in the
	 * 'cpu_context' structure prior to the last ERET from EL3.
	 */
	ldr	x27, [x0, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* Switch to SP_EL0 */
	msr	spsel, #MODE_SP_EL0
	mov	sp, x27

	ret
endfunc prepare_el3_entry

/* ------------------------------------------------------------------
 * This function restores the ARMv8.3-PAuth registers (if enabled) and
 * all general purpose registers except x30 from the CPU context. It
 * also turns back on all AMU counters that were stopped on save.
 * The x30 register must be explicitly restored by the caller.
 * ------------------------------------------------------------------
 */
func restore_el3_runtime_regs
#if CTX_INCLUDE_PAUTH_REGS
#if CTX_INCLUDE_PAUTH_REGS == 2
	/* Skip if not present in hardware */
	is_feat_pauth_present_asm x0, x1
	beq	no_pauth
#endif
	/* Restore the ARMv8.3 PAuth keys */
	add	x10, sp, #CTX_PAUTH_REGS_OFFSET

	ldp	x0, x1, [x10, #CTX_PACIAKEY_LO]	/* x1:x0 = APIAKey */
	ldp	x2, x3, [x10, #CTX_PACIBKEY_LO]	/* x3:x2 = APIBKey */
	ldp	x4, x5, [x10, #CTX_PACDAKEY_LO]	/* x5:x4 = APDAKey */
	ldp	x6, x7, [x10, #CTX_PACDBKEY_LO]	/* x7:x6 = APDBKey */
	ldp	x8, x9, [x10, #CTX_PACGAKEY_LO]	/* x9:x8 = APGAKey */

	msr	APIAKeyLo_EL1, x0
	msr	APIAKeyHi_EL1, x1
	msr	APIBKeyLo_EL1, x2
	msr	APIBKeyHi_EL1, x3
	msr	APDAKeyLo_EL1, x4
	msr	APDAKeyHi_EL1, x5
	msr	APDBKeyLo_EL1, x6
	msr	APDBKeyHi_EL1, x7
	msr	APGAKeyLo_EL1, x8
	msr	APGAKeyHi_EL1, x9
no_pauth:
#endif /* CTX_INCLUDE_PAUTH_REGS */

#if ENABLE_FEAT_MORELLO
#if ENABLE_FEAT_MORELLO == 2
	is_feat_morello_present_asm x10
	cbz	x10, 1f
#endif /* ENABLE_FEAT_MORELLO == 2 */
	/* Restore the saved DDC_EL0 value. */
	ldr	c24, [sp, #CTX_DDC_OFFSET + CTX_DDC_EL0]
	msr	ddc_el0, c24
1:
#endif /* ENABLE_FEAT_MORELLO */

#if ENABLE_FEAT_AMU
#if ENABLE_FEAT_AMU == 2
	is_feat_amu_present_asm x9
	beq	no_amu_restore
#endif
	/*
	 * Enable all group 0 counters. On an SMC these will be the counters
	 * we disabled; on warm boot it will be all counters. Cold boot
	 * enabled them early.
	 */
	mov_imm	x9, AMCNTENSET0_EL0_Pn_ALL
	msr	AMCNTENSET0_EL0, x9
	/* No isb; rely on the eret that follows soon */
no_amu_restore:
#endif
	/* PMUv3 is presumed to be always present */
	ldr	x0, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
	msr	pmcr_el0, x0
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	ldr	x28, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
	msr	sp_el0, x28
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ret
endfunc restore_el3_runtime_regs

#if ERRATA_SPECULATIVE_AT
/* --------------------------------------------------------------------
 * In case of ERRATA_SPECULATIVE_AT, save the SCTLR_EL1 and TCR_EL1
 * registers and update the EL1 registers to disable the stage 1 and
 * stage 2 page table walk.
 * --------------------------------------------------------------------
 */
func save_and_update_ptw_el1_sys_regs
	/* ----------------------------------------------------------
	 * Save only the sctlr_el1 and tcr_el1 registers
	 * ----------------------------------------------------------
	 */
	mrs	x29, sctlr_el1
	str	x29, [sp, #(CTX_ERRATA_SPEC_AT_OFFSET + CTX_ERRATA_SPEC_AT_SCTLR_EL1)]
	mrs	x29, tcr_el1
	str	x29, [sp, #(CTX_ERRATA_SPEC_AT_OFFSET + CTX_ERRATA_SPEC_AT_TCR_EL1)]

	/* ------------------------------------------------------------
	 * The steps below must be performed in this order to disable
	 * the page table walk for lower ELs (EL1 and EL0). The first
	 * step disables the stage 1 page table walk; the second step
	 * forces the page table walker to honour the TCR_EL1.EPDx bits
	 * when performing address translation. The ISB ensures that
	 * the CPU performs these two steps in order.
	 *
	 * 1. Update the TCR_EL1.EPDx bits to disable the stage 1 page
	 *    table walk.
	 * 2. Set the MMU enable bit to avoid identity mapping via
	 *    stage 2 and force TCR_EL1.EPDx to be used by the page
	 *    table walker.
	 * ------------------------------------------------------------
	 */
	orr	x29, x29, #(TCR_EPD0_BIT)
	orr	x29, x29, #(TCR_EPD1_BIT)
	msr	tcr_el1, x29
	isb
	mrs	x29, sctlr_el1
	orr	x29, x29, #SCTLR_M_BIT
	msr	sctlr_el1, x29
	isb
	ret
endfunc save_and_update_ptw_el1_sys_regs

#endif /* ERRATA_SPECULATIVE_AT */

/* -----------------------------------------------------------------
 * The following macro returns the address of the per_world context
 * for the security state, retrieved through the "get_security_state"
 * macro. The per_world context address is returned in the register
 * argument.
 * Clobbers: x9, x10
 * ------------------------------------------------------------------
 */

.macro get_per_world_context _reg:req
	ldr	x10, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	get_security_state x9, x10
	mov_imm	x10, (CTX_PERWORLD_EL3STATE_END - CTX_CPTR_EL3)
	mul	x9, x9, x10
	adrp	x10, per_world_context
	add	x10, x10, :lo12:per_world_context
	add	x9, x9, x10
	mov	\_reg, x9
.endm

/* ------------------------------------------------------------------
 * This routine assumes that the SP_EL3 is pointing to a valid
 * context structure from where the gp regs and other special
 * registers can be retrieved.
 * ------------------------------------------------------------------
 */
func el3_exit
#if ENABLE_ASSERTIONS
	/* el3_exit assumes SP_EL0 on entry */
	mrs	x17, spsel
	cmp	x17, #MODE_SP_EL0
	ASM_ASSERT(eq)
#endif /* ENABLE_ASSERTIONS */

	/* ----------------------------------------------------------
	 * Save the current SP_EL0 i.e. the EL3 runtime stack which
	 * will be used for handling the next SMC.
	 * Then switch to SP_EL3.
	 * ----------------------------------------------------------
	 */
	mov	x17, sp
	msr	spsel, #MODE_SP_ELX
	str	x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* ----------------------------------------------------------
	 * Restore CPTR_EL3.
	 * ---------------------------------------------------------- */

	/* The address of the per_world context is stored in x9 */
	get_per_world_context x9

	ldp	x19, x20, [x9, #CTX_CPTR_EL3]
	msr	cptr_el3, x19

#if IMAGE_BL31
	restore_mpam3_el3

#endif /* IMAGE_BL31 */

#if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639
	/* ----------------------------------------------------------
	 * Restore mitigation state as it was on entry to EL3
	 * ----------------------------------------------------------
	 */
	ldr	x17, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
	cbz	x17, 1f
	blr	x17
1:
#endif /* IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639 */

#if IMAGE_BL31
	synchronize_errors
#endif /* IMAGE_BL31 */

	/* --------------------------------------------------------------
	 * Restore MDCR_EL3, SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
	 * --------------------------------------------------------------
	 */
	ldp	x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	ldr	x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	ldr	x19, [sp, #CTX_EL3STATE_OFFSET + CTX_MDCR_EL3]
	msr	spsr_el3, x16
	msr_wide_reg elr_el3, 17
	msr	scr_el3, x18
	msr	mdcr_el3, x19

	restore_ptw_el1_sys_regs

	/* ----------------------------------------------------------
	 * Restore general purpose (including x30), PMCR_EL0 and
	 * ARMv8.3-PAuth registers.
	 * Exit EL3 via ERET to a lower exception level.
	 * ----------------------------------------------------------
	 */
	bl	restore_el3_runtime_regs
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

#ifdef IMAGE_BL31
	/* Clear the EL3 flag as we are exiting el3 */
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
#endif /* IMAGE_BL31 */

	exception_return

endfunc el3_exit