/*
 * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <context.h>

	.global	el1_sysregs_context_save
	.global	el1_sysregs_context_restore
#if CTX_INCLUDE_FPREGS
	.global	fpregs_context_save
	.global	fpregs_context_restore
#endif
#if CTX_INCLUDE_PAUTH_REGS
	.global	pauth_context_restore
	.global	pauth_context_save
#endif
#if ENABLE_PAUTH
	.global	pauth_load_bl_apiakey
#endif
	.global	save_gp_registers
	.global	restore_gp_registers
	.global	restore_gp_registers_eret
	.global	el3_exit

/* -----------------------------------------------------
 * The following function strictly follows the AArch64
 * PCS, using x9-x17 (temporary caller-saved registers)
 * to save the EL1 system register context. It assumes
 * that 'x0' points to an 'el1_sys_regs' structure where
 * the register context will be saved.
 * -----------------------------------------------------
 */
func el1_sysregs_context_save

	mrs	x9, spsr_el1
	mrs	x10, elr_el1
	stp	x9, x10, [x0, #CTX_SPSR_EL1]

	mrs	x15, sctlr_el1
	mrs	x16, actlr_el1
	stp	x15, x16, [x0, #CTX_SCTLR_EL1]

	mrs	x17, cpacr_el1
	mrs	x9, csselr_el1
	stp	x17, x9, [x0, #CTX_CPACR_EL1]

	mrs	x10, sp_el1
	mrs	x11, esr_el1
	stp	x10, x11, [x0, #CTX_SP_EL1]

	mrs	x12, ttbr0_el1
	mrs	x13, ttbr1_el1
	stp	x12, x13, [x0, #CTX_TTBR0_EL1]

	mrs	x14, mair_el1
	mrs	x15, amair_el1
	stp	x14, x15, [x0, #CTX_MAIR_EL1]

	mrs	x16, tcr_el1
	mrs	x17, tpidr_el1
	stp	x16, x17, [x0, #CTX_TCR_EL1]

	mrs	x9, tpidr_el0
	mrs	x10, tpidrro_el0
	stp	x9, x10, [x0, #CTX_TPIDR_EL0]

	mrs	x13, par_el1
	mrs	x14, far_el1
	stp	x13, x14, [x0, #CTX_PAR_EL1]

	mrs	x15, afsr0_el1
	mrs	x16, afsr1_el1
	stp	x15, x16, [x0, #CTX_AFSR0_EL1]

	mrs	x17, contextidr_el1
	mrs	x9, vbar_el1
	stp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]

	mrs	x10, pmcr_el0
	str	x10, [x0, #CTX_PMCR_EL0]

	/* Save AArch32 system registers if the build is configured for them */
#if CTX_INCLUDE_AARCH32_REGS
	mrs	x11, spsr_abt
	mrs	x12, spsr_und
	stp	x11, x12, [x0, #CTX_SPSR_ABT]

	mrs	x13, spsr_irq
	mrs	x14, spsr_fiq
	stp	x13, x14, [x0, #CTX_SPSR_IRQ]

	mrs	x15, dacr32_el2
	mrs	x16, ifsr32_el2
	stp	x15, x16, [x0, #CTX_DACR32_EL2]
#endif

	/* Save NS timer registers if the build is configured for them */
#if NS_TIMER_SWITCH
	mrs	x10, cntp_ctl_el0
	mrs	x11, cntp_cval_el0
	stp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]

	mrs	x12, cntv_ctl_el0
	mrs	x13, cntv_cval_el0
	stp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]

	mrs	x14, cntkctl_el1
	str	x14, [x0, #CTX_CNTKCTL_EL1]
#endif

	ret
endfunc el1_sysregs_context_save
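
/* -----------------------------------------------------
 * For reference: C code typically reaches this pair of
 * routines through prototypes along the following lines
 * (an illustrative sketch; the authoritative
 * declarations live in the context management headers):
 *
 *     void el1_sysregs_context_save(el1_sys_regs_t *regs);
 *     void el1_sysregs_context_restore(el1_sys_regs_t *regs);
 * -----------------------------------------------------
 */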

/* -----------------------------------------------------
 * The following function strictly follows the AArch64
 * PCS, using x9-x17 (temporary caller-saved registers)
 * to restore the EL1 system register context. It assumes
 * that 'x0' points to an 'el1_sys_regs' structure from
 * where the register context will be restored.
 * -----------------------------------------------------
 */
func el1_sysregs_context_restore

	ldp	x9, x10, [x0, #CTX_SPSR_EL1]
	msr	spsr_el1, x9
	msr	elr_el1, x10

	ldp	x15, x16, [x0, #CTX_SCTLR_EL1]
	msr	sctlr_el1, x15
	msr	actlr_el1, x16

	ldp	x17, x9, [x0, #CTX_CPACR_EL1]
	msr	cpacr_el1, x17
	msr	csselr_el1, x9

	ldp	x10, x11, [x0, #CTX_SP_EL1]
	msr	sp_el1, x10
	msr	esr_el1, x11

	ldp	x12, x13, [x0, #CTX_TTBR0_EL1]
	msr	ttbr0_el1, x12
	msr	ttbr1_el1, x13

	ldp	x14, x15, [x0, #CTX_MAIR_EL1]
	msr	mair_el1, x14
	msr	amair_el1, x15

	ldp	x16, x17, [x0, #CTX_TCR_EL1]
	msr	tcr_el1, x16
	msr	tpidr_el1, x17

	ldp	x9, x10, [x0, #CTX_TPIDR_EL0]
	msr	tpidr_el0, x9
	msr	tpidrro_el0, x10

	ldp	x13, x14, [x0, #CTX_PAR_EL1]
	msr	par_el1, x13
	msr	far_el1, x14

	ldp	x15, x16, [x0, #CTX_AFSR0_EL1]
	msr	afsr0_el1, x15
	msr	afsr1_el1, x16

	ldp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
	msr	contextidr_el1, x17
	msr	vbar_el1, x9

	ldr	x10, [x0, #CTX_PMCR_EL0]
	msr	pmcr_el0, x10

	/* Restore AArch32 system registers if the build is configured for them */
#if CTX_INCLUDE_AARCH32_REGS
	ldp	x11, x12, [x0, #CTX_SPSR_ABT]
	msr	spsr_abt, x11
	msr	spsr_und, x12

	ldp	x13, x14, [x0, #CTX_SPSR_IRQ]
	msr	spsr_irq, x13
	msr	spsr_fiq, x14

	ldp	x15, x16, [x0, #CTX_DACR32_EL2]
	msr	dacr32_el2, x15
	msr	ifsr32_el2, x16
#endif

	/* Restore NS timer registers if the build is configured for them */
#if NS_TIMER_SWITCH
	ldp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]
	msr	cntp_ctl_el0, x10
	msr	cntp_cval_el0, x11

	ldp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]
	msr	cntv_ctl_el0, x12
	msr	cntv_cval_el0, x13

	ldr	x14, [x0, #CTX_CNTKCTL_EL1]
	msr	cntkctl_el1, x14
#endif

	/* No explicit ISB required here as the ERET covers it */
	ret
endfunc el1_sysregs_context_restore
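
/* -----------------------------------------------------
 * Note: on a world switch, the EL3 context management
 * code is expected to drive the two routines above in
 * pairs, saving the outgoing world's registers before
 * restoring the incoming world's (in TF-A this is done
 * by the cm_el1_sysregs_context_save/restore wrappers
 * in lib/el3_runtime/context_mgmt.c).
 * -----------------------------------------------------
 */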

/* -----------------------------------------------------
 * The following function strictly follows the AArch64
 * PCS, using x9-x17 (temporary caller-saved registers)
 * to save the floating point register context. It
 * assumes that 'x0' points to a 'fp_regs' structure
 * where the register context will be saved.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is
 * set. However, Trusted Firmware currently neither uses
 * the VFP registers nor sets this trap, so the bit is
 * assumed to be clear.
 *
 * TODO: Revisit when VFP is used in the secure world.
 * -----------------------------------------------------
 */
#if CTX_INCLUDE_FPREGS
func fpregs_context_save
	stp	q0, q1, [x0, #CTX_FP_Q0]
	stp	q2, q3, [x0, #CTX_FP_Q2]
	stp	q4, q5, [x0, #CTX_FP_Q4]
	stp	q6, q7, [x0, #CTX_FP_Q6]
	stp	q8, q9, [x0, #CTX_FP_Q8]
	stp	q10, q11, [x0, #CTX_FP_Q10]
	stp	q12, q13, [x0, #CTX_FP_Q12]
	stp	q14, q15, [x0, #CTX_FP_Q14]
	stp	q16, q17, [x0, #CTX_FP_Q16]
	stp	q18, q19, [x0, #CTX_FP_Q18]
	stp	q20, q21, [x0, #CTX_FP_Q20]
	stp	q22, q23, [x0, #CTX_FP_Q22]
	stp	q24, q25, [x0, #CTX_FP_Q24]
	stp	q26, q27, [x0, #CTX_FP_Q26]
	stp	q28, q29, [x0, #CTX_FP_Q28]
	stp	q30, q31, [x0, #CTX_FP_Q30]

	mrs	x9, fpsr
	str	x9, [x0, #CTX_FP_FPSR]

	mrs	x10, fpcr
	str	x10, [x0, #CTX_FP_FPCR]

#if CTX_INCLUDE_AARCH32_REGS
	mrs	x11, fpexc32_el2
	str	x11, [x0, #CTX_FP_FPEXC32_EL2]
#endif
	ret
endfunc fpregs_context_save

/* -----------------------------------------------------
 * The following function strictly follows the AArch64
 * PCS, using x9-x17 (temporary caller-saved registers)
 * to restore the floating point register context. It
 * assumes that 'x0' points to a 'fp_regs' structure
 * from where the register context will be restored.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is
 * set. However, Trusted Firmware currently neither uses
 * the VFP registers nor sets this trap, so the bit is
 * assumed to be clear.
 *
 * TODO: Revisit when VFP is used in the secure world.
 * -----------------------------------------------------
 */
func fpregs_context_restore
	ldp	q0, q1, [x0, #CTX_FP_Q0]
	ldp	q2, q3, [x0, #CTX_FP_Q2]
	ldp	q4, q5, [x0, #CTX_FP_Q4]
	ldp	q6, q7, [x0, #CTX_FP_Q6]
	ldp	q8, q9, [x0, #CTX_FP_Q8]
	ldp	q10, q11, [x0, #CTX_FP_Q10]
	ldp	q12, q13, [x0, #CTX_FP_Q12]
	ldp	q14, q15, [x0, #CTX_FP_Q14]
	ldp	q16, q17, [x0, #CTX_FP_Q16]
	ldp	q18, q19, [x0, #CTX_FP_Q18]
	ldp	q20, q21, [x0, #CTX_FP_Q20]
	ldp	q22, q23, [x0, #CTX_FP_Q22]
	ldp	q24, q25, [x0, #CTX_FP_Q24]
	ldp	q26, q27, [x0, #CTX_FP_Q26]
	ldp	q28, q29, [x0, #CTX_FP_Q28]
	ldp	q30, q31, [x0, #CTX_FP_Q30]

	ldr	x9, [x0, #CTX_FP_FPSR]
	msr	fpsr, x9

	ldr	x10, [x0, #CTX_FP_FPCR]
	msr	fpcr, x10

#if CTX_INCLUDE_AARCH32_REGS
	ldr	x11, [x0, #CTX_FP_FPEXC32_EL2]
	msr	fpexc32_el2, x11
#endif
	/*
	 * No explicit ISB is required here as the ERET used
	 * to switch to secure EL1 or the non-secure world
	 * covers it.
	 */

	ret
endfunc fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */
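
/* -----------------------------------------------------
 * Note: the Qn views accessed above cover the full 128
 * bits of the shared SIMD&FP register file (V0-V31), so
 * the two routines capture Advanced SIMD state as well
 * as scalar floating point state, alongside FPSR/FPCR.
 * -----------------------------------------------------
 */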

#if CTX_INCLUDE_PAUTH_REGS
/* -----------------------------------------------------
 * The following function strictly follows the AArch64
 * PCS, using x9-x17 (temporary caller-saved registers)
 * to save the ARMv8.3-PAuth register context. It assumes
 * that 'sp' points to a 'cpu_context_t' structure where
 * the register context will be saved.
 * -----------------------------------------------------
 */
func pauth_context_save
	add	x11, sp, #CTX_PAUTH_REGS_OFFSET

	mrs	x9, APIAKeyLo_EL1
	mrs	x10, APIAKeyHi_EL1
	stp	x9, x10, [x11, #CTX_PACIAKEY_LO]

	mrs	x9, APIBKeyLo_EL1
	mrs	x10, APIBKeyHi_EL1
	stp	x9, x10, [x11, #CTX_PACIBKEY_LO]

	mrs	x9, APDAKeyLo_EL1
	mrs	x10, APDAKeyHi_EL1
	stp	x9, x10, [x11, #CTX_PACDAKEY_LO]

	mrs	x9, APDBKeyLo_EL1
	mrs	x10, APDBKeyHi_EL1
	stp	x9, x10, [x11, #CTX_PACDBKEY_LO]

	mrs	x9, APGAKeyLo_EL1
	mrs	x10, APGAKeyHi_EL1
	stp	x9, x10, [x11, #CTX_PACGAKEY_LO]

	ret
endfunc pauth_context_save

/* -----------------------------------------------------
 * The following function strictly follows the AArch64
 * PCS, using x9-x17 (temporary caller-saved registers)
 * to restore the ARMv8.3-PAuth register context. It
 * assumes that 'sp' points to a 'cpu_context_t'
 * structure from where the register context will be
 * restored.
 * -----------------------------------------------------
 */
func pauth_context_restore
	add	x11, sp, #CTX_PAUTH_REGS_OFFSET

	ldp	x9, x10, [x11, #CTX_PACIAKEY_LO]
	msr	APIAKeyLo_EL1, x9
	msr	APIAKeyHi_EL1, x10

	ldp	x9, x10, [x11, #CTX_PACIBKEY_LO]
	msr	APIBKeyLo_EL1, x9
	msr	APIBKeyHi_EL1, x10

	ldp	x9, x10, [x11, #CTX_PACDAKEY_LO]
	msr	APDAKeyLo_EL1, x9
	msr	APDAKeyHi_EL1, x10

	ldp	x9, x10, [x11, #CTX_PACDBKEY_LO]
	msr	APDBKeyLo_EL1, x9
	msr	APDBKeyHi_EL1, x10

	ldp	x9, x10, [x11, #CTX_PACGAKEY_LO]
	msr	APGAKeyLo_EL1, x9
	msr	APGAKeyHi_EL1, x10

	ret
endfunc pauth_context_restore
#endif /* CTX_INCLUDE_PAUTH_REGS */

/* -----------------------------------------------------
 * The following function strictly follows the AArch64
 * PCS, using x9-x17 (temporary caller-saved registers)
 * to load the APIA key used by the firmware.
 * -----------------------------------------------------
 */
#if ENABLE_PAUTH
func pauth_load_bl_apiakey
	/* Load the instruction key A used by the Trusted Firmware */
	adrp	x11, plat_apiakey
	add	x11, x11, :lo12:plat_apiakey
	ldp	x9, x10, [x11, #0]

	msr	APIAKeyLo_EL1, x9
	msr	APIAKeyHi_EL1, x10

	ret
endfunc pauth_load_bl_apiakey
#endif /* ENABLE_PAUTH */
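
/* -----------------------------------------------------
 * For reference: pauth_load_bl_apiakey above expects the
 * platform to export the key as two consecutive 64-bit
 * words, low half first. In C terms this corresponds
 * roughly to (an illustrative sketch; the real
 * definition and its initialisation belong to platform
 * code):
 *
 *     uint64_t plat_apiakey[2];	// [0] = Lo, [1] = Hi
 * -----------------------------------------------------
 */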

/* -----------------------------------------------------
 * The following functions are used to save and restore
 * all the general purpose registers. Ideally we would
 * only save and restore the callee-saved registers when
 * a world switch occurs, but that type of implementation
 * is more complex. So currently we always save and
 * restore these registers on entry to and exit from EL3.
 * These are functions rather than macros, to ensure that
 * their invocation fits within the 32 instructions
 * available per exception vector.
 * clobbers: x18
 * -----------------------------------------------------
 */
func save_gp_registers
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	/* Save the lower EL's SP_EL0, using x18 as a scratch register */
	mrs	x18, sp_el0
	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
	ret
endfunc save_gp_registers

/* -----------------------------------------------------
 * This function restores all general purpose registers
 * except x30 from the CPU context. The x30 register must
 * be explicitly restored by the caller.
 * -----------------------------------------------------
 */
func restore_gp_registers
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	/* Restore SP_EL0 via x28, before x28 itself is reloaded below */
	ldr	x28, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
	msr	sp_el0, x28
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ret
endfunc restore_gp_registers

/* -----------------------------------------------------
 * This function restores all general purpose registers
 * (including x30) and exits EL3 via ERET to a lower
 * exception level.
 * -----------------------------------------------------
 */
func restore_gp_registers_eret
	bl	restore_gp_registers
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

#if IMAGE_BL31 && RAS_EXTENSION
	/*
	 * Issue an Error Synchronization Barrier to synchronize
	 * SErrors before exiting EL3. We're running with EAs
	 * unmasked, so any synchronized errors would be taken
	 * immediately; therefore there is no need to inspect
	 * the DISR_EL1 register.
	 */
	esb
#endif
	eret
endfunc restore_gp_registers_eret
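
/* -----------------------------------------------------
 * For reference: an exception vector entry is expected
 * to pair the helpers above roughly as follows (an
 * illustrative sketch, not a verbatim vector):
 *
 *     bl	save_gp_registers	// stash x0-x29 and SP_EL0
 *     ...				// dispatch to the C handler
 *     b	el3_exit		// restore state and ERET
 * -----------------------------------------------------
 */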

/* -----------------------------------------------------
 * This routine assumes that SP_EL3 points to a valid
 * context structure from where the gp regs and other
 * special registers can be retrieved.
 * -----------------------------------------------------
 */
func el3_exit
	/* -----------------------------------------------------
	 * Save the current SP_EL0 i.e. the EL3 runtime stack,
	 * which will be used for handling the next SMC. Then
	 * switch to SP_EL3.
	 * -----------------------------------------------------
	 */
	mov	x17, sp
	msr	spsel, #1
	str	x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* -----------------------------------------------------
	 * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
	 * -----------------------------------------------------
	 */
	ldr	x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	ldp	x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	msr	scr_el3, x18
	msr	spsr_el3, x16
	msr	elr_el3, x17

#if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639
	/* Restore the mitigation state as it was on entry to EL3 */
	ldr	x17, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
	cmp	x17, xzr
	beq	1f
	blr	x17
1:
#endif

#if CTX_INCLUDE_PAUTH_REGS
	/* Restore ARMv8.3-PAuth registers */
	bl	pauth_context_restore
#endif

	/* Restore the saved general purpose registers and return */
	b	restore_gp_registers_eret
endfunc el3_exit
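
/* -----------------------------------------------------
 * Note: el3_exit is the common exit path from EL3. A
 * runtime service handler is expected to finish with a
 * plain 'b el3_exit' once its return values have been
 * written into the saved GP registers and SPSR_EL3/
 * ELR_EL3 in the context describe the target world.
 * -----------------------------------------------------
 */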