/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <context.h>

	.global	el1_sysregs_context_save
	.global	el1_sysregs_context_save_post_ops
	.global	el1_sysregs_context_restore
#if CTX_INCLUDE_FPREGS
	.global	fpregs_context_save
	.global	fpregs_context_restore
#endif
	.global	save_gp_registers
	.global	restore_gp_registers_eret
	.global	restore_gp_registers_callee_eret
	.global	el3_exit

/* -----------------------------------------------------
 * The following function strictly follows the AArch64
 * PCS, using x9-x17 (temporary caller-saved registers)
 * to save the EL1 system register context. It assumes
 * that 'x0' points to an 'el1_sys_regs' structure where
 * the register context will be saved.
 * -----------------------------------------------------
 */
func el1_sysregs_context_save

	mrs	x9, spsr_el1
	mrs	x10, elr_el1
	stp	x9, x10, [x0, #CTX_SPSR_EL1]

	mrs	x15, sctlr_el1
	mrs	x16, actlr_el1
	stp	x15, x16, [x0, #CTX_SCTLR_EL1]

	mrs	x17, cpacr_el1
	mrs	x9, csselr_el1
	stp	x17, x9, [x0, #CTX_CPACR_EL1]

	mrs	x10, sp_el1
	mrs	x11, esr_el1
	stp	x10, x11, [x0, #CTX_SP_EL1]

	mrs	x12, ttbr0_el1
	mrs	x13, ttbr1_el1
	stp	x12, x13, [x0, #CTX_TTBR0_EL1]

	mrs	x14, mair_el1
	mrs	x15, amair_el1
	stp	x14, x15, [x0, #CTX_MAIR_EL1]

	mrs	x16, tcr_el1
	mrs	x17, tpidr_el1
	stp	x16, x17, [x0, #CTX_TCR_EL1]

	mrs	x9, tpidr_el0
	mrs	x10, tpidrro_el0
	stp	x9, x10, [x0, #CTX_TPIDR_EL0]

	mrs	x13, par_el1
	mrs	x14, far_el1
	stp	x13, x14, [x0, #CTX_PAR_EL1]

	mrs	x15, afsr0_el1
	mrs	x16, afsr1_el1
	stp	x15, x16, [x0, #CTX_AFSR0_EL1]

	mrs	x17, contextidr_el1
	mrs	x9, vbar_el1
	stp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]

	/* Save AArch32 system registers if the build has been configured to do so */
#if CTX_INCLUDE_AARCH32_REGS
	mrs	x11, spsr_abt
	mrs	x12, spsr_und
	stp	x11, x12, [x0, #CTX_SPSR_ABT]

	mrs	x13, spsr_irq
	mrs	x14, spsr_fiq
	stp	x13, x14, [x0, #CTX_SPSR_IRQ]

	mrs	x15, dacr32_el2
	mrs	x16, ifsr32_el2
	stp	x15, x16, [x0, #CTX_DACR32_EL2]

	mrs	x17, fpexc32_el2
	str	x17, [x0, #CTX_FP_FPEXC32_EL2]
#endif

	/* Save NS timer registers if the build has been configured to do so */
#if NS_TIMER_SWITCH
	mrs	x10, cntp_ctl_el0
	mrs	x11, cntp_cval_el0
	stp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]

	mrs	x12, cntv_ctl_el0
	mrs	x13, cntv_cval_el0
	stp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]

	mrs	x14, cntkctl_el1
	str	x14, [x0, #CTX_CNTKCTL_EL1]
#endif

	ret
endfunc el1_sysregs_context_save

/* -----------------------------------------------------
 * The following function strictly follows the AArch64
 * PCS, using x9-x17 (temporary caller-saved registers)
 * to perform post operations after saving the EL1
 * system register context.
 * -----------------------------------------------------
 */
func el1_sysregs_context_save_post_ops
#if ENABLE_SPE_FOR_LOWER_ELS
	/* Detect if SPE is implemented */
	mrs	x9, id_aa64dfr0_el1
	ubfx	x9, x9, #ID_AA64DFR0_PMS_SHIFT, #ID_AA64DFR0_PMS_LENGTH
	cmp	x9, #0x1
	b.ne	1f

	/*
	 * Before switching from the normal world to the secure world,
	 * the profiling buffers need to be drained out to memory. This
	 * is required to avoid an invalid memory access when TTBR is
	 * switched for entry to S-EL1.
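	 *
	 * PSB CSYNC is the Profiling Synchronization Barrier introduced
	 * by the ARMv8.2 Statistical Profiling Extension; the DSB that
	 * follows it ensures the drained profiling data has reached
	 * memory. The '.arch' directives temporarily let the assembler
	 * accept this v8.2 profiling instruction and then restore the
	 * baseline architecture for the rest of the file.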
	 */
	.arch	armv8.2-a+profile
	psb	csync
	dsb	nsh
	.arch	armv8-a
1:
#endif
	ret
endfunc el1_sysregs_context_save_post_ops

/* -----------------------------------------------------
 * The following function strictly follows the AArch64
 * PCS, using x9-x17 (temporary caller-saved registers)
 * to restore the EL1 system register context. It
 * assumes that 'x0' points to an 'el1_sys_regs'
 * structure from where the register context will be
 * restored.
 * -----------------------------------------------------
 */
func el1_sysregs_context_restore

	ldp	x9, x10, [x0, #CTX_SPSR_EL1]
	msr	spsr_el1, x9
	msr	elr_el1, x10

	ldp	x15, x16, [x0, #CTX_SCTLR_EL1]
	msr	sctlr_el1, x15
	msr	actlr_el1, x16

	ldp	x17, x9, [x0, #CTX_CPACR_EL1]
	msr	cpacr_el1, x17
	msr	csselr_el1, x9

	ldp	x10, x11, [x0, #CTX_SP_EL1]
	msr	sp_el1, x10
	msr	esr_el1, x11

	ldp	x12, x13, [x0, #CTX_TTBR0_EL1]
	msr	ttbr0_el1, x12
	msr	ttbr1_el1, x13

	ldp	x14, x15, [x0, #CTX_MAIR_EL1]
	msr	mair_el1, x14
	msr	amair_el1, x15

	ldp	x16, x17, [x0, #CTX_TCR_EL1]
	msr	tcr_el1, x16
	msr	tpidr_el1, x17

	ldp	x9, x10, [x0, #CTX_TPIDR_EL0]
	msr	tpidr_el0, x9
	msr	tpidrro_el0, x10

	ldp	x13, x14, [x0, #CTX_PAR_EL1]
	msr	par_el1, x13
	msr	far_el1, x14

	ldp	x15, x16, [x0, #CTX_AFSR0_EL1]
	msr	afsr0_el1, x15
	msr	afsr1_el1, x16

	ldp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
	msr	contextidr_el1, x17
	msr	vbar_el1, x9

	/* Restore AArch32 system registers if the build has been configured to do so */
#if CTX_INCLUDE_AARCH32_REGS
	ldp	x11, x12, [x0, #CTX_SPSR_ABT]
	msr	spsr_abt, x11
	msr	spsr_und, x12

	ldp	x13, x14, [x0, #CTX_SPSR_IRQ]
	msr	spsr_irq, x13
	msr	spsr_fiq, x14

	ldp	x15, x16, [x0, #CTX_DACR32_EL2]
	msr	dacr32_el2, x15
	msr	ifsr32_el2, x16

	ldr	x17, [x0, #CTX_FP_FPEXC32_EL2]
	msr	fpexc32_el2, x17
#endif
	/* Restore NS timer registers if the build has been configured to do so */
#if NS_TIMER_SWITCH
	ldp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]
	msr	cntp_ctl_el0, x10
	msr	cntp_cval_el0, x11

	ldp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]
	msr	cntv_ctl_el0, x12
	msr	cntv_cval_el0, x13

	ldr	x14, [x0, #CTX_CNTKCTL_EL1]
	msr	cntkctl_el1, x14
#endif

	/* No explicit ISB is required here as the ERET covers it */
	ret
endfunc el1_sysregs_context_restore
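
/* -----------------------------------------------------
 * Usage sketch (assumed caller behaviour, not defined in
 * this file): callers pass the address of the target
 * 'el1_sys_regs' area in 'x0' and invoke the save and
 * restore routines as a pair around a world switch,
 * roughly:
 *
 *	mov	x0, <address of el1_sys_regs area>
 *	bl	el1_sysregs_context_save
 *	bl	el1_sysregs_context_save_post_ops
 *	...
 *	mov	x0, <address of el1_sys_regs area>
 *	bl	el1_sysregs_context_restore
 * -----------------------------------------------------
 */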

/* -----------------------------------------------------
 * The following function strictly follows the AArch64
 * PCS, using x9-x17 (temporary caller-saved registers)
 * to save the floating point register context. It
 * assumes that 'x0' points to a 'fp_regs' structure
 * where the register context will be saved.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is
 * set. However, the Trusted Firmware currently neither
 * uses the VFP registers nor sets this trap, so the bit
 * is assumed to be clear.
 *
 * TODO: Revisit when VFP is used in the secure world.
 * -----------------------------------------------------
 */
#if CTX_INCLUDE_FPREGS
func fpregs_context_save
	stp	q0, q1, [x0, #CTX_FP_Q0]
	stp	q2, q3, [x0, #CTX_FP_Q2]
	stp	q4, q5, [x0, #CTX_FP_Q4]
	stp	q6, q7, [x0, #CTX_FP_Q6]
	stp	q8, q9, [x0, #CTX_FP_Q8]
	stp	q10, q11, [x0, #CTX_FP_Q10]
	stp	q12, q13, [x0, #CTX_FP_Q12]
	stp	q14, q15, [x0, #CTX_FP_Q14]
	stp	q16, q17, [x0, #CTX_FP_Q16]
	stp	q18, q19, [x0, #CTX_FP_Q18]
	stp	q20, q21, [x0, #CTX_FP_Q20]
	stp	q22, q23, [x0, #CTX_FP_Q22]
	stp	q24, q25, [x0, #CTX_FP_Q24]
	stp	q26, q27, [x0, #CTX_FP_Q26]
	stp	q28, q29, [x0, #CTX_FP_Q28]
	stp	q30, q31, [x0, #CTX_FP_Q30]

	mrs	x9, fpsr
	str	x9, [x0, #CTX_FP_FPSR]

	mrs	x10, fpcr
	str	x10, [x0, #CTX_FP_FPCR]

	ret
endfunc fpregs_context_save

/* -----------------------------------------------------
 * The following function strictly follows the AArch64
 * PCS, using x9-x17 (temporary caller-saved registers)
 * to restore the floating point register context. It
 * assumes that 'x0' points to a 'fp_regs' structure
 * from where the register context will be restored.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is
 * set. However, the Trusted Firmware currently neither
 * uses the VFP registers nor sets this trap, so the bit
 * is assumed to be clear.
 *
 * TODO: Revisit when VFP is used in the secure world.
 * -----------------------------------------------------
 */
func fpregs_context_restore
	ldp	q0, q1, [x0, #CTX_FP_Q0]
	ldp	q2, q3, [x0, #CTX_FP_Q2]
	ldp	q4, q5, [x0, #CTX_FP_Q4]
	ldp	q6, q7, [x0, #CTX_FP_Q6]
	ldp	q8, q9, [x0, #CTX_FP_Q8]
	ldp	q10, q11, [x0, #CTX_FP_Q10]
	ldp	q12, q13, [x0, #CTX_FP_Q12]
	ldp	q14, q15, [x0, #CTX_FP_Q14]
	ldp	q16, q17, [x0, #CTX_FP_Q16]
	ldp	q18, q19, [x0, #CTX_FP_Q18]
	ldp	q20, q21, [x0, #CTX_FP_Q20]
	ldp	q22, q23, [x0, #CTX_FP_Q22]
	ldp	q24, q25, [x0, #CTX_FP_Q24]
	ldp	q26, q27, [x0, #CTX_FP_Q26]
	ldp	q28, q29, [x0, #CTX_FP_Q28]
	ldp	q30, q31, [x0, #CTX_FP_Q30]

	ldr	x9, [x0, #CTX_FP_FPSR]
	msr	fpsr, x9

	ldr	x10, [x0, #CTX_FP_FPCR]
	msr	fpcr, x10

	/*
	 * No explicit ISB is required here as the ERET to
	 * switch to secure EL1 or the non-secure world
	 * covers it.
	 */

	ret
endfunc fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */

/* -----------------------------------------------------
 * The following functions are used to save and restore
 * all the general purpose registers. Ideally we would
 * only save and restore the callee-saved registers when
 * a world switch occurs, but that type of implementation
 * is more complex. So currently we always save and
 * restore these registers on entry to and exit from EL3.
 * These are functions rather than macros to ensure that
 * their invocation fits within the 32 instructions
 * available per exception vector entry.
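 * (Each entry in the AArch64 exception vector table is
 * 32 instructions, i.e. 128 bytes, long, so the vector
 * code branches to these common routines instead of
 * expanding them inline.)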
 * clobbers: x18
 * -----------------------------------------------------
 */
func save_gp_registers
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	mrs	x18, sp_el0
	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
	ret
endfunc save_gp_registers

func restore_gp_registers_eret
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	b	restore_gp_registers_callee_eret
endfunc restore_gp_registers_eret

func restore_gp_registers_callee_eret
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	/* Restore LR together with the saved SP_EL0 value; x17 is a temporary here */
	ldp	x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	msr	sp_el0, x17
	/* Reload x16 and x17 last so all GP registers hold their saved values at ERET */
	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	eret
endfunc restore_gp_registers_callee_eret

/* -----------------------------------------------------
 * This routine assumes that SP_EL3 is pointing to a
 * valid context structure from where the gp regs and
 * other special registers can be retrieved.
 * -----------------------------------------------------
 */
func el3_exit
	/* -----------------------------------------------------
	 * Save the current SP_EL0 i.e. the EL3 runtime stack,
	 * which will be used for handling the next SMC. Then
	 * switch to SP_EL3.
	 * -----------------------------------------------------
	 */
	mov	x17, sp
	msr	spsel, #1
	str	x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* -----------------------------------------------------
	 * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
	 * -----------------------------------------------------
	 */
	ldr	x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	ldp	x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	msr	scr_el3, x18
	msr	spsr_el3, x16
	msr	elr_el3, x17

	/* Restore saved general purpose registers and return */
	b	restore_gp_registers_eret
endfunc el3_exit
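
/* -----------------------------------------------------
 * Sketch of the assumed caller-side pairing: on entry to
 * EL3 the exception vector code saves the incoming
 * context, and the return path ends by tail-calling
 * el3_exit, roughly:
 *
 *	bl	save_gp_registers
 *	...			handle the SMC / interrupt
 *	b	el3_exit	restore SPSR/ELR/SCR_EL3 and
 *				GP registers, then ERET
 *
 * The exact vector code is outside this file; this is an
 * illustration only.
 * -----------------------------------------------------
 */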