/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <runtime_svc.h>
#include <platform.h>
#include <context.h>
#include <asm_macros.S>
#include <cm_macros.S>

	.globl	runtime_exceptions
	.globl	el3_exit
	.globl	get_exception_stack

	.section	.vectors, "ax"; .align 11

	.align	7
runtime_exceptions:
	/* -----------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
sync_exception_sp_el0:
	/* -----------------------------------------------------
	 * We don't expect any synchronous exceptions from EL3
	 * -----------------------------------------------------
	 */
	wfi
	b	sync_exception_sp_el0
	check_vector_size sync_exception_sp_el0

	.align	7
	/* -----------------------------------------------------
	 * EL3 code is non-reentrant. Any asynchronous exception
	 * is a serious error. Loop infinitely.
	 * -----------------------------------------------------
	 */
irq_sp_el0:
	handle_async_exception	IRQ_SP_EL0
	b	irq_sp_el0
	check_vector_size irq_sp_el0

	.align	7
fiq_sp_el0:
	handle_async_exception	FIQ_SP_EL0
	b	fiq_sp_el0
	check_vector_size fiq_sp_el0

	.align	7
serror_sp_el0:
	handle_async_exception	SERROR_SP_EL0
	b	serror_sp_el0
	check_vector_size serror_sp_el0

	/* -----------------------------------------------------
	 * Current EL with SP_ELx : 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7
sync_exception_sp_elx:
	/* -----------------------------------------------------
	 * This exception will trigger if anything went wrong
	 * during a previous exception entry or exit or while
	 * handling an earlier unexpected synchronous exception.
	 * In any case we cannot rely on SP_EL3. Switching to a
	 * known safe area of memory will corrupt at least a
	 * single register. It is best to spin in a wfi loop, as
	 * that will preserve the system state for later analysis
	 * through a debugger.
	 * -----------------------------------------------------
	 */
	wfi
	b	sync_exception_sp_elx
	check_vector_size sync_exception_sp_elx

	/* -----------------------------------------------------
	 * As mentioned in the previous comment, all bets are
	 * off if SP_EL3 cannot be relied upon. Loop infinitely
	 * to preserve the state for analysis through a debugger.
	 * -----------------------------------------------------
	 */
	.align	7
irq_sp_elx:
	b	irq_sp_elx
	check_vector_size irq_sp_elx

	.align	7
fiq_sp_elx:
	b	fiq_sp_elx
	check_vector_size fiq_sp_elx

	.align	7
serror_sp_elx:
	b	serror_sp_elx
	check_vector_size serror_sp_elx

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7
sync_exception_aarch64:
	/* -----------------------------------------------------
	 * This exception vector will most commonly be the entry
	 * point for SMCs and traps that are unhandled at lower
	 * ELs. SP_EL3 should point to a valid cpu context where
	 * the general purpose and system register state can be
	 * saved.
	 * -----------------------------------------------------
	 */
	handle_sync_exception
	check_vector_size sync_exception_aarch64

	.align	7
	/* -----------------------------------------------------
	 * Asynchronous exceptions from lower ELs are not
	 * currently supported. Report their occurrence.
	 * -----------------------------------------------------
	 */
irq_aarch64:
	handle_async_exception	IRQ_AARCH64
	b	irq_aarch64
	check_vector_size irq_aarch64

	.align	7
fiq_aarch64:
	handle_async_exception	FIQ_AARCH64
	b	fiq_aarch64
	check_vector_size fiq_aarch64

	.align	7
serror_aarch64:
	handle_async_exception	SERROR_AARCH64
	b	serror_aarch64
	check_vector_size serror_aarch64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7
sync_exception_aarch32:
	/* -----------------------------------------------------
	 * This exception vector will most commonly be the entry
	 * point for SMCs and traps that are unhandled at lower
	 * ELs. SP_EL3 should point to a valid cpu context where
	 * the general purpose and system register state can be
	 * saved.
	 * -----------------------------------------------------
	 */
	handle_sync_exception
	check_vector_size sync_exception_aarch32

	.align	7
	/* -----------------------------------------------------
	 * Asynchronous exceptions from lower ELs are not
	 * currently supported. Report their occurrence.
	 * -----------------------------------------------------
	 */
irq_aarch32:
	handle_async_exception	IRQ_AARCH32
	b	irq_aarch32
	check_vector_size irq_aarch32

	.align	7
fiq_aarch32:
	handle_async_exception	FIQ_AARCH32
	b	fiq_aarch32
	check_vector_size fiq_aarch32

	.align	7
serror_aarch32:
	handle_async_exception	SERROR_AARCH32
	b	serror_aarch32
	check_vector_size serror_aarch32

	.align	7

	/* -----------------------------------------------------
	 * The following code handles secure monitor calls.
	 * Depending upon the execution state from which the SMC
	 * has been invoked, it frees some general purpose
	 * registers to perform the remaining tasks. These
	 * involve finding the runtime service handler that is
	 * the target of the SMC and switching to the runtime
	 * stack (SP_EL0) before calling the handler.
	 *
	 * Note that x30 has been explicitly saved and can be
	 * used here.
	 * -----------------------------------------------------
	 */
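
	/* -----------------------------------------------------
	 * For reference, a rough C sketch of the handler lookup
	 * performed by the code below. This is only an
	 * illustration; the FUNCID_*, RT_SVC_* and SMC_UNK
	 * definitions and the rt_svc_descs_indices array are
	 * assumed to be the ones provided by runtime_svc.h and
	 * the runtime service framework, whose exact shapes
	 * live there, not here:
	 *
	 *   unsigned int oen, type, idx;
	 *
	 *   oen  = (smc_fid >> FUNCID_OEN_SHIFT) &
	 *          ((1 << FUNCID_OEN_WIDTH) - 1);
	 *   type = (smc_fid >> FUNCID_TYPE_SHIFT) &
	 *          ((1 << FUNCID_TYPE_WIDTH) - 1);
	 *   idx  = rt_svc_descs_indices[oen | (type << FUNCID_OEN_WIDTH)];
	 *   if (idx & 0x80)
	 *           return SMC_UNK;      // no service registered
	 *   handler = rt_svc_descs[idx].handle;
	 * -----------------------------------------------------
	 */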

func smc_handler
smc_handler32:
	/* Check whether aarch32 issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

	/* -----------------------------------------------------
	 * Since we are coming from aarch32, x8-x18 need to be
	 * saved as per the SMC32 calling convention. If a lower
	 * EL in aarch64 is making an SMC32 call then it must
	 * have saved x8-x17 already therein.
	 * -----------------------------------------------------
	 */
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]

	/* x4-x7, x18, sp_el0 are saved below */

smc_handler64:
	/* -----------------------------------------------------
	 * Populate the parameters for the SMC handler. We
	 * already have x0-x4 in place. x5 will point to a
	 * cookie (not used now). x6 will point to the context
	 * structure (SP_EL3) and x7 will contain flags we need
	 * to pass to the handler. Hence save x5-x7. Note that
	 * x4 only needs to be preserved for AArch32 callers,
	 * but we do it for AArch64 callers as well for
	 * convenience.
	 * -----------------------------------------------------
	 */
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]

	mov	x5, xzr
	mov	x6, sp
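
	/* -----------------------------------------------------
	 * For reference, the runtime service handler invoked via
	 * 'blr x15' below is assumed to follow the prototype
	 * declared by the runtime service framework in
	 * runtime_svc.h (a sketch only; the authoritative
	 * typedef lives in that header):
	 *
	 *   uint64_t handler(uint32_t smc_fid,
	 *                    uint64_t x1, uint64_t x2,
	 *                    uint64_t x3, uint64_t x4,
	 *                    void *cookie,    // x5, unused for now
	 *                    void *handle,    // x6, cpu context (SP_EL3)
	 *                    uint64_t flags); // x7, bit 0 = SCR_EL3.NS
	 * -----------------------------------------------------
	 */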

	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH

	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)

	/* Load descriptor index from array of indices */
	adr	x14, rt_svc_descs_indices
	ldrb	w15, [x14, x16]

	/* Save x18 and SP_EL0 */
	mrs	x17, sp_el0
	stp	x18, x17, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]

	/* -----------------------------------------------------
	 * Restore the saved C runtime stack value which will
	 * become the new SP_EL0 i.e. EL3 runtime stack. It was
	 * saved in the 'cpu_context' structure prior to the last
	 * ERET from EL3.
	 * -----------------------------------------------------
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/*
	 * Any index greater than 127 is invalid. Check bit 7 to
	 * catch an invalid index.
	 */
	tbnz	w15, 7, smc_unknown

	/* Switch to SP_EL0 */
	msr	spsel, #0

	/* -----------------------------------------------------
	 * Get the descriptor using the index
	 * x11 = (base + off), x15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 * -----------------------------------------------------
	 */
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]

	/* -----------------------------------------------------
	 * Save SPSR_EL3, ELR_EL3 and SCR_EL3 in case there
	 * is a world switch during SMC handling.
	 * TODO: Revisit if all system registers can be saved
	 * later.
	 * -----------------------------------------------------
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	mrs	x18, scr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	stp	x18, xzr, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/* Copy the SCR_EL3.NS bit to the flags to indicate the caller's security state */
	bfi	x7, x18, #0, #1

	mov	sp, x12

	/* -----------------------------------------------------
	 * Call the Secure Monitor Call handler and then drop
	 * directly into el3_exit() which will program any
	 * remaining architectural state prior to issuing the
	 * ERET to the desired lower EL.
	 * -----------------------------------------------------
	 */
#if DEBUG
	cbz	x15, rt_svc_fw_critical_error
#endif
	blr	x15

	/* -----------------------------------------------------
	 * This routine assumes that SP_EL3 is pointing to a
	 * valid context structure from which the gp regs and
	 * other special registers can be retrieved.
	 *
	 * Keep it in the same section as smc_handler as this
	 * function uses a fall-through to el3_exit
	 * -----------------------------------------------------
	 */
el3_exit: ; .type el3_exit, %function
	/* -----------------------------------------------------
	 * Save the current SP_EL0 i.e. the EL3 runtime stack
	 * which will be used for handling the next SMC. Then
	 * switch to SP_EL3
	 * -----------------------------------------------------
	 */
	mov	x17, sp
	msr	spsel, #1
	str	x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* -----------------------------------------------------
	 * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
	 * -----------------------------------------------------
	 */
	ldp	x18, xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	ldp	x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	msr	scr_el3, x18
	msr	spsr_el3, x16
	msr	elr_el3, x17

	/* Restore saved general purpose registers and return */
	bl	restore_scratch_registers
	ldp	x30, xzr, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	eret
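
	/* -----------------------------------------------------
	 * For reference, a sketch of how a runtime service
	 * handler is expected to hand its results back so that
	 * the el3_exit path above restores them into the
	 * caller's registers. The SMC_RET* macros and context
	 * accessors are assumed to be those provided by the
	 * runtime service framework (runtime_svc.h); their
	 * exact definitions live there, not here:
	 *
	 *   uint64_t my_svc_smc_handler(uint32_t smc_fid, ...,
	 *                               void *handle, uint64_t flags)
	 *   {
	 *           ...
	 *           // Writes rc into the saved x0 slot of 'handle'
	 *           // and returns the context pointer; execution
	 *           // then falls through into el3_exit above.
	 *           SMC_RET1(handle, rc);
	 *   }
	 * -----------------------------------------------------
	 */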

smc_unknown:
	/*
	 * Here we restore x4-x18 regardless of where we came from. AArch32
	 * callers will find the register contents unchanged, but AArch64
	 * callers will find the registers modified (with stale earlier NS
	 * content). Either way, we aren't leaking any secure information
	 * through them.
	 */
	bl	restore_scratch_registers_callee

smc_prohibited:
	ldp	x30, xzr, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	w0, #SMC_UNK
	eret

rt_svc_fw_critical_error:
	b	rt_svc_fw_critical_error

	/* -----------------------------------------------------
	 * The following functions are used to save and restore
	 * all the caller-saved registers as per the aapcs_64.
	 * These are not macros to ensure their invocation fits
	 * within the 32 instructions per exception vector.
	 * -----------------------------------------------------
	 */
func save_scratch_registers
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	mrs	x17, sp_el0
	stp	x18, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	ret

func restore_scratch_registers
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]

restore_scratch_registers_callee:
	ldp	x18, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]

	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]

	msr	sp_el0, x17
	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	ret

	/* -----------------------------------------------------
	 * Exception stack for each cpu: 256 bytes in release
	 * builds, 768 bytes in DEBUG builds.
	 * -----------------------------------------------------
	 */
#if DEBUG
#define PCPU_EXCEPTION_STACK_SIZE	0x300
#else
#define PCPU_EXCEPTION_STACK_SIZE	0x100
#endif

	/* -----------------------------------------------------
	 * uint64_t get_exception_stack(uint64_t mpidr) : This
	 * function returns a small per-cpu stack used for
	 * reporting unhandled exceptions.
	 * -----------------------------------------------------
	 */
func get_exception_stack
	mov	x10, x30 // lr
	get_mp_stack pcpu_exception_stack, PCPU_EXCEPTION_STACK_SIZE
	ret	x10

	/* -----------------------------------------------------
	 * Per-cpu exception stacks in normal memory.
	 * -----------------------------------------------------
	 */
declare_stack pcpu_exception_stack, tzfw_normal_stacks, \
		PCPU_EXCEPTION_STACK_SIZE, PLATFORM_CORE_COUNT
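
	/* -----------------------------------------------------
	 * Expected usage of get_exception_stack, sketched here
	 * for reference. The actual call site is the async
	 * exception handling code (e.g. the
	 * handle_async_exception macro in cm_macros.S), whose
	 * exact sequence may differ:
	 *
	 *   mrs	x0, mpidr_el1
	 *   bl	get_exception_stack
	 *   mov	sp, x0
	 * -----------------------------------------------------
	 */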