/*
 * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl31/ea_handle.h>
#include <bl31/interrupt_mgmt.h>
#include <bl31/sync_handle.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32

	/*
	 * Macro that prepares entry to EL3 upon taking an exception.
	 *
	 * With RAS_EXTENSION, this macro synchronizes pending errors with an
	 * ESB instruction. When an error is thus synchronized, the handling is
	 * delegated to the platform EA handler.
	 *
	 * Without RAS_EXTENSION, this macro synchronizes pending errors using
	 * a DSB, unmasks Asynchronous External Aborts and saves X30 before
	 * setting the flag CTX_IS_IN_EL3.
	 */
	.macro check_and_unmask_ea
#if RAS_EXTENSION
	/* Synchronize pending External Aborts */
	esb

	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/*
	 * Explicitly save x30 so as to free up a register and to enable
	 * branching.
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

	/* Check for SErrors synchronized by the ESB instruction */
	mrs	x30, DISR_EL1
	tbz	x30, #DISR_A_BIT, 1f

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * If Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable Cycle Counter.
	 * Also set the PSTATE to a known state.
	 */
	bl	prepare_el3_entry

	bl	handle_lower_el_ea_esb

	/* Restore general purpose, PMCR_EL0 and ARMv8.3-PAuth registers */
	bl	restore_gp_pmcr_pauth_regs
1:
#else
	/*
	 * For SoCs which do not implement RAS, use DSB as a barrier to
	 * synchronize pending external aborts.
	 */
	dsb	sy

	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* Use ISB for the above unmask operation to take effect immediately */
	isb

	/*
	 * Refer to Note 1. There is no need to restore X30, as both the
	 * handle_sync_exception and handle_interrupt_exception macros which
	 * follow this macro modify X30 anyway.
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x30, #1
	str	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
	dmb	sy
#endif
	.endm

#if !RAS_EXTENSION
	/*
	 * Note 1: The explicit DSB at the entry of various exception vectors
	 * for handling exceptions from lower ELs can inadvertently trigger an
	 * SError exception in EL3 due to pending asynchronous aborts in lower
	 * ELs. This will end up being handled by serror_sp_elx, which will
	 * ultimately panic and die.
	 * The way to work around this is to update a flag to indicate if the
	 * exception truly came from EL3.
	 * This flag is allocated in the cpu_context structure and located at
	 * offset "CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3".
	 * This is not a bulletproof solution to the problem at hand because
	 * we assume the instructions following "isb" that help to update the
	 * flag execute without causing further exceptions.
	 */

	/* ---------------------------------------------------------------------
	 * This macro handles Asynchronous External Aborts.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_async_ea
	/*
	 * Use a barrier to synchronize pending external aborts.
	 */
	dsb	sy

	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* Use ISB for the above unmask operation to take effect immediately */
	isb

	/* Refer to Note 1 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x30, #1
	str	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
	dmb	sy

	b	handle_lower_el_async_ea
	.endm

	/*
	 * This macro checks if the exception was taken due to an SError in EL3
	 * or because of pending asynchronous external aborts from a lower EL
	 * that got triggered due to explicit synchronization in EL3. Refer to
	 * Note 1.
	 */
	.macro check_if_serror_from_EL3
	/* Assumes SP_EL3 on entry */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	ldr	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
	cbnz	x30, exp_from_EL3

	/* Handle asynchronous external abort from lower EL */
	b	handle_lower_el_async_ea

exp_from_EL3:
	/* Fall through to plat_handle_el3_ea, which does not return */
	.endm
#endif

	/* ---------------------------------------------------------------------
	 * This macro handles Synchronous exceptions.
	 * Only SMC exceptions are supported.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	mrs	x30, cntpct_el0
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	mrs	x29, tpidr_el3
	str	x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Handle SMC exceptions separately from other synchronous exceptions */
	cmp	x30, #EC_AARCH32_SMC
	b.eq	smc_handler32

	cmp	x30, #EC_AARCH64_SMC
	b.eq	sync_handler64

	cmp	x30, #EC_AARCH64_SYS
	b.eq	sync_handler64

	/* Synchronous exceptions other than the above are assumed to be EA */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	b	enter_lower_el_sync_ea
	.endm


	/* ---------------------------------------------------------------------
	 * This macro handles FIQ or IRQ interrupts, i.e. EL3, S-EL1 and NS
	 * interrupts.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_interrupt_exception label

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * If Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable Cycle Counter.
	 * Also set the PSTATE to a known state.
	 */
	bl	prepare_el3_entry

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/* Save the EL3 system registers needed to return from this exception */
	mrs	x0, spsr_el3
	mrs	x1, elr_el3
	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Switch to the runtime stack, i.e. SP_EL0 */
	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	mov	x20, sp
	msr	spsel, #MODE_SP_EL0
	mov	sp, x2

	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	bl	plat_ic_get_pending_interrupt_type
	cmp	x0, #INTR_TYPE_INVAL
	b.eq	interrupt_exit_\label

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be because of the following conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	bl	get_interrupt_type_handler
	cbz	x0, interrupt_exit_\label
	mov	x21, x0

	mov	x0, #INTR_ID_UNAVAILABLE

	/* Set the current security state in the 'flags' parameter */
	mrs	x2, scr_el3
	ubfx	x1, x2, #0, #1

	/* Restore the reference to the 'handle', i.e. SP_EL3 */
	mov	x2, x20

	/* x3 will point to a cookie (not used now) */
	mov	x3, xzr

	/* Call the interrupt type handler */
	blr	x21

interrupt_exit_\label:
	/* Return from exception, possibly in a different security state */
	b	el3_exit

	.endm


vector_base runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
#ifdef MONITOR_TRAPS
	stp	x29, x30, [sp, #-16]!

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Check for BRK */
	cmp	x30, #EC_BRK
	b.eq	brk_handler

	ldp	x29, x30, [sp], #16
#endif /* MONITOR_TRAPS */

	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
end_vector_entry irq_sp_el0


vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_el0


vector_entry serror_sp_el0
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit, or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_elx

vector_entry serror_sp_elx
#if !RAS_EXTENSION
	check_if_serror_from_EL3
#endif
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * This exception vector is most commonly the entry point for SMCs and
	 * for traps that are unhandled at lower ELs. SP_EL3 should point to a
	 * valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch64

vector_entry irq_aarch64
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_interrupt_exception irq_aarch64
end_vector_entry irq_aarch64

vector_entry fiq_aarch64
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_interrupt_exception fiq_aarch64
end_vector_entry fiq_aarch64

vector_entry serror_aarch64
	apply_at_speculative_wa
#if RAS_EXTENSION
	msr	daifclr, #DAIF_ABT_BIT
	b	enter_lower_el_async_ea
#else
	handle_async_ea
#endif
end_vector_entry serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * This exception vector is most commonly the entry point for SMCs and
	 * for traps that are unhandled at lower ELs. SP_EL3 should point to a
	 * valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch32

vector_entry irq_aarch32
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_interrupt_exception irq_aarch32
end_vector_entry irq_aarch32

vector_entry fiq_aarch32
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_interrupt_exception fiq_aarch32
end_vector_entry fiq_aarch32

vector_entry serror_aarch32
	apply_at_speculative_wa
#if RAS_EXTENSION
	msr	daifclr, #DAIF_ABT_BIT
	b	enter_lower_el_async_ea
#else
	handle_async_ea
#endif
end_vector_entry serror_aarch32

#ifdef MONITOR_TRAPS
	.section .rodata.brk_string, "aS"
brk_location:
	.asciz "Error at instruction 0x"
brk_message:
	.asciz "Unexpected BRK instruction with value 0x"
#endif /* MONITOR_TRAPS */

	/* ---------------------------------------------------------------------
	 * The following code handles secure monitor calls.
	 * Depending upon the execution state from which the SMC has been
	 * invoked, it frees up some general purpose registers to perform the
	 * remaining tasks. These involve finding the runtime service handler
	 * that is the target of the SMC and switching to the runtime stack
	 * (SP_EL0) before calling the handler.
	 *
	 * Note that x30 has been explicitly saved and can be used here.
	 * ---------------------------------------------------------------------
	 */
func sync_exception_handler
smc_handler32:
	/* Check whether AArch32 issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

sync_handler64:
	/* NOTE: The code below must preserve x0-x4 */

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * If Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable Cycle Counter.
	 * Also set the PSTATE to a known state.
	 */
	bl	prepare_el3_entry

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/*
	 * Populate the parameters for the SMC handler.
	 * We already have x0-x4 in place. x5 will point to a cookie (not used
	 * now). x6 will point to the context structure (SP_EL3) and x7 will
	 * contain flags we need to pass to the handler.
	 */
	mov	x5, xzr
	mov	x6, sp

	/*
	 * Restore the saved C runtime stack value which will become the new
	 * SP_EL0, i.e. the EL3 runtime stack. It was saved in the
	 * 'cpu_context' structure prior to the last ERET from EL3.
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* Switch to SP_EL0 */
	msr	spsel, #MODE_SP_EL0

	/*
	 * Save the SPSR_EL3 and ELR_EL3 in case there is a world
	 * switch during SMC handling.
	 * TODO: Revisit if all system registers can be saved later.
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Load SCR_EL3 */
	mrs	x18, scr_el3

	/* Check for system register traps */
	mrs	x16, esr_el3
	ubfx	x17, x16, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x17, #EC_AARCH64_SYS
	b.eq	sysreg_handler64

	/* Clear flag register */
	mov	x7, xzr

#if ENABLE_RME
	/* Copy SCR_EL3.NSE bit to the flag to indicate caller's security */
	ubfx	x7, x18, #SCR_NSE_SHIFT, #1

	/*
	 * Shift the copied SCR_EL3.NSE bit by 5 to create space for the
	 * SCR_EL3.NS bit.
	 * Bit 5 of the flag corresponds to the SCR_EL3.NSE bit.
	 */
	lsl	x7, x7, #5
#endif /* ENABLE_RME */

	/* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
	bfi	x7, x18, #0, #1

	/*
	 * Per SMCCC v1.3, a caller can set the SVE hint bit in the SMC FID
	 * passed through x0. Copy the SVE hint bit to flags and mask the
	 * bit in smc_fid passed to the standard service dispatcher.
	 * A service/dispatcher can retrieve the SVE hint bit state from
	 * flags using the appropriate helper.
	 */
	bfi	x7, x0, #FUNCID_SVE_HINT_SHIFT, #FUNCID_SVE_HINT_MASK
	bic	x0, x0, #(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT)

	mov	sp, x12

	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH

	/* Load descriptor index from array of indices */
	adrp	x14, rt_svc_descs_indices
	add	x14, x14, :lo12:rt_svc_descs_indices
	ldrb	w15, [x14, x16]

	/* Any index greater than 127 is invalid. Check bit 7. */
	tbnz	w15, #7, smc_unknown

	/*
	 * Get the descriptor using the index
	 * x11 = (base + off), w15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 */
	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]

	/*
	 * Call the Secure Monitor Call handler and then drop directly into
	 * el3_exit(), which will program any remaining architectural state
	 * prior to issuing the ERET to the desired lower EL.
	 */
#if DEBUG
	cbz	x15, rt_svc_fw_critical_error
#endif
	blr	x15

	b	el3_exit

sysreg_handler64:
	mov	x0, x16		/* ESR_EL3, containing syndrome information */
	mov	x1, x6		/* lower EL's context */
	mov	x19, x6		/* save context pointer for after the call */
	mov	sp, x12		/* EL3 runtime stack, as loaded above */

	/* int handle_sysreg_trap(uint64_t esr_el3, cpu_context_t *ctx); */
	bl	handle_sysreg_trap
	/*
	 * returns:
	 *   -1: unhandled trap, panic
	 *    0: handled trap, return to the trapping instruction (repeating it)
	 *    1: handled trap, return to the next instruction
	 */

	tst	w0, w0
	b.mi	do_panic	/* negative return value: panic */
	b.eq	1f		/* zero: do not change ELR_EL3 */

	/* Advance the PC to continue after the instruction */
	ldr	x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3]
	add	x1, x1, #4
	str	x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3]
1:
	b	el3_exit

smc_unknown:
	/*
	 * Unknown SMC call. Populate the return value with SMC_UNK and call
	 * el3_exit(), which will restore the remaining architectural state,
	 * i.e. SYS, GP and PAuth registers (if any), prior to issuing the
	 * ERET to the desired lower EL.
	 */
	mov	x0, #SMC_UNK
	str	x0, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	el3_exit

smc_prohibited:
	restore_ptw_el1_sys_regs
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x0, #SMC_UNK
	exception_return

#if DEBUG
rt_svc_fw_critical_error:
	/* Switch to SP_ELx */
	msr	spsel, #MODE_SP_ELX
	no_ret	report_unhandled_exception
#endif
endfunc sync_exception_handler

	/* ---------------------------------------------------------------------
	 * The following code handles exceptions caused by BRK instructions.
	 * Following a BRK instruction, the only real valid course of action is
	 * to print some information and panic, as the code that caused it is
	 * likely in an inconsistent internal state.
	 *
	 * This is initially intended to be used in conjunction with
	 * __builtin_trap.
	 * ---------------------------------------------------------------------
	 */
#ifdef MONITOR_TRAPS
func brk_handler
	/* Extract the ISS */
	mrs	x10, esr_el3
	ubfx	x10, x10, #ESR_ISS_SHIFT, #ESR_ISS_LENGTH

	/* Ensure the console is initialized */
	bl	plat_crash_console_init

	adr	x4, brk_location
	bl	asm_print_str
	mrs	x4, elr_el3
	bl	asm_print_hex
	bl	asm_print_newline

	adr	x4, brk_message
	bl	asm_print_str
	mov	x4, x10
	mov	x5, #28
	bl	asm_print_hex_bits
	bl	asm_print_newline

	no_ret	plat_panic_handler
endfunc brk_handler
#endif /* MONITOR_TRAPS */