/*
 * Copyright (c) 2013-2026, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl31/ea_handle.h>
#include <bl31/interrupt_mgmt.h>
#include <bl31/sync_handle.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <cpu_macros.S>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32

	/*
	 * Save LR and make x30 available as most of the routines in vector entry
	 * need a free register
	 */
	.macro save_x30
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	.endm

	.macro restore_x30
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	.endm

	/*
	 * Macro that synchronizes errors (EA) and checks for pending SError.
	 * On detecting a pending SError it either reflects it back to lower
	 * EL (KFH) or handles it in EL3 (FFH) based on EA routing model.
	 */
	.macro sync_and_handle_pending_serror
	synchronize_errors
	mrs	x30, ISR_EL1
	tbz	x30, #ISR_A_SHIFT, 2f
#if FFH_SUPPORT
	mrs	x30, scr_el3
	tst	x30, #SCR_EA_BIT
	b.eq	1f
	bl	handle_pending_async_ea
	b	2f
#endif
1:
	/* This function never returns, but need LR for decision making */
	bl	reflect_pending_async_ea_to_lower_el
2:
	.endm

	/* ---------------------------------------------------------------------
	 * This macro handles Synchronous exceptions.
	 * Only SMC exceptions are supported.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	per_cpu_cur percpu_data, x29, x30
	mrs	x30, cntpct_el0
	str	x30, [x29, #CPU_DATA_CPU_DATA_PMF_TS]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

	mrs	x30, esr_el3
	/*
	 * Extract the Exception Class field before comparing: EC_* constants
	 * encode ESR_EL3[31:26] only, so comparing the raw ESR value against
	 * EC_IMP_DEF_EL3 would never match (see the identical ubfx pattern in
	 * imp_def_el3_handler and the BRK check below).
	 */
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* fast paths that have a minimal environment */
	cmp	x30, #EC_IMP_DEF_EL3
	b.eq	imp_def_el3_handler

	/* setup the full environment */
	bl	prepare_el3_entry

	bl	handler_sync_exception

	no_ret	el3_exit
	.endm

	.macro handle_lower_el_async_ea
	bl	prepare_el3_entry

	bl	handler_lower_el_async_ea

	no_ret	el3_exit
	.endm

	/* ---------------------------------------------------------------------
	 * This macro handles FIQ or IRQ interrupts i.e. EL3, S-EL1 and NS
	 * interrupts.
	 * ---------------------------------------------------------------------
	 */
	.macro handle_interrupt_exception
	bl	prepare_el3_entry

	bl	handler_interrupt_exception

	/* Return from exception, possibly in a different security state */
	no_ret	el3_exit
	.endm

vector_base runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
#ifdef MONITOR_TRAPS
	stp	x29, x30, [sp, #-16]!

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Check for BRK */
	cmp	x30, #EC_BRK
	b.eq	brk_handler

	ldp	x29, x30, [sp], #16
#endif /* MONITOR_TRAPS */

	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
end_vector_entry irq_sp_el0

vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_el0

vector_entry serror_sp_el0
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_elx

vector_entry serror_sp_elx
#if FFH_SUPPORT
	/*
	 * This will trigger if the exception was taken due to SError in EL3 or
	 * because of pending asynchronous external aborts from lower EL that got
	 * triggered due to implicit/explicit synchronization in EL3 (SCR_EL3.EA=1)
	 * during EL3 entry. For the former case we continue with "plat_handle_el3_ea".
	 * The latter case will occur when PSTATE.A bit is cleared in
	 * "handle_pending_async_ea". This means we are doing a nested
	 * exception in EL3. Call the handler for async EA which will eret back to
	 * original el3 handler if it is nested exception. Also, unmask EA so that we
	 * catch any further EA arise when handling this nested exception at EL3.
	 */
	save_x30
	ldr	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	cbz	x30, 1f
	/*
	 * This is nested exception handling, clear the flag to avoid taking this
	 * path for further exceptions caused by EA handling
	 */
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	unmask_async_ea

	handle_lower_el_async_ea
1:
	restore_x30
#endif
	no_ret	plat_handle_el3_ea

end_vector_entry serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * This exception vector will be the entry point for SMCs and traps
	 * that are unhandled at lower ELs most commonly. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	handle_sync_exception
end_vector_entry sync_exception_aarch64

vector_entry irq_aarch64
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	handle_interrupt_exception
end_vector_entry irq_aarch64

vector_entry fiq_aarch64
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	handle_interrupt_exception
end_vector_entry fiq_aarch64

	/*
	 * Need to synchronize any outstanding SError since we can get a burst of errors.
	 * So reuse the sync mechanism to catch any further errors which are pending.
	 */
vector_entry serror_aarch64
#if FFH_SUPPORT
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	handle_lower_el_async_ea
#else
	b	report_unhandled_exception
#endif
end_vector_entry serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * This exception vector will be the entry point for SMCs and traps
	 * that are unhandled at lower ELs most commonly. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	handle_sync_exception
end_vector_entry sync_exception_aarch32

vector_entry irq_aarch32
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	handle_interrupt_exception
end_vector_entry irq_aarch32

vector_entry fiq_aarch32
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	handle_interrupt_exception
end_vector_entry fiq_aarch32

	/*
	 * Need to synchronize any outstanding SError since we can get a burst of errors.
	 * So reuse the sync mechanism to catch any further errors which are pending.
	 */
vector_entry serror_aarch32
#if FFH_SUPPORT
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	handle_lower_el_async_ea
#else
	b	report_unhandled_exception
#endif
end_vector_entry serror_aarch32

#ifdef MONITOR_TRAPS
	.section .rodata.brk_string, "aS"
brk_location:
	.asciz "Error at instruction 0x"
brk_message:
	.asciz "Unexpected BRK instruction with value 0x"
#endif /* MONITOR_TRAPS */

func imp_def_el3_handler
	/* Save GP registers */
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]

	/* Get the cpu_ops pointer */
	bl	get_cpu_ops_ptr

	/* Get the cpu_ops exception handler */
	ldr	x0, [x0, #CPU_E_HANDLER_FUNC]

	/*
	 * If the reserved function pointer is NULL, this CPU does not have an
	 * implementation defined exception handler function
	 */
	cbz	x0, el3_handler_exit
	mrs	x1, esr_el3
	ubfx	x1, x1, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	blr	x0
el3_handler_exit:
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	restore_x30
	no_ret	report_unhandled_exception
endfunc imp_def_el3_handler

/*
 * Handler for async EA from lower EL synchronized at EL3 entry in KFH mode.
 *
 * This scenario may arise when there is an error (EA) in the system which is not
 * yet signaled to PE while executing in lower EL. During entry into EL3, the errors
 * are synchronized either implicitly or explicitly causing async EA to pend at EL3.
 *
 * On detecting the pending EA (via ISR_EL1.A) and if the EA routing model is
 * KFH (SCR_EL3.EA = 1) this handler reflects the error back to lower EL.
 *
 * This function assumes x30 has been saved.
 */
func reflect_pending_async_ea_to_lower_el
	/*
	 * As the original exception was not handled we need to ensure that we return
	 * back to the instruction which caused the exception. To achieve that, eret
	 * to "elr-4" (Label "subtract_elr_el3") for SMC or simply eret otherwise
	 * (Label "skip_smc_check").
	 *
	 * LIMITATION: It could be that async EA is masked at the target exception level
	 * or the priority of async EA wrt to the EL3/secure interrupt is lower, which
	 * causes back and forth between lower EL and EL3. In case of back and forth between
	 * lower EL and EL3, we can track the loop count in "CTX_NESTED_EA_FLAG" and leverage
	 * previous ELR in "CTX_SAVED_ELR_EL3" to detect this cycle and further panic
	 * to indicate a problem here (Label "check_loop_ctr"). If we are in this cycle, loop
	 * counter retains its value but if we do a normal el3_exit this flag gets cleared.
	 * However, setting SCR_EL3.IESB = 1, should give priority to SError handling
	 * as per AArch64.TakeException pseudo code in Arm ARM.
	 *
	 * TODO: In future if EL3 gets a capability to inject a virtual SError to lower
	 * ELs, we can remove the el3_panic and handle the original exception first and
	 * inject SError to lower EL before ereting back.
	 */
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x29, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
	mrs	x28, elr_el3
	cmp	x29, x28
	b.eq	check_loop_ctr
	str	x28, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
	/* Zero the loop counter */
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	b	skip_loop_ctr
check_loop_ctr:
	ldr	x29, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	add	x29, x29, #1
	str	x29, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	cmp	x29, #ASYNC_EA_REPLAY_COUNTER
	b.ge	el3_panic
skip_loop_ctr:
	/*
	 * Logic to distinguish if we came from SMC or any other exception.
	 * Use offsets in vector entry to get which exception we are handling.
	 * In each vector entry of size 0x200, address "0x0-0x80" is for sync
	 * exception and "0x80-0x200" is for async exceptions.
	 * Use vector base address (vbar_el3) and exception offset (LR) to
	 * calculate whether the address we came from is any of the following
	 * "0x0-0x80", "0x200-0x280", "0x400-0x480" or "0x600-0x680"
	 */
	mrs	x29, vbar_el3
	sub	x30, x30, x29
	and	x30, x30, #0x1ff
	cmp	x30, #0x80
	b.ge	skip_smc_check
	/* Its a synchronous exception, Now check if it is SMC or not? */
	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x30, #EC_AARCH32_SMC
	b.eq	subtract_elr_el3
	cmp	x30, #EC_AARCH64_SMC
	b.eq	subtract_elr_el3
	b	skip_smc_check
subtract_elr_el3:
	sub	x28, x28, #4
skip_smc_check:
	msr	elr_el3, x28
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	exception_return
endfunc reflect_pending_async_ea_to_lower_el

	/* ---------------------------------------------------------------------
	 * The following code handles exceptions caused by BRK instructions.
	 * Following a BRK instruction, the only real valid cause of action is
	 * to print some information and panic, as the code that caused it is
	 * likely in an inconsistent internal state.
	 *
	 * This is initially intended to be used in conjunction with
	 * __builtin_trap.
	 * ---------------------------------------------------------------------
	 */
#ifdef MONITOR_TRAPS
func brk_handler
	/* Extract the ISS */
	mrs	x10, esr_el3
	ubfx	x10, x10, #ESR_ISS_SHIFT, #ESR_ISS_LENGTH

	/* Ensure the console is initialized */
	bl	plat_crash_console_init

	adr	x4, brk_location
	bl	asm_print_str
	mrs	x4, elr_el3
	bl	asm_print_hex
	bl	asm_print_newline

	adr	x4, brk_message
	bl	asm_print_str
	mov	x4, x10
	mov	x5, #28
	bl	asm_print_hex_bits
	bl	asm_print_newline

	no_ret	plat_panic_handler
endfunc brk_handler
#endif /* MONITOR_TRAPS */