/*
 * Copyright (c) 2013-2026, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl31/ea_handle.h>
#include <bl31/interrupt_mgmt.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <cpu_macros.S>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32

	/*
	 * Save LR and make x30 available as most of the routines in vector
	 * entries need a free register.
	 */
	.macro save_x30
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	.endm

	/* Restore the LR value stashed by save_x30. */
	.macro restore_x30
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	.endm

	/*
	 * Macro that synchronizes errors (EA) and checks for a pending SError.
	 * On detecting a pending SError it either reflects it back to the lower
	 * EL (KFH) or handles it in EL3 (FFH) based on the EA routing model
	 * in SCR_EL3.EA.
	 *
	 * Clobbers x30 — the caller must have stashed LR via save_x30 first.
	 */
	.macro	sync_and_handle_pending_serror
	synchronize_errors
	mrs	x30, ISR_EL1
	tbz	x30, #ISR_A_SHIFT, 2f
#if FFH_SUPPORT
	/* SCR_EL3.EA set => FFH: handle the SError here in EL3 */
	mrs	x30, scr_el3
	tst	x30, #SCR_EA_BIT
	b.eq	1f
	bl	handle_pending_async_ea
	b	2f
#endif
1:
	/* This function never returns, but needs LR for decision making */
	bl	reflect_pending_async_ea_to_lower_el
2:
	.endm

	/* ---------------------------------------------------------------------
	 * This macro handles Synchronous exceptions.
	 * Only SMC exceptions are supported.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	per_cpu_cur	percpu_data, x29, x30
	mrs	x30, cntpct_el0
	str	x30, [x29, #CPU_DATA_CPU_DATA_PMF_TS]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

	/*
	 * Extract the exception class (EC) field before comparing: the raw
	 * ESR_EL3 value (EC in bits [31:26]) can never equal the 6-bit EC
	 * constant, so without the ubfx the fast path below was unreachable.
	 * This matches every other EC check in this file.
	 */
	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* fast paths that have a minimal environment */
	cmp	x30, #EC_IMP_DEF_EL3
	b.eq	imp_def_el3_handler

	/* setup the full environment */
	bl	prepare_el3_entry

	bl	handler_sync_exception

	no_ret	el3_exit
	.endm

	/*
	 * Set up the full EL3 environment and hand a lower-EL asynchronous
	 * external abort to the C handler. Exits via el3_exit.
	 */
.macro handle_lower_el_async_ea
	bl	prepare_el3_entry

	bl	handler_lower_el_async_ea

	no_ret	el3_exit
.endm

	/* ---------------------------------------------------------------------
	 * This macro handles FIQ or IRQ interrupts i.e. EL3, S-EL1 and NS
	 * interrupts.
	 * ---------------------------------------------------------------------
	 */
.macro handle_interrupt_exception
	bl	prepare_el3_entry

	bl	handler_interrupt_exception

	/* Return from exception, possibly in a different security state */
	no_ret	el3_exit
.endm

vector_base runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
#ifdef MONITOR_TRAPS
	stp	x29, x30, [sp, #-16]!

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Check for BRK */
	cmp	x30, #EC_BRK
	b.eq	brk_handler

	ldp	x29, x30, [sp], #16
#endif /* MONITOR_TRAPS */

	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
end_vector_entry irq_sp_el0

vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_el0

vector_entry serror_sp_el0
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_elx

vector_entry serror_sp_elx
#if FFH_SUPPORT
	/*
	 * This will trigger if the exception was taken due to SError in EL3 or
	 * because of pending asynchronous external aborts from lower EL that got
	 * triggered due to implicit/explicit synchronization in EL3 (SCR_EL3.EA=1)
	 * during EL3 entry. For the former case we continue with "plat_handle_el3_ea".
	 * The latter case will occur when the PSTATE.A bit is cleared in
	 * "handle_pending_async_ea". This means we are handling a nested
	 * exception in EL3. Call the handler for async EA which will eret back to
	 * the original EL3 handler if it is a nested exception. Also, unmask EA so
	 * that we catch any further EAs that arise while handling this nested
	 * exception at EL3.
	 */
	save_x30
	ldr	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	cbz	x30, 1f
	/*
	 * This is nested exception handling, clear the flag to avoid taking this
	 * path for further exceptions caused by EA handling
	 */
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	unmask_async_ea

	handle_lower_el_async_ea
1:
	restore_x30
#endif
	no_ret	plat_handle_el3_ea

end_vector_entry serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * This exception vector will be the entry point for SMCs and traps
	 * that are unhandled at lower ELs most commonly. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	handle_sync_exception
end_vector_entry sync_exception_aarch64

vector_entry irq_aarch64
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	handle_interrupt_exception
end_vector_entry irq_aarch64

vector_entry fiq_aarch64
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	handle_interrupt_exception
end_vector_entry fiq_aarch64

	/*
	 * Need to synchronize any outstanding SError since we can get a burst of errors.
	 * So reuse the sync mechanism to catch any further errors which are pending.
	 */
vector_entry serror_aarch64
#if FFH_SUPPORT
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	handle_lower_el_async_ea
#else
	b	report_unhandled_exception
#endif
end_vector_entry serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * This exception vector will be the entry point for SMCs and traps
	 * that are unhandled at lower ELs most commonly. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	handle_sync_exception
end_vector_entry sync_exception_aarch32

vector_entry irq_aarch32
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	handle_interrupt_exception
end_vector_entry irq_aarch32

vector_entry fiq_aarch32
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	handle_interrupt_exception
end_vector_entry fiq_aarch32

	/*
	 * Need to synchronize any outstanding SError since we can get a burst of errors.
	 * So reuse the sync mechanism to catch any further errors which are pending.
	 */
vector_entry serror_aarch32
#if FFH_SUPPORT
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	handle_lower_el_async_ea
#else
	b	report_unhandled_exception
#endif
end_vector_entry serror_aarch32

#ifdef MONITOR_TRAPS
	.section .rodata.brk_string, "aS"
brk_location:
	.asciz "Error at instruction 0x"
brk_message:
	.asciz "Unexpected BRK instruction with value 0x"
#endif /* MONITOR_TRAPS */

/*
 * Fast-path handler for EL3 traps with an IMPLEMENTATION DEFINED exception
 * class (EC_IMP_DEF_EL3). Dispatches to the cpu_ops-provided handler when one
 * is registered (x0 = EC passed in x1); if none is registered, or if the
 * handler returns, the exception is reported as unhandled.
 *
 * Runs in the minimal vector-entry environment: only x0-x5 are saved/restored
 * here, and x30 is expected to have been stashed by save_x30.
 */
func imp_def_el3_handler
	/* Save GP registers */
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]

	/* Get the cpu_ops pointer */
	bl	get_cpu_ops_ptr

	/* Get the cpu_ops exception handler */
	ldr	x0, [x0, #CPU_E_HANDLER_FUNC]

	/*
	 * If the reserved function pointer is NULL, this CPU does not have an
	 * implementation defined exception handler function
	 */
	cbz	x0, el3_handler_exit
	mrs	x1, esr_el3
	ubfx	x1, x1, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	blr	x0
el3_handler_exit:
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	restore_x30
	no_ret	report_unhandled_exception
endfunc imp_def_el3_handler

/*
 * Handler for async EA from lower EL synchronized at EL3 entry in KFH mode.
 *
 * This scenario may arise when there is an error (EA) in the system which is not
 * yet signaled to the PE while executing in a lower EL. During entry into EL3, the
 * errors are synchronized either implicitly or explicitly, causing an async EA to
 * pend at EL3.
 *
 * On detecting the pending EA (via ISR_EL1.A), and if the EA routing model is
 * KFH (SCR_EL3.EA = 0), this handler reflects the error back to the lower EL.
 * (With SCR_EL3.EA = 1 the FFH path "handle_pending_async_ea" is taken instead;
 * see sync_and_handle_pending_serror.)
 *
 * This function assumes x30 has been saved (it holds the vector-entry LR, used
 * below to tell sync from async entries).
 */
func reflect_pending_async_ea_to_lower_el
	/*
	 * As the original exception was not handled we need to ensure that we return
	 * back to the instruction which caused the exception. To achieve that, eret
	 * to "elr-4" (Label "subtract_elr_el3") for SMC or simply eret otherwise
	 * (Label "skip_smc_check").
	 *
	 * LIMITATION: It could be that the async EA is masked at the target exception
	 * level, or that the priority of the async EA w.r.t. the EL3/secure interrupt
	 * is lower, which causes back and forth between the lower EL and EL3. In that
	 * case we can track the loop count in "CTX_NESTED_EA_FLAG" and leverage the
	 * previous ELR in "CTX_SAVED_ELR_EL3" to detect this cycle and panic to
	 * indicate a problem here (Label "check_loop_ctr"). If we are in this cycle
	 * the loop counter retains its value, but a normal el3_exit clears this flag.
	 * However, setting SCR_EL3.IESB = 1 should give priority to SError handling,
	 * as per the AArch64.TakeException pseudo code in the Arm ARM.
	 *
	 * TODO: In future, if EL3 gets a capability to inject a virtual SError to
	 * lower ELs, we can remove the el3_panic, handle the original exception
	 * first, and inject the SError to the lower EL before ereting back.
	 */
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x29, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
	mrs	x28, elr_el3
	cmp	x29, x28
	b.eq	check_loop_ctr
	str	x28, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
	/* Zero the loop counter */
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	b	skip_loop_ctr
check_loop_ctr:
	ldr	x29, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	add	x29, x29, #1
	str	x29, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	cmp	x29, #ASYNC_EA_REPLAY_COUNTER
	b.ge	el3_panic
skip_loop_ctr:
	/*
	 * Logic to distinguish if we came from SMC or any other exception.
	 * Use offsets in vector entry to get which exception we are handling.
	 * In each vector entry of size 0x200, address "0x0-0x80" is for sync
	 * exceptions and "0x80-0x200" is for async exceptions.
	 * Use the vector base address (vbar_el3) and exception offset (LR) to
	 * calculate whether the address we came from is any of the following:
	 * "0x0-0x80", "0x200-0x280", "0x400-0x480" or "0x600-0x680".
	 */
	mrs	x29, vbar_el3
	sub	x30, x30, x29
	and	x30, x30, #0x1ff
	cmp	x30, #0x80
	b.ge	skip_smc_check
	/* It's a synchronous exception; now check whether it is an SMC */
	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x30, #EC_AARCH32_SMC
	b.eq	subtract_elr_el3
	cmp	x30, #EC_AARCH64_SMC
	b.eq	subtract_elr_el3
	b	skip_smc_check
subtract_elr_el3:
	/* Wind ELR back so the SMC is re-executed after the SError is taken */
	sub	x28, x28, #4
skip_smc_check:
	msr	elr_el3, x28
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	exception_return
endfunc reflect_pending_async_ea_to_lower_el

	/* ---------------------------------------------------------------------
	 * The following code handles exceptions caused by BRK instructions.
	 * Following a BRK instruction, the only real valid course of action is
	 * to print some information and panic, as the code that caused it is
	 * likely in an inconsistent internal state.
	 *
	 * This is initially intended to be used in conjunction with
	 * __builtin_trap.
	 * ---------------------------------------------------------------------
	 */
#ifdef MONITOR_TRAPS
func brk_handler
	/* Extract the ISS */
	mrs	x10, esr_el3
	ubfx	x10, x10, #ESR_ISS_SHIFT, #ESR_ISS_WIDTH

	/* Ensure the console is initialized */
	bl	plat_crash_console_init

	adr	x4, brk_location
	bl	asm_print_str
	mrs	x4, elr_el3
	bl	asm_print_hex
	bl	asm_print_newline

	adr	x4, brk_message
	bl	asm_print_str
	mov	x4, x10
	mov	x5, #28
	bl	asm_print_hex_bits
	bl	asm_print_newline

	no_ret	plat_panic_handler
endfunc brk_handler
#endif /* MONITOR_TRAPS */