14f6ad66aSAchin Gupta/* 251997e3dSBoyan Karatotev * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved. 34f6ad66aSAchin Gupta * 482cb2c1aSdp-arm * SPDX-License-Identifier: BSD-3-Clause 54f6ad66aSAchin Gupta */ 64f6ad66aSAchin Gupta 709d40e0eSAntonio Nino Diaz#include <platform_def.h> 809d40e0eSAntonio Nino Diaz 94f6ad66aSAchin Gupta#include <arch.h> 1035e98e55SDan Handley#include <asm_macros.S> 1109d40e0eSAntonio Nino Diaz#include <bl31/ea_handle.h> 1209d40e0eSAntonio Nino Diaz#include <bl31/interrupt_mgmt.h> 13ccd81f1eSAndre Przywara#include <bl31/sync_handle.h> 1409d40e0eSAntonio Nino Diaz#include <common/runtime_svc.h> 1597043ac9SDan Handley#include <context.h> 166d22b089SManish Pandey#include <cpu_macros.S> 173b8456bdSManish V Badarkhe#include <el3_common_macros.S> 1809d40e0eSAntonio Nino Diaz#include <lib/el3_runtime/cpu_data.h> 1909d40e0eSAntonio Nino Diaz#include <lib/smccc.h> 204f6ad66aSAchin Gupta 214f6ad66aSAchin Gupta .globl runtime_exceptions 224f6ad66aSAchin Gupta 23f62ad322SDimitris Papastamos .globl sync_exception_sp_el0 24f62ad322SDimitris Papastamos .globl irq_sp_el0 25f62ad322SDimitris Papastamos .globl fiq_sp_el0 26f62ad322SDimitris Papastamos .globl serror_sp_el0 27f62ad322SDimitris Papastamos 28f62ad322SDimitris Papastamos .globl sync_exception_sp_elx 29f62ad322SDimitris Papastamos .globl irq_sp_elx 30f62ad322SDimitris Papastamos .globl fiq_sp_elx 31f62ad322SDimitris Papastamos .globl serror_sp_elx 32f62ad322SDimitris Papastamos 33f62ad322SDimitris Papastamos .globl sync_exception_aarch64 34f62ad322SDimitris Papastamos .globl irq_aarch64 35f62ad322SDimitris Papastamos .globl fiq_aarch64 36f62ad322SDimitris Papastamos .globl serror_aarch64 37f62ad322SDimitris Papastamos 38f62ad322SDimitris Papastamos .globl sync_exception_aarch32 39f62ad322SDimitris Papastamos .globl irq_aarch32 40f62ad322SDimitris Papastamos .globl fiq_aarch32 41f62ad322SDimitris Papastamos .globl serror_aarch32 42f62ad322SDimitris Papastamos 4376454abfSJeenu Viswambharan /* 44d87c0e27SManish Pandey * Save LR and make x30 available as most of the routines in vector entry 45d87c0e27SManish Pandey * need a free register 46d87c0e27SManish Pandey */ 47d87c0e27SManish Pandey .macro save_x30 48d87c0e27SManish Pandey str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR] 49d87c0e27SManish Pandey .endm 50d87c0e27SManish Pandey 51d04c04a4SManish Pandey .macro restore_x30 52d04c04a4SManish Pandey ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR] 53d04c04a4SManish Pandey .endm 5414c6016aSJeenu Viswambharan 55e290a8fcSAlexei Fedorov /* 56d04c04a4SManish Pandey * Macro that synchronizes errors (EA) and checks for pending SError. 57d04c04a4SManish Pandey * On detecting a pending SError it either reflects it back to lower 58d04c04a4SManish Pandey * EL (KFH) or handles it in EL3 (FFH) based on EA routing model. 59e290a8fcSAlexei Fedorov */ 60d04c04a4SManish Pandey .macro sync_and_handle_pending_serror 616597fcf1SManish Pandey synchronize_errors 62d04c04a4SManish Pandey mrs x30, ISR_EL1 63d04c04a4SManish Pandey tbz x30, #ISR_A_SHIFT, 2f 64f87e54f7SManish Pandey#if FFH_SUPPORT 65d04c04a4SManish Pandey mrs x30, scr_el3 66d04c04a4SManish Pandey tst x30, #SCR_EA_BIT 67d04c04a4SManish Pandey b.eq 1f 68d04c04a4SManish Pandey bl handle_pending_async_ea 69d04c04a4SManish Pandey b 2f 70c2d32a5fSMadhukar Pappireddy#endif 71d04c04a4SManish Pandey1: 72d04c04a4SManish Pandey /* This function never returns, but need LR for decision making */ 73d04c04a4SManish Pandey bl reflect_pending_async_ea_to_lower_el 74d04c04a4SManish Pandey2: 7576a91d87SManish Pandey .endm 76c2d32a5fSMadhukar Pappireddy 77a6ef4393SDouglas Raillard /* --------------------------------------------------------------------- 78a6ef4393SDouglas Raillard * This macro handles Synchronous exceptions. 79a6ef4393SDouglas Raillard * Only SMC exceptions are supported. 80a6ef4393SDouglas Raillard * --------------------------------------------------------------------- 81dce74b89SAchin Gupta */ 82dce74b89SAchin Gupta .macro handle_sync_exception 83872be88aSdp-arm#if ENABLE_RUNTIME_INSTRUMENTATION 84872be88aSdp-arm /* 85a6ef4393SDouglas Raillard * Read the timestamp value and store it in per-cpu data. The value 86a6ef4393SDouglas Raillard * will be extracted from per-cpu data by the C level SMC handler and 87a6ef4393SDouglas Raillard * saved to the PMF timestamp region. 88872be88aSdp-arm */ 89872be88aSdp-arm mrs x30, cntpct_el0 90872be88aSdp-arm str x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29] 91872be88aSdp-arm mrs x29, tpidr_el3 924779becdSBoyan Karatotev str x30, [x29, #CPU_DATA_CPU_DATA_PMF_TS] 93872be88aSdp-arm ldr x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29] 94872be88aSdp-arm#endif 95872be88aSdp-arm 96dce74b89SAchin Gupta mrs x30, esr_el3 97dce74b89SAchin Gupta ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH 98dce74b89SAchin Gupta 99a6ef4393SDouglas Raillard /* Handle SMC exceptions separately from other synchronous exceptions */ 100dce74b89SAchin Gupta cmp x30, #EC_AARCH32_SMC 101dce74b89SAchin Gupta b.eq smc_handler32 102dce74b89SAchin Gupta 103dce74b89SAchin Gupta cmp x30, #EC_AARCH64_SMC 104ccd81f1eSAndre Przywara b.eq sync_handler64 105ccd81f1eSAndre Przywara 106ccd81f1eSAndre Przywara cmp x30, #EC_AARCH64_SYS 107ccd81f1eSAndre Przywara b.eq sync_handler64 108dce74b89SAchin Gupta 1096d22b089SManish Pandey cmp x30, #EC_IMP_DEF_EL3 1106d22b089SManish Pandey b.eq imp_def_el3_handler 1116d22b089SManish Pandey 1126d22b089SManish Pandey /* If FFH Support then try to handle lower EL EA exceptions. */ 1136d22b089SManish Pandey#if FFH_SUPPORT 1146d22b089SManish Pandey mrs x30, scr_el3 1156d22b089SManish Pandey tst x30, #SCR_EA_BIT 1166d22b089SManish Pandey b.eq 1f 1176f7de9a8SManish Pandey b handle_lower_el_sync_ea 1186d22b089SManish Pandey#endif 1196d22b089SManish Pandey1: 1206d22b089SManish Pandey /* Synchronous exceptions other than the above are unhandled */ 1216d22b089SManish Pandey b report_unhandled_exception 122dce74b89SAchin Gupta .endm 123dce74b89SAchin Gupta 124e0ae9fabSSandrine Bailleuxvector_base runtime_exceptions 125e0ae9fabSSandrine Bailleux 126a6ef4393SDouglas Raillard /* --------------------------------------------------------------------- 127a6ef4393SDouglas Raillard * Current EL with SP_EL0 : 0x0 - 0x200 128a6ef4393SDouglas Raillard * --------------------------------------------------------------------- 1294f6ad66aSAchin Gupta */ 130e0ae9fabSSandrine Bailleuxvector_entry sync_exception_sp_el0 1311f461979SJustin Chadwell#ifdef MONITOR_TRAPS 1321f461979SJustin Chadwell stp x29, x30, [sp, #-16]! 1331f461979SJustin Chadwell 1341f461979SJustin Chadwell mrs x30, esr_el3 1351f461979SJustin Chadwell ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH 1361f461979SJustin Chadwell 1371f461979SJustin Chadwell /* Check for BRK */ 1381f461979SJustin Chadwell cmp x30, #EC_BRK 1391f461979SJustin Chadwell b.eq brk_handler 1401f461979SJustin Chadwell 1411f461979SJustin Chadwell ldp x29, x30, [sp], #16 1421f461979SJustin Chadwell#endif /* MONITOR_TRAPS */ 1431f461979SJustin Chadwell 144a6ef4393SDouglas Raillard /* We don't expect any synchronous exceptions from EL3 */ 1454d91838bSJulius Werner b report_unhandled_exception 146a9203edaSRoberto Vargasend_vector_entry sync_exception_sp_el0 1474f6ad66aSAchin Gupta 148e0ae9fabSSandrine Bailleuxvector_entry irq_sp_el0 149a6ef4393SDouglas Raillard /* 150a6ef4393SDouglas Raillard * EL3 code is non-reentrant. Any asynchronous exception is a serious 151a6ef4393SDouglas Raillard * error. Loop infinitely. 152a6ef4393SDouglas Raillard */ 1534d91838bSJulius Werner b report_unhandled_interrupt 154a9203edaSRoberto Vargasend_vector_entry irq_sp_el0 1554f6ad66aSAchin Gupta 156e0ae9fabSSandrine Bailleux 157e0ae9fabSSandrine Bailleuxvector_entry fiq_sp_el0 1584d91838bSJulius Werner b report_unhandled_interrupt 159a9203edaSRoberto Vargasend_vector_entry fiq_sp_el0 1604f6ad66aSAchin Gupta 161e0ae9fabSSandrine Bailleux 162e0ae9fabSSandrine Bailleuxvector_entry serror_sp_el0 163eaeaa4d0SJeenu Viswambharan no_ret plat_handle_el3_ea 164a9203edaSRoberto Vargasend_vector_entry serror_sp_el0 1654f6ad66aSAchin Gupta 166a6ef4393SDouglas Raillard /* --------------------------------------------------------------------- 167a6ef4393SDouglas Raillard * Current EL with SP_ELx: 0x200 - 0x400 168a6ef4393SDouglas Raillard * --------------------------------------------------------------------- 1694f6ad66aSAchin Gupta */ 170e0ae9fabSSandrine Bailleuxvector_entry sync_exception_sp_elx 171a6ef4393SDouglas Raillard /* 172a6ef4393SDouglas Raillard * This exception will trigger if anything went wrong during a previous 173a6ef4393SDouglas Raillard * exception entry or exit or while handling an earlier unexpected 174a6ef4393SDouglas Raillard * synchronous exception. There is a high probability that SP_EL3 is 175a6ef4393SDouglas Raillard * corrupted. 176caa84939SJeenu Viswambharan */ 1774d91838bSJulius Werner b report_unhandled_exception 178a9203edaSRoberto Vargasend_vector_entry sync_exception_sp_elx 1794f6ad66aSAchin Gupta 180e0ae9fabSSandrine Bailleuxvector_entry irq_sp_elx 1814d91838bSJulius Werner b report_unhandled_interrupt 182a9203edaSRoberto Vargasend_vector_entry irq_sp_elx 183a7934d69SJeenu Viswambharan 184e0ae9fabSSandrine Bailleuxvector_entry fiq_sp_elx 1854d91838bSJulius Werner b report_unhandled_interrupt 186a9203edaSRoberto Vargasend_vector_entry fiq_sp_elx 187a7934d69SJeenu Viswambharan 188e0ae9fabSSandrine Bailleuxvector_entry serror_sp_elx 189f87e54f7SManish Pandey#if FFH_SUPPORT 19076a91d87SManish Pandey /* 19176a91d87SManish Pandey * This will trigger if the exception was taken due to SError in EL3 or 19276a91d87SManish Pandey * because of pending asynchronous external aborts from lower EL that got 193d04c04a4SManish Pandey * triggered due to implicit/explicit synchronization in EL3 (SCR_EL3.EA=1) 194d04c04a4SManish Pandey * during EL3 entry. For the former case we continue with "plat_handle_el3_ea". 195d04c04a4SManish Pandey * The later case will occur when PSTATE.A bit is cleared in 196d04c04a4SManish Pandey * "handle_pending_async_ea". This means we are doing a nested 197d04c04a4SManish Pandey * exception in EL3. Call the handler for async EA which will eret back to 198d04c04a4SManish Pandey * original el3 handler if it is nested exception. Also, unmask EA so that we 199d04c04a4SManish Pandey * catch any further EA arise when handling this nested exception at EL3. 20076a91d87SManish Pandey */ 201d87c0e27SManish Pandey save_x30 202d04c04a4SManish Pandey ldr x30, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG] 203d04c04a4SManish Pandey cbz x30, 1f 204d04c04a4SManish Pandey /* 205d04c04a4SManish Pandey * This is nested exception handling, clear the flag to avoid taking this 206d04c04a4SManish Pandey * path for further exceptions caused by EA handling 207d04c04a4SManish Pandey */ 208d04c04a4SManish Pandey str xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG] 209d04c04a4SManish Pandey unmask_async_ea 21076a91d87SManish Pandey b handle_lower_el_async_ea 21176a91d87SManish Pandey1: 212d04c04a4SManish Pandey restore_x30 213c2d32a5fSMadhukar Pappireddy#endif 214eaeaa4d0SJeenu Viswambharan no_ret plat_handle_el3_ea 215d04c04a4SManish Pandey 216a9203edaSRoberto Vargasend_vector_entry serror_sp_elx 2174f6ad66aSAchin Gupta 218a6ef4393SDouglas Raillard /* --------------------------------------------------------------------- 21944804252SSandrine Bailleux * Lower EL using AArch64 : 0x400 - 0x600 220a6ef4393SDouglas Raillard * --------------------------------------------------------------------- 2214f6ad66aSAchin Gupta */ 222e0ae9fabSSandrine Bailleuxvector_entry sync_exception_aarch64 223a6ef4393SDouglas Raillard /* 224a6ef4393SDouglas Raillard * This exception vector will be the entry point for SMCs and traps 225a6ef4393SDouglas Raillard * that are unhandled at lower ELs most commonly. SP_EL3 should point 226a6ef4393SDouglas Raillard * to a valid cpu context where the general purpose and system register 227a6ef4393SDouglas Raillard * state can be saved. 228caa84939SJeenu Viswambharan */ 229d87c0e27SManish Pandey save_x30 2303b8456bdSManish V Badarkhe apply_at_speculative_wa 231d04c04a4SManish Pandey sync_and_handle_pending_serror 232caa84939SJeenu Viswambharan handle_sync_exception 233a9203edaSRoberto Vargasend_vector_entry sync_exception_aarch64 2344f6ad66aSAchin Gupta 235e0ae9fabSSandrine Bailleuxvector_entry irq_aarch64 236d87c0e27SManish Pandey save_x30 2373b8456bdSManish V Badarkhe apply_at_speculative_wa 238d04c04a4SManish Pandey sync_and_handle_pending_serror 2393991b889SManish Pandey b handle_interrupt_exception 240a9203edaSRoberto Vargasend_vector_entry irq_aarch64 2414f6ad66aSAchin Gupta 242e0ae9fabSSandrine Bailleuxvector_entry fiq_aarch64 243d87c0e27SManish Pandey save_x30 2443b8456bdSManish V Badarkhe apply_at_speculative_wa 245d04c04a4SManish Pandey sync_and_handle_pending_serror 2463991b889SManish Pandey b handle_interrupt_exception 247a9203edaSRoberto Vargasend_vector_entry fiq_aarch64 2484f6ad66aSAchin Gupta 249d04c04a4SManish Pandey /* 250d04c04a4SManish Pandey * Need to synchronize any outstanding SError since we can get a burst of errors. 251d04c04a4SManish Pandey * So reuse the sync mechanism to catch any further errors which are pending. 252d04c04a4SManish Pandey */ 253e0ae9fabSSandrine Bailleuxvector_entry serror_aarch64 2546d22b089SManish Pandey#if FFH_SUPPORT 255d87c0e27SManish Pandey save_x30 2563b8456bdSManish V Badarkhe apply_at_speculative_wa 257d04c04a4SManish Pandey sync_and_handle_pending_serror 2586f7de9a8SManish Pandey b handle_lower_el_async_ea 2596d22b089SManish Pandey#else 2606d22b089SManish Pandey b report_unhandled_exception 2616d22b089SManish Pandey#endif 262a9203edaSRoberto Vargasend_vector_entry serror_aarch64 2634f6ad66aSAchin Gupta 264a6ef4393SDouglas Raillard /* --------------------------------------------------------------------- 26544804252SSandrine Bailleux * Lower EL using AArch32 : 0x600 - 0x800 266a6ef4393SDouglas Raillard * --------------------------------------------------------------------- 2674f6ad66aSAchin Gupta */ 268e0ae9fabSSandrine Bailleuxvector_entry sync_exception_aarch32 269a6ef4393SDouglas Raillard /* 270a6ef4393SDouglas Raillard * This exception vector will be the entry point for SMCs and traps 271a6ef4393SDouglas Raillard * that are unhandled at lower ELs most commonly. SP_EL3 should point 272a6ef4393SDouglas Raillard * to a valid cpu context where the general purpose and system register 273a6ef4393SDouglas Raillard * state can be saved. 274caa84939SJeenu Viswambharan */ 275d87c0e27SManish Pandey save_x30 2763b8456bdSManish V Badarkhe apply_at_speculative_wa 277d04c04a4SManish Pandey sync_and_handle_pending_serror 278caa84939SJeenu Viswambharan handle_sync_exception 279a9203edaSRoberto Vargasend_vector_entry sync_exception_aarch32 2804f6ad66aSAchin Gupta 281e0ae9fabSSandrine Bailleuxvector_entry irq_aarch32 282d87c0e27SManish Pandey save_x30 2833b8456bdSManish V Badarkhe apply_at_speculative_wa 284d04c04a4SManish Pandey sync_and_handle_pending_serror 2853991b889SManish Pandey b handle_interrupt_exception 286a9203edaSRoberto Vargasend_vector_entry irq_aarch32 2874f6ad66aSAchin Gupta 288e0ae9fabSSandrine Bailleuxvector_entry fiq_aarch32 289d87c0e27SManish Pandey save_x30 2903b8456bdSManish V Badarkhe apply_at_speculative_wa 291d04c04a4SManish Pandey sync_and_handle_pending_serror 2923991b889SManish Pandey b handle_interrupt_exception 293a9203edaSRoberto Vargasend_vector_entry fiq_aarch32 2944f6ad66aSAchin Gupta 295d04c04a4SManish Pandey /* 296d04c04a4SManish Pandey * Need to synchronize any outstanding SError since we can get a burst of errors. 297d04c04a4SManish Pandey * So reuse the sync mechanism to catch any further errors which are pending. 298d04c04a4SManish Pandey */ 299e0ae9fabSSandrine Bailleuxvector_entry serror_aarch32 3006d22b089SManish Pandey#if FFH_SUPPORT 301d87c0e27SManish Pandey save_x30 3023b8456bdSManish V Badarkhe apply_at_speculative_wa 303d04c04a4SManish Pandey sync_and_handle_pending_serror 3046f7de9a8SManish Pandey b handle_lower_el_async_ea 3056d22b089SManish Pandey#else 3066d22b089SManish Pandey b report_unhandled_exception 3076d22b089SManish Pandey#endif 308a9203edaSRoberto Vargasend_vector_entry serror_aarch32 309a7934d69SJeenu Viswambharan 3101f461979SJustin Chadwell#ifdef MONITOR_TRAPS 3111f461979SJustin Chadwell .section .rodata.brk_string, "aS" 3121f461979SJustin Chadwellbrk_location: 3131f461979SJustin Chadwell .asciz "Error at instruction 0x" 3141f461979SJustin Chadwellbrk_message: 3151f461979SJustin Chadwell .asciz "Unexpected BRK instruction with value 0x" 3161f461979SJustin Chadwell#endif /* MONITOR_TRAPS */ 3171f461979SJustin Chadwell 3182f370465SAntonio Nino Diaz /* --------------------------------------------------------------------- 319caa84939SJeenu Viswambharan * The following code handles secure monitor calls. 320a6ef4393SDouglas Raillard * Depending upon the execution state from where the SMC has been 321a6ef4393SDouglas Raillard * invoked, it frees some general purpose registers to perform the 322a6ef4393SDouglas Raillard * remaining tasks. They involve finding the runtime service handler 323a6ef4393SDouglas Raillard * that is the target of the SMC & switching to runtime stacks (SP_EL0) 324a6ef4393SDouglas Raillard * before calling the handler. 325caa84939SJeenu Viswambharan * 326a6ef4393SDouglas Raillard * Note that x30 has been explicitly saved and can be used here 327a6ef4393SDouglas Raillard * --------------------------------------------------------------------- 328caa84939SJeenu Viswambharan */ 329ccd81f1eSAndre Przywarafunc sync_exception_handler 330caa84939SJeenu Viswambharansmc_handler32: 331caa84939SJeenu Viswambharan /* Check whether aarch32 issued an SMC64 */ 332caa84939SJeenu Viswambharan tbnz x0, #FUNCID_CC_SHIFT, smc_prohibited 333caa84939SJeenu Viswambharan 334ccd81f1eSAndre Przywarasync_handler64: 3355283962eSAntonio Nino Diaz /* NOTE: The code below must preserve x0-x4 */ 3365283962eSAntonio Nino Diaz 337e290a8fcSAlexei Fedorov /* 338ed108b56SAlexei Fedorov * Save general purpose and ARMv8.3-PAuth registers (if enabled). 3391d6d6802SBoyan Karatotev * Also save PMCR_EL0 and set the PSTATE to a known state. 340e290a8fcSAlexei Fedorov */ 34197215e0fSDaniel Boulby bl prepare_el3_entry 342e290a8fcSAlexei Fedorov 343a6ef4393SDouglas Raillard /* 344a6ef4393SDouglas Raillard * Populate the parameters for the SMC handler. 345a6ef4393SDouglas Raillard * We already have x0-x4 in place. x5 will point to a cookie (not used 346a6ef4393SDouglas Raillard * now). x6 will point to the context structure (SP_EL3) and x7 will 347201ca5b6SDimitris Papastamos * contain flags we need to pass to the handler. 348caa84939SJeenu Viswambharan */ 349caa84939SJeenu Viswambharan mov x5, xzr 350caa84939SJeenu Viswambharan mov x6, sp 351caa84939SJeenu Viswambharan 352a6ef4393SDouglas Raillard /* 353a6ef4393SDouglas Raillard * Restore the saved C runtime stack value which will become the new 354a6ef4393SDouglas Raillard * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context' 355a6ef4393SDouglas Raillard * structure prior to the last ERET from EL3. 356caa84939SJeenu Viswambharan */ 357caa84939SJeenu Viswambharan ldr x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP] 358caa84939SJeenu Viswambharan 359caa84939SJeenu Viswambharan /* Switch to SP_EL0 */ 360ed108b56SAlexei Fedorov msr spsel, #MODE_SP_EL0 361caa84939SJeenu Viswambharan 362a6ef4393SDouglas Raillard /* 363e61713b0SManish Pandey * Save the SPSR_EL3 and ELR_EL3 in case there is a world 364a6ef4393SDouglas Raillard * switch during SMC handling. 365a6ef4393SDouglas Raillard * TODO: Revisit if all system registers can be saved later. 366caa84939SJeenu Viswambharan */ 367caa84939SJeenu Viswambharan mrs x16, spsr_el3 368caa84939SJeenu Viswambharan mrs x17, elr_el3 369caa84939SJeenu Viswambharan stp x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3] 370e61713b0SManish Pandey 371e61713b0SManish Pandey /* Load SCR_EL3 */ 372e61713b0SManish Pandey mrs x18, scr_el3 373caa84939SJeenu Viswambharan 3744693ff72SZelalem Aweke /* Clear flag register */ 3754693ff72SZelalem Aweke mov x7, xzr 3764693ff72SZelalem Aweke 3774693ff72SZelalem Aweke#if ENABLE_RME 3784693ff72SZelalem Aweke /* Copy SCR_EL3.NSE bit to the flag to indicate caller's security */ 379461c0a5dSElizabeth Ho ubfx x7, x18, #SCR_NSE_SHIFT, #1 3804693ff72SZelalem Aweke 3814693ff72SZelalem Aweke /* 3824693ff72SZelalem Aweke * Shift copied SCR_EL3.NSE bit by 5 to create space for 3830fe7b9f2SOlivier Deprez * SCR_EL3.NS bit. Bit 5 of the flag corresponds to 3844693ff72SZelalem Aweke * the SCR_EL3.NSE bit. 3854693ff72SZelalem Aweke */ 3864693ff72SZelalem Aweke lsl x7, x7, #5 3874693ff72SZelalem Aweke#endif /* ENABLE_RME */ 3884693ff72SZelalem Aweke 389caa84939SJeenu Viswambharan /* Copy SCR_EL3.NS bit to the flag to indicate caller's security */ 390caa84939SJeenu Viswambharan bfi x7, x18, #0, #1 391caa84939SJeenu Viswambharan 392*88655be9SArvind Ram Prakash /* check for system register traps */ 393*88655be9SArvind Ram Prakash mrs x16, esr_el3 394*88655be9SArvind Ram Prakash ubfx x17, x16, #ESR_EC_SHIFT, #ESR_EC_LENGTH 395*88655be9SArvind Ram Prakash cmp x17, #EC_AARCH64_SYS 396*88655be9SArvind Ram Prakash b.eq sysreg_handler64 397*88655be9SArvind Ram Prakash 398f8a35797SJayanth Dodderi Chidanand mov sp, x12 399f8a35797SJayanth Dodderi Chidanand 400f8a35797SJayanth Dodderi Chidanand /* 401f8a35797SJayanth Dodderi Chidanand * Per SMCCC documentation, bits [23:17] must be zero for Fast 402f8a35797SJayanth Dodderi Chidanand * SMCs. Other values are reserved for future use. Ensure that 403f8a35797SJayanth Dodderi Chidanand * these bits are zeroes, if not report as unknown SMC. 404f8a35797SJayanth Dodderi Chidanand */ 405f8a35797SJayanth Dodderi Chidanand tbz x0, #FUNCID_TYPE_SHIFT, 2f /* Skip check if its a Yield Call*/ 406f8a35797SJayanth Dodderi Chidanand tst x0, #(FUNCID_FC_RESERVED_MASK << FUNCID_FC_RESERVED_SHIFT) 407f8a35797SJayanth Dodderi Chidanand b.ne smc_unknown 408f8a35797SJayanth Dodderi Chidanand 4090fe7b9f2SOlivier Deprez /* 4100fe7b9f2SOlivier Deprez * Per SMCCCv1.3 a caller can set the SVE hint bit in the SMC FID 4110fe7b9f2SOlivier Deprez * passed through x0. Copy the SVE hint bit to flags and mask the 4120fe7b9f2SOlivier Deprez * bit in smc_fid passed to the standard service dispatcher. 4130fe7b9f2SOlivier Deprez * A service/dispatcher can retrieve the SVE hint bit state from 4140fe7b9f2SOlivier Deprez * flags using the appropriate helper. 4150fe7b9f2SOlivier Deprez */ 416f8a35797SJayanth Dodderi Chidanand2: 417b2d85178SOlivier Deprez and x16, x0, #(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT) 418b2d85178SOlivier Deprez orr x7, x7, x16 4190fe7b9f2SOlivier Deprez bic x0, x0, #(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT) 4200fe7b9f2SOlivier Deprez 421cc485e27SMadhukar Pappireddy /* Get the unique owning entity number */ 422cc485e27SMadhukar Pappireddy ubfx x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH 423cc485e27SMadhukar Pappireddy ubfx x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH 424cc485e27SMadhukar Pappireddy orr x16, x16, x15, lsl #FUNCID_OEN_WIDTH 425cc485e27SMadhukar Pappireddy 426cc485e27SMadhukar Pappireddy /* Load descriptor index from array of indices */ 427c367b75eSMadhukar Pappireddy adrp x14, rt_svc_descs_indices 428c367b75eSMadhukar Pappireddy add x14, x14, :lo12:rt_svc_descs_indices 429cc485e27SMadhukar Pappireddy ldrb w15, [x14, x16] 430cc485e27SMadhukar Pappireddy 431cc485e27SMadhukar Pappireddy /* Any index greater than 127 is invalid. Check bit 7. */ 432cc485e27SMadhukar Pappireddy tbnz w15, 7, smc_unknown 433cc485e27SMadhukar Pappireddy 434cc485e27SMadhukar Pappireddy /* 435cc485e27SMadhukar Pappireddy * Get the descriptor using the index 436cc485e27SMadhukar Pappireddy * x11 = (base + off), w15 = index 437cc485e27SMadhukar Pappireddy * 438cc485e27SMadhukar Pappireddy * handler = (base + off) + (index << log2(size)) 439cc485e27SMadhukar Pappireddy */ 440a5c4212fSHsin-Hsiung Wang adr_l x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE) 441cc485e27SMadhukar Pappireddy lsl w10, w15, #RT_SVC_SIZE_LOG2 442cc485e27SMadhukar Pappireddy ldr x15, [x11, w10, uxtw] 443cc485e27SMadhukar Pappireddy 444a6ef4393SDouglas Raillard /* 445a6ef4393SDouglas Raillard * Call the Secure Monitor Call handler and then drop directly into 446a6ef4393SDouglas Raillard * el3_exit() which will program any remaining architectural state 447a6ef4393SDouglas Raillard * prior to issuing the ERET to the desired lower EL. 448caa84939SJeenu Viswambharan */ 449caa84939SJeenu Viswambharan#if DEBUG 450caa84939SJeenu Viswambharan cbz x15, rt_svc_fw_critical_error 451caa84939SJeenu Viswambharan#endif 452caa84939SJeenu Viswambharan blr x15 453caa84939SJeenu Viswambharan 454bbf8f6f9SYatharth Kochar b el3_exit 4554f6ad66aSAchin Gupta 456ccd81f1eSAndre Przywarasysreg_handler64: 457ccd81f1eSAndre Przywara mov x0, x16 /* ESR_EL3, containing syndrome information */ 458ccd81f1eSAndre Przywara mov x1, x6 /* lower EL's context */ 459*88655be9SArvind Ram Prakash mov x2, x7 /* flags, used to find security state */ 460ccd81f1eSAndre Przywara mov x19, x6 /* save context pointer for after the call */ 461ccd81f1eSAndre Przywara mov sp, x12 /* EL3 runtime stack, as loaded above */ 462ccd81f1eSAndre Przywara 463ccd81f1eSAndre Przywara /* int handle_sysreg_trap(uint64_t esr_el3, cpu_context_t *ctx); */ 464ccd81f1eSAndre Przywara bl handle_sysreg_trap 465ccd81f1eSAndre Przywara /* 466ccd81f1eSAndre Przywara * returns: 4673c789bfcSManish Pandey * -1: unhandled trap, UNDEF injection into lower EL 468ccd81f1eSAndre Przywara * 0: handled trap, return to the trapping instruction (repeating it) 469ccd81f1eSAndre Przywara * 1: handled trap, return to the next instruction 470ccd81f1eSAndre Przywara */ 471ccd81f1eSAndre Przywara 472ccd81f1eSAndre Przywara tst w0, w0 4733c789bfcSManish Pandey b.mi 2f /* negative: undefined exception injection */ 474ccd81f1eSAndre Przywara 4753c789bfcSManish Pandey b.eq 1f /* zero: do not change ELR_EL3 */ 4763c789bfcSManish Pandey /* positive: advance the PC to continue after the instruction */ 477ccd81f1eSAndre Przywara ldr x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3] 478ccd81f1eSAndre Przywara add x1, x1, #4 479ccd81f1eSAndre Przywara str x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3] 480ccd81f1eSAndre Przywara1: 481ccd81f1eSAndre Przywara b el3_exit 4823c789bfcSManish Pandey2: 4833c789bfcSManish Pandey /* 4843c789bfcSManish Pandey * UNDEF injection to lower EL, the support is only provided for lower 4853c789bfcSManish Pandey * EL in AArch64 mode, for AArch32 mode it will do elx_panic as before. 4863c789bfcSManish Pandey */ 4873c789bfcSManish Pandey mrs x0, spsr_el3 4883c789bfcSManish Pandey tst x0, #(SPSR_M_MASK << SPSR_M_SHIFT) 4893c789bfcSManish Pandey b.ne elx_panic 4903c789bfcSManish Pandey /* Pass context pointer as an argument to inject_undef64 */ 4913c789bfcSManish Pandey mov x0, x19 4923c789bfcSManish Pandey bl inject_undef64 4933c789bfcSManish Pandey b el3_exit 494ccd81f1eSAndre Przywara 495caa84939SJeenu Viswambharansmc_unknown: 496caa84939SJeenu Viswambharan /* 497cc485e27SMadhukar Pappireddy * Unknown SMC call. Populate return value with SMC_UNK and call 498cc485e27SMadhukar Pappireddy * el3_exit() which will restore the remaining architectural state 499cc485e27SMadhukar Pappireddy * i.e., SYS, GP and PAuth registers(if any) prior to issuing the ERET 500cc485e27SMadhukar Pappireddy * to the desired lower EL. 501caa84939SJeenu Viswambharan */ 5024abd7fa7SAntonio Nino Diaz mov x0, #SMC_UNK 503cc485e27SMadhukar Pappireddy str x0, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X0] 504cc485e27SMadhukar Pappireddy b el3_exit 505caa84939SJeenu Viswambharan 506caa84939SJeenu Viswambharansmc_prohibited: 5073b8456bdSManish V Badarkhe restore_ptw_el1_sys_regs 5083b8456bdSManish V Badarkhe ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28] 509c3260f9bSSoby Mathew ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR] 5104abd7fa7SAntonio Nino Diaz mov x0, #SMC_UNK 511f461fe34SAnthony Steinhauser exception_return 512caa84939SJeenu Viswambharan 513ed108b56SAlexei Fedorov#if DEBUG 514caa84939SJeenu Viswambharanrt_svc_fw_critical_error: 515a6ef4393SDouglas Raillard /* Switch to SP_ELx */ 516ed108b56SAlexei Fedorov msr spsel, #MODE_SP_ELX 517a806dad5SJeenu Viswambharan no_ret report_unhandled_exception 518ed108b56SAlexei Fedorov#endif 519ccd81f1eSAndre Przywaraendfunc sync_exception_handler 5201f461979SJustin Chadwell 5211f461979SJustin Chadwell /* --------------------------------------------------------------------- 5223991b889SManish Pandey * This function handles FIQ or IRQ interrupts i.e. EL3, S-EL1 and NS 5233991b889SManish Pandey * interrupts. 5243991b889SManish Pandey * 5253991b889SManish Pandey * Note that x30 has been explicitly saved and can be used here 5263991b889SManish Pandey * --------------------------------------------------------------------- 5273991b889SManish Pandey */ 5283991b889SManish Pandeyfunc handle_interrupt_exception 5293991b889SManish Pandey /* 5303991b889SManish Pandey * Save general purpose and ARMv8.3-PAuth registers (if enabled). 5313991b889SManish Pandey * Also save PMCR_EL0 and set the PSTATE to a known state. 5323991b889SManish Pandey */ 5333991b889SManish Pandey bl prepare_el3_entry 5343991b889SManish Pandey 5353991b889SManish Pandey /* Save the EL3 system registers needed to return from this exception */ 5363991b889SManish Pandey mrs x0, spsr_el3 5373991b889SManish Pandey mrs x1, elr_el3 5383991b889SManish Pandey stp x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3] 5393991b889SManish Pandey 5403991b889SManish Pandey /* Switch to the runtime stack i.e. SP_EL0 */ 5413991b889SManish Pandey ldr x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP] 5423991b889SManish Pandey mov x20, sp 5433991b889SManish Pandey msr spsel, #MODE_SP_EL0 5443991b889SManish Pandey mov sp, x2 5453991b889SManish Pandey 5463991b889SManish Pandey /* 5473991b889SManish Pandey * Find out whether this is a valid interrupt type. 5483991b889SManish Pandey * If the interrupt controller reports a spurious interrupt then return 5493991b889SManish Pandey * to where we came from. 5503991b889SManish Pandey */ 5513991b889SManish Pandey bl plat_ic_get_pending_interrupt_type 5523991b889SManish Pandey cmp x0, #INTR_TYPE_INVAL 5533991b889SManish Pandey b.eq interrupt_exit 5543991b889SManish Pandey 5553991b889SManish Pandey /* 5563991b889SManish Pandey * Get the registered handler for this interrupt type. 5573991b889SManish Pandey * A NULL return value could be 'cause of the following conditions: 5583991b889SManish Pandey * 5593991b889SManish Pandey * a. An interrupt of a type was routed correctly but a handler for its 5603991b889SManish Pandey * type was not registered. 5613991b889SManish Pandey * 5623991b889SManish Pandey * b. An interrupt of a type was not routed correctly so a handler for 5633991b889SManish Pandey * its type was not registered. 5643991b889SManish Pandey * 5653991b889SManish Pandey * c. An interrupt of a type was routed correctly to EL3, but was 5663991b889SManish Pandey * deasserted before its pending state could be read. Another 5673991b889SManish Pandey * interrupt of a different type pended at the same time and its 5683991b889SManish Pandey * type was reported as pending instead. However, a handler for this 5693991b889SManish Pandey * type was not registered. 5703991b889SManish Pandey * 5713991b889SManish Pandey * a. and b. can only happen due to a programming error. The 5723991b889SManish Pandey * occurrence of c. could be beyond the control of Trusted Firmware. 5733991b889SManish Pandey * It makes sense to return from this exception instead of reporting an 5743991b889SManish Pandey * error. 5753991b889SManish Pandey */ 5763991b889SManish Pandey bl get_interrupt_type_handler 5773991b889SManish Pandey cbz x0, interrupt_exit 5783991b889SManish Pandey mov x21, x0 5793991b889SManish Pandey 5803991b889SManish Pandey mov x0, #INTR_ID_UNAVAILABLE 5813991b889SManish Pandey 5823991b889SManish Pandey /* Set the current security state in the 'flags' parameter */ 5833991b889SManish Pandey mrs x2, scr_el3 5843991b889SManish Pandey ubfx x1, x2, #0, #1 5853991b889SManish Pandey 5863991b889SManish Pandey /* Restore the reference to the 'handle' i.e. SP_EL3 */ 5873991b889SManish Pandey mov x2, x20 5883991b889SManish Pandey 5893991b889SManish Pandey /* x3 will point to a cookie (not used now) */ 5903991b889SManish Pandey mov x3, xzr 5913991b889SManish Pandey 5923991b889SManish Pandey /* Call the interrupt type handler */ 5933991b889SManish Pandey blr x21 5943991b889SManish Pandey 5953991b889SManish Pandeyinterrupt_exit: 5963991b889SManish Pandey /* Return from exception, possibly in a different security state */ 5973991b889SManish Pandey b el3_exit 5983991b889SManish Pandeyendfunc handle_interrupt_exception 5993991b889SManish Pandey 6006d22b089SManish Pandeyfunc imp_def_el3_handler 6016d22b089SManish Pandey /* Save GP registers */ 6026d22b089SManish Pandey stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0] 6036d22b089SManish Pandey stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2] 6046d22b089SManish Pandey stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4] 6056d22b089SManish Pandey 6066d22b089SManish Pandey /* Get the cpu_ops pointer */ 6076d22b089SManish Pandey bl get_cpu_ops_ptr 6086d22b089SManish Pandey 6096d22b089SManish Pandey /* Get the cpu_ops exception handler */ 6106d22b089SManish Pandey ldr x0, [x0, #CPU_E_HANDLER_FUNC] 6116d22b089SManish Pandey 6126d22b089SManish Pandey /* 6136d22b089SManish Pandey * If the reserved function pointer is NULL, this CPU does not have an 6146d22b089SManish Pandey * implementation defined exception handler function 6156d22b089SManish Pandey */ 6166d22b089SManish Pandey cbz x0, el3_handler_exit 6176d22b089SManish Pandey mrs x1, esr_el3 6186d22b089SManish Pandey ubfx x1, x1, #ESR_EC_SHIFT, #ESR_EC_LENGTH 6196d22b089SManish Pandey blr x0 6206d22b089SManish Pandeyel3_handler_exit: 6216d22b089SManish Pandey ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0] 6226d22b089SManish Pandey ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2] 6236d22b089SManish Pandey ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4] 6246d22b089SManish Pandey restore_x30 6256d22b089SManish Pandey no_ret report_unhandled_exception 6266d22b089SManish Pandeyendfunc imp_def_el3_handler 6276d22b089SManish Pandey 6286d22b089SManish Pandey/* 6296d22b089SManish Pandey * Handler for async EA from lower EL synchronized at EL3 entry in KFH mode. 6306d22b089SManish Pandey * 6316d22b089SManish Pandey * This scenario may arise when there is an error (EA) in the system which is not 6326d22b089SManish Pandey * yet signaled to PE while executing in lower EL. During entry into EL3, the errors 6336d22b089SManish Pandey * are synchronized either implicitly or explicitly causing async EA to pend at EL3. 6346d22b089SManish Pandey * 6356d22b089SManish Pandey * On detecting the pending EA (via ISR_EL1.A) and if the EA routing model is 6366d22b089SManish Pandey * KFH (SCR_EL3.EA = 1) this handler reflects ther error back to lower EL. 6376d22b089SManish Pandey * 6386d22b089SManish Pandey * This function assumes x30 has been saved. 6396d22b089SManish Pandey */ 6406d22b089SManish Pandeyfunc reflect_pending_async_ea_to_lower_el 6416d22b089SManish Pandey /* 6426d22b089SManish Pandey * As the original exception was not handled we need to ensure that we return 6436d22b089SManish Pandey * back to the instruction which caused the exception. To acheive that, eret 6446d22b089SManish Pandey * to "elr-4" (Label "subtract_elr_el3") for SMC or simply eret otherwise 6456d22b089SManish Pandey * (Label "skip_smc_check"). 6466d22b089SManish Pandey * 6476d22b089SManish Pandey * LIMITATION: It could be that async EA is masked at the target exception level 6486d22b089SManish Pandey * or the priority of async EA wrt to the EL3/secure interrupt is lower, which 6496d22b089SManish Pandey * causes back and forth between lower EL and EL3. In case of back and forth between 6506d22b089SManish Pandey * lower EL and EL3, we can track the loop count in "CTX_NESTED_EA_FLAG" and leverage 6516d22b089SManish Pandey * previous ELR in "CTX_SAVED_ELR_EL3" to detect this cycle and further panic 6526d22b089SManish Pandey * to indicate a problem here (Label "check_loop_ctr"). If we are in this cycle, loop 6536d22b089SManish Pandey * counter retains its value but if we do a normal el3_exit this flag gets cleared. 6546d22b089SManish Pandey * However, setting SCR_EL3.IESB = 1, should give priority to SError handling 6556d22b089SManish Pandey * as per AArch64.TakeException pseudo code in Arm ARM. 6566d22b089SManish Pandey * 6576d22b089SManish Pandey * TODO: In future if EL3 gets a capability to inject a virtual SError to lower 6586d22b089SManish Pandey * ELs, we can remove the el3_panic and handle the original exception first and 6596d22b089SManish Pandey * inject SError to lower EL before ereting back. 6606d22b089SManish Pandey */ 6616d22b089SManish Pandey stp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28] 6626d22b089SManish Pandey ldr x29, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3] 6636d22b089SManish Pandey mrs x28, elr_el3 6646d22b089SManish Pandey cmp x29, x28 6656d22b089SManish Pandey b.eq check_loop_ctr 6666d22b089SManish Pandey str x28, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3] 6676d22b089SManish Pandey /* Zero the loop counter */ 6686d22b089SManish Pandey str xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG] 6696d22b089SManish Pandey b skip_loop_ctr 6706d22b089SManish Pandeycheck_loop_ctr: 6716d22b089SManish Pandey ldr x29, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG] 6726d22b089SManish Pandey add x29, x29, #1 6736d22b089SManish Pandey str x29, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG] 6746d22b089SManish Pandey cmp x29, #ASYNC_EA_REPLAY_COUNTER 6756d22b089SManish Pandey b.ge el3_panic 6766d22b089SManish Pandeyskip_loop_ctr: 6776d22b089SManish Pandey /* 6786d22b089SManish Pandey * Logic to distinguish if we came from SMC or any other exception. 6796d22b089SManish Pandey * Use offsets in vector entry to get which exception we are handling. 6806d22b089SManish Pandey * In each vector entry of size 0x200, address "0x0-0x80" is for sync 6816d22b089SManish Pandey * exception and "0x80-0x200" is for async exceptions. 6826d22b089SManish Pandey * Use vector base address (vbar_el3) and exception offset (LR) to 6836d22b089SManish Pandey * calculate whether the address we came from is any of the following 6846d22b089SManish Pandey * "0x0-0x80", "0x200-0x280", "0x400-0x480" or "0x600-0x680" 6856d22b089SManish Pandey */ 6866d22b089SManish Pandey mrs x29, vbar_el3 6876d22b089SManish Pandey sub x30, x30, x29 6886d22b089SManish Pandey and x30, x30, #0x1ff 6896d22b089SManish Pandey cmp x30, #0x80 6906d22b089SManish Pandey b.ge skip_smc_check 6916d22b089SManish Pandey /* Its a synchronous exception, Now check if it is SMC or not? */ 6926d22b089SManish Pandey mrs x30, esr_el3 6936d22b089SManish Pandey ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH 6946d22b089SManish Pandey cmp x30, #EC_AARCH32_SMC 6956d22b089SManish Pandey b.eq subtract_elr_el3 6966d22b089SManish Pandey cmp x30, #EC_AARCH64_SMC 6976d22b089SManish Pandey b.eq subtract_elr_el3 6986d22b089SManish Pandey b skip_smc_check 6996d22b089SManish Pandeysubtract_elr_el3: 7006d22b089SManish Pandey sub x28, x28, #4 7016d22b089SManish Pandeyskip_smc_check: 7026d22b089SManish Pandey msr elr_el3, x28 7036d22b089SManish Pandey ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28] 7046d22b089SManish Pandey ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR] 7056d22b089SManish Pandey exception_return 7066d22b089SManish Pandeyendfunc reflect_pending_async_ea_to_lower_el 7076d22b089SManish Pandey 7083991b889SManish Pandey /* --------------------------------------------------------------------- 7091f461979SJustin Chadwell * The following code handles exceptions caused by BRK instructions. 7101f461979SJustin Chadwell * Following a BRK instruction, the only real valid cause of action is 7111f461979SJustin Chadwell * to print some information and panic, as the code that caused it is 7121f461979SJustin Chadwell * likely in an inconsistent internal state. 7131f461979SJustin Chadwell * 7141f461979SJustin Chadwell * This is initially intended to be used in conjunction with 7151f461979SJustin Chadwell * __builtin_trap. 7161f461979SJustin Chadwell * --------------------------------------------------------------------- 7171f461979SJustin Chadwell */ 7181f461979SJustin Chadwell#ifdef MONITOR_TRAPS 7191f461979SJustin Chadwellfunc brk_handler 7201f461979SJustin Chadwell /* Extract the ISS */ 7211f461979SJustin Chadwell mrs x10, esr_el3 7221f461979SJustin Chadwell ubfx x10, x10, #ESR_ISS_SHIFT, #ESR_ISS_LENGTH 7231f461979SJustin Chadwell 7241f461979SJustin Chadwell /* Ensure the console is initialized */ 7251f461979SJustin Chadwell bl plat_crash_console_init 7261f461979SJustin Chadwell 7271f461979SJustin Chadwell adr x4, brk_location 7281f461979SJustin Chadwell bl asm_print_str 7291f461979SJustin Chadwell mrs x4, elr_el3 7301f461979SJustin Chadwell bl asm_print_hex 7311f461979SJustin Chadwell bl asm_print_newline 7321f461979SJustin Chadwell 7331f461979SJustin Chadwell adr x4, brk_message 7341f461979SJustin Chadwell bl asm_print_str 7351f461979SJustin Chadwell mov x4, x10 7361f461979SJustin Chadwell mov x5, #28 7371f461979SJustin Chadwell bl asm_print_hex_bits 7381f461979SJustin Chadwell bl asm_print_newline 7391f461979SJustin Chadwell 7401f461979SJustin Chadwell no_ret plat_panic_handler 7411f461979SJustin Chadwellendfunc brk_handler 7421f461979SJustin Chadwell#endif /* MONITOR_TRAPS */ 743