/*
 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl31/ea_handle.h>
#include <bl31/interrupt_mgmt.h>
#include <bl31/sync_handle.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32

	/*
	 * Save LR to make x30 available, as most of the routines reached from
	 * the vector entries need a free register.
	 */
	.macro save_x30
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	.endm

	.macro restore_x30
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	.endm

	/*
	 * Macro that synchronizes errors (EA) and checks for a pending SError.
	 * On detecting one, it either reflects the error back to the lower EL
	 * (Kernel First Handling, KFH) or handles it in EL3 (Firmware First
	 * Handling, FFH), depending on the EA routing model.
	 */
	.macro	sync_and_handle_pending_serror
	synchronize_errors
	mrs	x30, ISR_EL1
	tbz	x30, #ISR_A_SHIFT, 2f
#if FFH_SUPPORT
	mrs	x30, scr_el3
	tst	x30, #SCR_EA_BIT
	b.eq	1f
	bl	handle_pending_async_ea
	b	2f
#endif
1:
	/* This function never returns, but needs LR for decision making */
	bl	reflect_pending_async_ea_to_lower_el
2:
	.endm
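
	/*
	 * The routing decision above, as a rough C sketch (the two handler
	 * names are the real ones; the register accesses are pseudocode, not
	 * TF-A helpers):
	 *
	 *	synchronize_errors();
	 *	if (ISR_EL1 & (1 << ISR_A_SHIFT)) {	// SError pending?
	 *		if (SCR_EL3 & SCR_EA_BIT)	// EA routed to EL3?
	 *			handle_pending_async_ea();		// FFH
	 *		else
	 *			reflect_pending_async_ea_to_lower_el();	// KFH
	 *	}
	 *
	 * Without FFH_SUPPORT the FFH branch is compiled out, so a pending
	 * SError is always reflected back to the lower EL.
	 */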

	/* ---------------------------------------------------------------------
	 * This macro handles Synchronous exceptions.
	 * Only SMC exceptions and system register traps are supported; any
	 * other synchronous exception is assumed to be an external abort.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	mrs	x30, cntpct_el0
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	mrs	x29, tpidr_el3
	str	x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Handle SMC exceptions separately from other synchronous exceptions */
	cmp	x30, #EC_AARCH32_SMC
	b.eq	smc_handler32

	cmp	x30, #EC_AARCH64_SMC
	b.eq	sync_handler64

	cmp	x30, #EC_AARCH64_SYS
	b.eq	sync_handler64

	/* Synchronous exceptions other than the above are assumed to be EA */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	b	handle_lower_el_sync_ea
	.endm

vector_base runtime_exceptions
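
	/*
	 * The layout of this table is fixed by the AArch64 architecture: four
	 * groups of four entries (Synchronous, IRQ, FIQ, SError), each entry
	 * 0x80 bytes (32 instructions), selected by the state the exception
	 * was taken from:
	 *
	 *	0x000	Current EL with SP_EL0
	 *	0x200	Current EL with SP_ELx
	 *	0x400	Lower EL using AArch64
	 *	0x600	Lower EL using AArch32
	 */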

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
#ifdef MONITOR_TRAPS
	stp	x29, x30, [sp, #-16]!

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Check for BRK */
	cmp	x30, #EC_BRK
	b.eq	brk_handler

	ldp	x29, x30, [sp], #16
#endif /* MONITOR_TRAPS */

	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
end_vector_entry irq_sp_el0

vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_el0

vector_entry serror_sp_el0
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit, or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_elx

vector_entry serror_sp_elx
#if FFH_SUPPORT
	/*
	 * This entry is reached either because an SError was taken in EL3
	 * itself, or because an asynchronous external abort pending at a
	 * lower EL was triggered by the implicit/explicit error
	 * synchronization on EL3 entry (SCR_EL3.EA = 1). The former case is
	 * handled by "plat_handle_el3_ea". The latter case occurs when
	 * PSTATE.A is cleared in "handle_pending_async_ea", which means we
	 * are taking a nested exception in EL3. Call the async EA handler,
	 * which will ERET back to the original EL3 handler for a nested
	 * exception. Also unmask EAs, so that any further EA arising while
	 * this nested exception is handled is caught at EL3.
	 */
	save_x30
	ldr	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	cbz	x30, 1f
	/*
	 * This is nested exception handling; clear the flag to avoid taking
	 * this path for further exceptions caused by the EA handling.
	 */
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	unmask_async_ea
	b	handle_lower_el_async_ea
1:
	restore_x30
#endif
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_elx
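
	/*
	 * Each of the eight lower EL entries below runs the same prologue
	 * before branching to its specific handler:
	 *
	 *	save_x30			- stash LR so the following
	 *					  macros have a free register
	 *	apply_at_speculative_wa		- workaround for CPU errata
	 *					  around speculative AT walks
	 *					  (ERRATA_SPECULATIVE_AT builds,
	 *					  otherwise a no-op)
	 *	sync_and_handle_pending_serror	- synchronize and route any
	 *					  pending SError (see above)
	 *	unmask_async_ea			- clear PSTATE.A so that EAs
	 *					  can be taken while in EL3
	 */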

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * This exception vector will most commonly be the entry point for
	 * SMCs and for traps that are unhandled at lower ELs. SP_EL3 should
	 * point to a valid cpu context, where the general purpose and system
	 * register state can be saved.
	 */
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch64

vector_entry irq_aarch64
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry irq_aarch64

vector_entry fiq_aarch64
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry fiq_aarch64

	/*
	 * Any outstanding SError needs to be synchronized here, since errors
	 * can arrive in bursts; reuse the synchronization mechanism to catch
	 * any further errors that are still pending.
	 */
vector_entry serror_aarch64
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_lower_el_async_ea
end_vector_entry serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * This exception vector will most commonly be the entry point for
	 * SMCs and for traps that are unhandled at lower ELs. SP_EL3 should
	 * point to a valid cpu context, where the general purpose and system
	 * register state can be saved.
	 */
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch32

vector_entry irq_aarch32
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry irq_aarch32

vector_entry fiq_aarch32
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry fiq_aarch32

	/*
	 * Any outstanding SError needs to be synchronized here, since errors
	 * can arrive in bursts; reuse the synchronization mechanism to catch
	 * any further errors that are still pending.
	 */
vector_entry serror_aarch32
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_lower_el_async_ea
end_vector_entry serror_aarch32

#ifdef MONITOR_TRAPS
	.section .rodata.brk_string, "aS"
brk_location:
	.asciz "Error at instruction 0x"
brk_message:
	.asciz "Unexpected BRK instruction with value 0x"
#endif /* MONITOR_TRAPS */
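
	/*
	 * For reference, the SMC function identifier (FID) arriving in w0 is
	 * laid out as follows per the SMC Calling Convention (the SVE hint
	 * bit was added in SMCCC v1.3); the FUNCID_* macros used below index
	 * these fields:
	 *
	 *	bit  31		call type: 1 = Fast, 0 = Yielding
	 *	bit  30		calling convention: 1 = SMC64, 0 = SMC32
	 *	bits 29:24	owning entity number (OEN)
	 *	bits 23:17	reserved, must be zero for Fast calls
	 *	bit  16		SVE hint
	 *	bits 15:0	function number
	 */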

	/* ---------------------------------------------------------------------
	 * The following code handles secure monitor calls, as well as the
	 * system register traps that are routed to the same handler.
	 * Depending on the execution state from which the SMC was invoked, it
	 * frees some general purpose registers to perform the remaining
	 * tasks. These involve finding the runtime service handler that is
	 * the target of the SMC and switching to the runtime stack (SP_EL0)
	 * before calling the handler.
	 *
	 * Note that x30 has been explicitly saved and can be used here
	 * ---------------------------------------------------------------------
	 */
func sync_exception_handler
smc_handler32:
	/* Check whether AArch32 issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

sync_handler64:
	/* NOTE: The code below must preserve x0-x4 */

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * Also save PMCR_EL0 and set the PSTATE to a known state.
	 */
	bl	prepare_el3_entry

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/*
	 * Populate the parameters for the SMC handler.
	 * We already have x0-x4 in place. x5 will point to a cookie (not used
	 * now). x6 will point to the context structure (SP_EL3) and x7 will
	 * contain flags we need to pass to the handler.
	 */
	mov	x5, xzr
	mov	x6, sp

	/*
	 * Restore the saved C runtime stack value which will become the new
	 * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
	 * structure prior to the last ERET from EL3.
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* Switch to SP_EL0 */
	msr	spsel, #MODE_SP_EL0

	/*
	 * Save SPSR_EL3 and ELR_EL3 in case there is a world switch during
	 * SMC handling.
	 * TODO: Revisit if all system registers can be saved later.
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Load SCR_EL3 */
	mrs	x18, scr_el3

	/* Check for system register traps */
	mrs	x16, esr_el3
	ubfx	x17, x16, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x17, #EC_AARCH64_SYS
	b.eq	sysreg_handler64
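
	/*
	 * The 'flags' argument (x7) passed to the runtime service handler is
	 * assembled below and, as the code stands, carries:
	 *
	 *	bit 0	caller's security state (copy of SCR_EL3.NS)
	 *	bit 5	copy of SCR_EL3.NSE (only with ENABLE_RME)
	 *	bit 16	SVE hint, copied from the incoming FID
	 */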

	/* Clear flag register */
	mov	x7, xzr

#if ENABLE_RME
	/* Copy SCR_EL3.NSE bit to the flag to indicate caller's security */
	ubfx	x7, x18, #SCR_NSE_SHIFT, #1

	/*
	 * Shift the copied SCR_EL3.NSE bit by 5 to create space for the
	 * SCR_EL3.NS bit. Bit 5 of the flag corresponds to the SCR_EL3.NSE
	 * bit.
	 */
	lsl	x7, x7, #5
#endif /* ENABLE_RME */

	/* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
	bfi	x7, x18, #0, #1

	mov	sp, x12

	/*
	 * Per SMCCC documentation, bits [23:17] must be zero for Fast SMCs.
	 * Other values are reserved for future use. Ensure that these bits
	 * are zero; if they are not, report the call as an unknown SMC.
	 */
	tbz	x0, #FUNCID_TYPE_SHIFT, 2f	/* Skip the check for Yielding calls */
	tst	x0, #(FUNCID_FC_RESERVED_MASK << FUNCID_FC_RESERVED_SHIFT)
	b.ne	smc_unknown

	/*
	 * Per SMCCC v1.3 a caller can set the SVE hint bit in the SMC FID
	 * passed through x0. Copy the SVE hint bit to the flags and mask the
	 * bit in the smc_fid passed to the standard service dispatcher.
	 * A service/dispatcher can retrieve the SVE hint bit state from the
	 * flags using the appropriate helper.
	 */
2:
	and	x16, x0, #(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT)
	orr	x7, x7, x16
	bic	x0, x0, #(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT)
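
	/*
	 * The descriptor lookup below, as a rough C sketch (the symbol names
	 * are the real ones, the types are paraphrased):
	 *
	 *	idx = rt_svc_descs_indices[(type << FUNCID_OEN_WIDTH) | oen];
	 *	if (idx & 0x80)			// > 127: invalid
	 *		goto smc_unknown;
	 *	handler = __RT_SVC_DESCS_START__[idx].handle;
	 */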

	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH

	/* Load descriptor index from array of indices */
	adrp	x14, rt_svc_descs_indices
	add	x14, x14, :lo12:rt_svc_descs_indices
	ldrb	w15, [x14, x16]

	/* Any index greater than 127 is invalid. Check bit 7. */
	tbnz	w15, 7, smc_unknown

	/*
	 * Get the descriptor using the index
	 * x11 = (base + off), w15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 */
	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]

	/*
	 * Call the Secure Monitor Call handler and then drop directly into
	 * el3_exit() which will program any remaining architectural state
	 * prior to issuing the ERET to the desired lower EL.
	 */
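
	/*
	 * The handler loaded into x15 follows the runtime service calling
	 * convention; its C prototype (rt_svc_handle_t in runtime_svc.h) is
	 * of the shape:
	 *
	 *	uintptr_t handler(uint32_t smc_fid, u_register_t x1,
	 *			  u_register_t x2, u_register_t x3,
	 *			  u_register_t x4, void *cookie,
	 *			  void *handle, u_register_t flags);
	 */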

#if DEBUG
	cbz	x15, rt_svc_fw_critical_error
#endif
	blr	x15

	b	el3_exit

sysreg_handler64:
	mov	x0, x16		/* ESR_EL3, containing syndrome information */
	mov	x1, x6		/* lower EL's context */
	mov	x19, x6		/* save context pointer for after the call */
	mov	sp, x12		/* EL3 runtime stack, as loaded above */

	/* int handle_sysreg_trap(uint64_t esr_el3, cpu_context_t *ctx); */
	bl	handle_sysreg_trap
	/*
	 * returns:
	 *   -1: unhandled trap, panic
	 *    0: handled trap, return to the trapping instruction (repeating it)
	 *    1: handled trap, return to the next instruction
	 */

	tst	w0, w0
	b.mi	elx_panic	/* negative return value: panic */
	b.eq	1f		/* zero: do not change ELR_EL3 */

	/* Advance the PC to continue after the trapping instruction */
	ldr	x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3]
	add	x1, x1, #4
	str	x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3]
1:
	b	el3_exit

smc_unknown:
	/*
	 * Unknown SMC call. Populate the return value with SMC_UNK and call
	 * el3_exit(), which will restore the remaining architectural state,
	 * i.e. the SYS, GP and PAuth registers (if any), prior to issuing the
	 * ERET to the desired lower EL.
	 */
	mov	x0, #SMC_UNK
	str	x0, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	el3_exit

smc_prohibited:
	restore_ptw_el1_sys_regs
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x0, #SMC_UNK
	exception_return

#if DEBUG
rt_svc_fw_critical_error:
	/* Switch to SP_ELx */
	msr	spsel, #MODE_SP_ELX
	no_ret	report_unhandled_exception
#endif
endfunc sync_exception_handler

	/* ---------------------------------------------------------------------
	 * This function handles FIQ and IRQ interrupts, i.e. EL3, S-EL1 and
	 * NS interrupts.
	 *
	 * Note that x30 has been explicitly saved and can be used here
	 * ---------------------------------------------------------------------
	 */
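
	/*
	 * In outline, a C sketch of the function below (the names are the
	 * real ones; 'flags' carries the security state and 'handle' is the
	 * SP_EL3 context, both set up in the body):
	 *
	 *	type = plat_ic_get_pending_interrupt_type();
	 *	if (type == INTR_TYPE_INVAL)
	 *		goto interrupt_exit;	// spurious interrupt
	 *	handler = get_interrupt_type_handler(type);
	 *	if (handler == NULL)
	 *		goto interrupt_exit;	// no handler registered
	 *	handler(INTR_ID_UNAVAILABLE, flags, handle, cookie);
	 * interrupt_exit:
	 *	el3_exit();
	 */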
func handle_interrupt_exception
	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * Also save PMCR_EL0 and set the PSTATE to a known state.
	 */
	bl	prepare_el3_entry

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/* Save the EL3 system registers needed to return from this exception */
	mrs	x0, spsr_el3
	mrs	x1, elr_el3
	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Switch to the runtime stack i.e. SP_EL0 */
	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	mov	x20, sp
	msr	spsel, #MODE_SP_EL0
	mov	sp, x2

	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	bl	plat_ic_get_pending_interrupt_type
	cmp	x0, #INTR_TYPE_INVAL
	b.eq	interrupt_exit

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be the result of any of the following
	 * conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	bl	get_interrupt_type_handler
	cbz	x0, interrupt_exit
	mov	x21, x0

	mov	x0, #INTR_ID_UNAVAILABLE

	/* Set the current security state in the 'flags' parameter */
	mrs	x2, scr_el3
	ubfx	x1, x2, #0, #1

	/* Restore the reference to the 'handle' i.e. SP_EL3 */
	mov	x2, x20

	/* x3 will point to a cookie (not used now) */
	mov	x3, xzr
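
	/*
	 * The registered handler in x21 has the interrupt_type_handler_t
	 * prototype from interrupt_mgmt.h, which the x0-x3 setup above
	 * matches:
	 *
	 *	uint64_t handler(uint32_t id, uint32_t flags, void *handle,
	 *			 void *cookie);
	 */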

	/* Call the interrupt type handler */
	blr	x21

interrupt_exit:
	/* Return from exception, possibly in a different security state */
	b	el3_exit
endfunc handle_interrupt_exception

	/* ---------------------------------------------------------------------
	 * The following code handles exceptions caused by BRK instructions.
	 * Following a BRK instruction, the only valid course of action is to
	 * print some information and panic, as the code that caused it is
	 * likely in an inconsistent internal state.
	 *
	 * This is initially intended to be used in conjunction with
	 * __builtin_trap.
	 * ---------------------------------------------------------------------
	 */
#ifdef MONITOR_TRAPS
func brk_handler
	/* Extract the ISS */
	mrs	x10, esr_el3
	ubfx	x10, x10, #ESR_ISS_SHIFT, #ESR_ISS_LENGTH

	/* Ensure the console is initialized */
	bl	plat_crash_console_init

	adr	x4, brk_location
	bl	asm_print_str
	mrs	x4, elr_el3
	bl	asm_print_hex
	bl	asm_print_newline

	adr	x4, brk_message
	bl	asm_print_str
	mov	x4, x10
	mov	x5, #28
	bl	asm_print_hex_bits
	bl	asm_print_newline

	no_ret	plat_panic_handler
endfunc brk_handler
#endif /* MONITOR_TRAPS */