/*
 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl31/ea_handle.h>
#include <bl31/interrupt_mgmt.h>
#include <bl31/sync_handle.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/smccc.h>

        .globl  runtime_exceptions

        .globl  sync_exception_sp_el0
        .globl  irq_sp_el0
        .globl  fiq_sp_el0
        .globl  serror_sp_el0

        .globl  sync_exception_sp_elx
        .globl  irq_sp_elx
        .globl  fiq_sp_elx
        .globl  serror_sp_elx

        .globl  sync_exception_aarch64
        .globl  irq_aarch64
        .globl  fiq_aarch64
        .globl  serror_aarch64

        .globl  sync_exception_aarch32
        .globl  irq_aarch32
        .globl  fiq_aarch32
        .globl  serror_aarch32

        /*
         * Save LR and make x30 available, as most of the routines in the
         * vector entries need a free register.
         */
        .macro save_x30
        str     x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
        .endm

        .macro restore_x30
        ldr     x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
        .endm

        /*
         * Macro that synchronizes errors (EA) and checks for a pending
         * SError. On detecting a pending SError it either reflects it back
         * to the lower EL (Kernel First Handling, KFH) or handles it in EL3
         * (Firmware First Handling, FFH), depending on the EA routing model.
         */
        .macro sync_and_handle_pending_serror
        dsb     sy
        isb
        mrs     x30, ISR_EL1
        tbz     x30, #ISR_A_SHIFT, 2f
#if HANDLE_EA_EL3_FIRST_NS
        mrs     x30, scr_el3
        tst     x30, #SCR_EA_BIT
        b.eq    1f
        bl      handle_pending_async_ea
        b       2f
#endif
1:
        /* This function never returns, but needs LR for its decision making */
        bl      reflect_pending_async_ea_to_lower_el
2:
        .endm

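        /*
         * For clarity, an illustrative pseudo-C sketch of the logic the macro
         * above implements (the handler names are the actual routines called
         * from the assembly; the C-like register accessors are illustrative
         * only):
         *
         *      dsbsy(); isb();                         // synchronize errors
         *      if ((read_isr_el1() >> ISR_A_SHIFT) & 1) {  // SError pending?
         *              if (read_scr_el3() & SCR_EA_BIT)    // FFH routing
         *                      handle_pending_async_ea();
         *              else                                // KFH routing
         *                      reflect_pending_async_ea_to_lower_el();
         *      }
         *
         * Note that the SCR_EL3.EA check is only compiled in when
         * HANDLE_EA_EL3_FIRST_NS is set; otherwise a pending SError is always
         * reflected back to the lower EL.
         */
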
        /* ---------------------------------------------------------------------
         * This macro handles Synchronous exceptions.
         * Only SMC exceptions and system register traps are supported.
         * ---------------------------------------------------------------------
         */
        .macro handle_sync_exception
#if ENABLE_RUNTIME_INSTRUMENTATION
        /*
         * Read the timestamp value and store it in per-cpu data. The value
         * will be extracted from per-cpu data by the C level SMC handler and
         * saved to the PMF timestamp region.
         */
        mrs     x30, cntpct_el0
        str     x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
        mrs     x29, tpidr_el3
        str     x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
        ldr     x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

        mrs     x30, esr_el3
        ubfx    x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

        /* Handle SMC exceptions separately from other synchronous exceptions */
        cmp     x30, #EC_AARCH32_SMC
        b.eq    smc_handler32

        cmp     x30, #EC_AARCH64_SMC
        b.eq    sync_handler64

        cmp     x30, #EC_AARCH64_SYS
        b.eq    sync_handler64

        /* Synchronous exceptions other than the above are assumed to be EA */
        ldr     x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
        b       handle_lower_el_sync_ea
        .endm

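        /*
         * For reference, the EC values compared above are taken from
         * ESR_EL3[31:26] (ESR_EC_SHIFT = 26, ESR_EC_LENGTH = 6). Per the Arm
         * ARM, EC_AARCH32_SMC = 0x13, EC_AARCH64_SMC = 0x17 and
         * EC_AARCH64_SYS = 0x18. For example, an SMC from an AArch64 caller
         * reports ESR_EL3 = 0x5e000000 | imm16, and
         * (0x5e000000 >> 26) & 0x3f = 0x17.
         */
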
vector_base runtime_exceptions

        /* ---------------------------------------------------------------------
         * Current EL with SP_EL0 : 0x0 - 0x200
         * ---------------------------------------------------------------------
         */
vector_entry sync_exception_sp_el0
#ifdef MONITOR_TRAPS
        stp     x29, x30, [sp, #-16]!

        mrs     x30, esr_el3
        ubfx    x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

        /* Check for BRK */
        cmp     x30, #EC_BRK
        b.eq    brk_handler

        ldp     x29, x30, [sp], #16
#endif /* MONITOR_TRAPS */

        /* We don't expect any synchronous exceptions from EL3 */
        b       report_unhandled_exception
end_vector_entry sync_exception_sp_el0

vector_entry irq_sp_el0
        /*
         * EL3 code is non-reentrant. Any asynchronous exception is a serious
         * error. Loop infinitely.
         */
        b       report_unhandled_interrupt
end_vector_entry irq_sp_el0

vector_entry fiq_sp_el0
        b       report_unhandled_interrupt
end_vector_entry fiq_sp_el0

vector_entry serror_sp_el0
        no_ret  plat_handle_el3_ea
end_vector_entry serror_sp_el0

        /* ---------------------------------------------------------------------
         * Current EL with SP_ELx: 0x200 - 0x400
         * ---------------------------------------------------------------------
         */
vector_entry sync_exception_sp_elx
        /*
         * This exception will trigger if anything went wrong during a previous
         * exception entry or exit, or while handling an earlier unexpected
         * synchronous exception. There is a high probability that SP_EL3 is
         * corrupted.
         */
        b       report_unhandled_exception
end_vector_entry sync_exception_sp_elx

vector_entry irq_sp_elx
        b       report_unhandled_interrupt
end_vector_entry irq_sp_elx

vector_entry fiq_sp_elx
        b       report_unhandled_interrupt
end_vector_entry fiq_sp_elx

vector_entry serror_sp_elx
#if HANDLE_EA_EL3_FIRST_NS
        /*
         * This vector will trigger if the exception was taken due to an SError
         * in EL3, or because a pending asynchronous external abort from a
         * lower EL was triggered by the implicit/explicit error
         * synchronization performed during EL3 entry (SCR_EL3.EA=1). In the
         * former case we continue with "plat_handle_el3_ea". The latter case
         * occurs when the PSTATE.A bit is cleared in
         * "handle_pending_async_ea", which means we are taking a nested
         * exception in EL3. Call the handler for the asynchronous EA, which
         * will ERET back to the original EL3 handler if this is a nested
         * exception. Also, unmask EA so that we catch any further EAs that
         * arise while handling this nested exception at EL3.
         */
        save_x30
        ldr     x30, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
        cbz     x30, 1f
        /*
         * This is a nested exception; clear the flag to avoid taking this
         * path for further exceptions caused by EA handling.
         */
        str     xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
        unmask_async_ea
        b       handle_lower_el_async_ea
1:
        restore_x30
#endif
        no_ret  plat_handle_el3_ea

end_vector_entry serror_sp_elx

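        /*
         * All the lower EL vector entries below share the same prologue
         * before branching to (or invoking) their handler. In outline:
         *
         *      save_x30                        - free up x30 as scratch
         *      apply_at_speculative_wa         - speculative AT errata workaround
         *      sync_and_handle_pending_serror  - drain and dispatch pending SErrors
         *      unmask_async_ea                 - clear PSTATE.A while in EL3
         */
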
        /* ---------------------------------------------------------------------
         * Lower EL using AArch64 : 0x400 - 0x600
         * ---------------------------------------------------------------------
         */
vector_entry sync_exception_aarch64
        /*
         * This exception vector will most commonly be the entry point for SMCs
         * and for traps that are unhandled at lower ELs. SP_EL3 should point
         * to a valid cpu context where the general purpose and system register
         * state can be saved.
         */
        save_x30
        apply_at_speculative_wa
        sync_and_handle_pending_serror
        unmask_async_ea
        handle_sync_exception
end_vector_entry sync_exception_aarch64

vector_entry irq_aarch64
        save_x30
        apply_at_speculative_wa
        sync_and_handle_pending_serror
        unmask_async_ea
        b       handle_interrupt_exception
end_vector_entry irq_aarch64

vector_entry fiq_aarch64
        save_x30
        apply_at_speculative_wa
        sync_and_handle_pending_serror
        unmask_async_ea
        b       handle_interrupt_exception
end_vector_entry fiq_aarch64

        /*
         * We need to synchronize any outstanding SErrors, since we can receive
         * a burst of errors; reuse the synchronization mechanism above to
         * catch any further errors that are still pending.
         */
vector_entry serror_aarch64
        save_x30
        apply_at_speculative_wa
        sync_and_handle_pending_serror
        unmask_async_ea
        b       handle_lower_el_async_ea
end_vector_entry serror_aarch64

        /* ---------------------------------------------------------------------
         * Lower EL using AArch32 : 0x600 - 0x800
         * ---------------------------------------------------------------------
         */
vector_entry sync_exception_aarch32
        /*
         * This exception vector will most commonly be the entry point for SMCs
         * and for traps that are unhandled at lower ELs. SP_EL3 should point
         * to a valid cpu context where the general purpose and system register
         * state can be saved.
         */
        save_x30
        apply_at_speculative_wa
        sync_and_handle_pending_serror
        unmask_async_ea
        handle_sync_exception
end_vector_entry sync_exception_aarch32

vector_entry irq_aarch32
        save_x30
        apply_at_speculative_wa
        sync_and_handle_pending_serror
        unmask_async_ea
        b       handle_interrupt_exception
end_vector_entry irq_aarch32

vector_entry fiq_aarch32
        save_x30
        apply_at_speculative_wa
        sync_and_handle_pending_serror
        unmask_async_ea
        b       handle_interrupt_exception
end_vector_entry fiq_aarch32

        /*
         * We need to synchronize any outstanding SErrors, since we can receive
         * a burst of errors; reuse the synchronization mechanism above to
         * catch any further errors that are still pending.
         */
vector_entry serror_aarch32
        save_x30
        apply_at_speculative_wa
        sync_and_handle_pending_serror
        unmask_async_ea
        b       handle_lower_el_async_ea
end_vector_entry serror_aarch32

#ifdef MONITOR_TRAPS
        .section .rodata.brk_string, "aS"
brk_location:
        .asciz "Error at instruction 0x"
brk_message:
        .asciz "Unexpected BRK instruction with value 0x"
#endif /* MONITOR_TRAPS */

        /* ---------------------------------------------------------------------
         * The following code handles secure monitor calls.
         * Depending upon the execution state from which the SMC has been
         * invoked, it frees some general purpose registers to perform the
         * remaining tasks. These involve finding the runtime service handler
         * that is the target of the SMC and switching to the runtime stack
         * (SP_EL0) before calling the handler.
         *
         * Note that x30 has been explicitly saved and can be used here.
         * ---------------------------------------------------------------------
         */
func sync_exception_handler
smc_handler32:
        /* Check whether the AArch32 caller issued an SMC64 */
        tbnz    x0, #FUNCID_CC_SHIFT, smc_prohibited

sync_handler64:
        /* NOTE: The code below must preserve x0-x4 */

        /*
         * Save general purpose and ARMv8.3-PAuth registers (if enabled).
         * Also save PMCR_EL0 and set the PSTATE to a known state.
         */
        bl      prepare_el3_entry

#if ENABLE_PAUTH
        /* Load and program APIAKey firmware key */
        bl      pauth_load_bl31_apiakey
#endif

        /*
         * Populate the parameters for the SMC handler.
         * We already have x0-x4 in place. x5 will point to a cookie (not used
         * now). x6 will point to the context structure (SP_EL3) and x7 will
         * contain flags we need to pass to the handler.
         */
        mov     x5, xzr
        mov     x6, sp

        /*
         * Restore the saved C runtime stack value which will become the new
         * SP_EL0, i.e. the EL3 runtime stack. It was saved in the
         * 'cpu_context' structure prior to the last ERET from EL3.
         */
        ldr     x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

        /* Switch to SP_EL0 */
        msr     spsel, #MODE_SP_EL0

        /*
         * Save SPSR_EL3 and ELR_EL3 in case there is a world
         * switch during SMC handling.
         * TODO: Revisit if all system registers can be saved later.
         */
        mrs     x16, spsr_el3
        mrs     x17, elr_el3
        stp     x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

        /* Load SCR_EL3 */
        mrs     x18, scr_el3

        /* Check for system register traps */
        mrs     x16, esr_el3
        ubfx    x17, x16, #ESR_EC_SHIFT, #ESR_EC_LENGTH
        cmp     x17, #EC_AARCH64_SYS
        b.eq    sysreg_handler64

        /* Clear the flags register */
        mov     x7, xzr

#if ENABLE_RME
        /* Copy the SCR_EL3.NSE bit to the flags to indicate the caller's security */
        ubfx    x7, x18, #SCR_NSE_SHIFT, #1

        /*
         * Shift the copied SCR_EL3.NSE bit by 5 to create space for the
         * SCR_EL3.NS bit. Bit 5 of the flags corresponds to the SCR_EL3.NSE
         * bit.
         */
        lsl     x7, x7, #5
#endif /* ENABLE_RME */

        /* Copy the SCR_EL3.NS bit to the flags to indicate the caller's security */
        bfi     x7, x18, #0, #1

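        /*
         * To illustrate the resulting encoding of the caller's security state
         * in the flags register x7: bit 0 holds SCR_EL3.NS and, with
         * ENABLE_RME, bit 5 holds SCR_EL3.NSE. For example, a Non-secure
         * caller yields x7 = 0x01, a Secure caller 0x00 and a Realm caller
         * (NSE = 1, NS = 1) 0x21.
         */
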
        mov     sp, x12

        /*
         * Per the SMCCC documentation, bits [23:17] must be zero for Fast
         * SMCs. Other values are reserved for future use. Ensure that these
         * bits are zero; if they are not, report an unknown SMC.
         */
        tbz     x0, #FUNCID_TYPE_SHIFT, 2f /* Skip the check if it is a Yield Call */
        tst     x0, #(FUNCID_FC_RESERVED_MASK << FUNCID_FC_RESERVED_SHIFT)
        b.ne    smc_unknown

        /*
         * Per SMCCC v1.3, a caller can set the SVE hint bit in the SMC FID
         * passed through x0. Copy the SVE hint bit to the flags and mask the
         * bit in the smc_fid passed to the standard service dispatcher.
         * A service/dispatcher can retrieve the SVE hint bit state from the
         * flags using the appropriate helper.
         */
2:
        and     x16, x0, #(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT)
        orr     x7, x7, x16
        bic     x0, x0, #(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT)

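        /*
         * Recap of the SMCCC function ID fields used below, per the SMCCC
         * specification: bit 31 is the call type (1 = Fast, 0 = Yield), bit 30
         * the calling convention (SMC64/SMC32), bits [29:24] the owning entity
         * number (OEN) and bit 16 the SVE hint masked out above. For example,
         * PSCI_VERSION (0x84000000) is a Fast SMC32 call with OEN = 4.
         */
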
        /* Get the unique owning entity number */
        ubfx    x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
        ubfx    x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
        orr     x16, x16, x15, lsl #FUNCID_OEN_WIDTH

        /* Load the descriptor index from the array of indices */
        adrp    x14, rt_svc_descs_indices
        add     x14, x14, :lo12:rt_svc_descs_indices
        ldrb    w15, [x14, x16]

        /* Any index greater than 127 is invalid. Check bit 7. */
        tbnz    w15, 7, smc_unknown

        /*
         * Get the descriptor using the index
         * x11 = (base + off), w15 = index
         *
         * handler = (base + off) + (index << log2(size))
         */
        adr     x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
        lsl     w10, w15, #RT_SVC_SIZE_LOG2
        ldr     x15, [x11, w10, uxtw]

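        /*
         * x15 now holds the handler of the matching runtime service
         * descriptor. Its C prototype is rt_svc_handle_t (declared in
         * common/runtime_svc.h), roughly:
         *
         * uintptr_t (*handler)(uint32_t smc_fid, u_register_t x1,
         *                      u_register_t x2, u_register_t x3,
         *                      u_register_t x4, void *cookie,
         *                      void *handle, u_register_t flags);
         *
         * which matches the x0-x7 arguments populated earlier.
         */
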
        /*
         * Call the Secure Monitor Call handler and then drop directly into
         * el3_exit(), which will program any remaining architectural state
         * prior to issuing the ERET to the desired lower EL.
         */
#if DEBUG
        cbz     x15, rt_svc_fw_critical_error
#endif
        blr     x15

        b       el3_exit

sysreg_handler64:
        mov     x0, x16         /* ESR_EL3, containing syndrome information */
        mov     x1, x6          /* lower EL's context */
        mov     x19, x6         /* save context pointer for after the call */
        mov     sp, x12         /* EL3 runtime stack, as loaded above */

        /* int handle_sysreg_trap(uint64_t esr_el3, cpu_context_t *ctx); */
        bl      handle_sysreg_trap
        /*
         * returns:
         *   -1: unhandled trap, panic
         *    0: handled trap, return to the trapping instruction (repeating it)
         *    1: handled trap, return to the next instruction
         */

        tst     w0, w0
        b.mi    elx_panic       /* negative return value: panic */
        b.eq    1f              /* zero: do not change ELR_EL3 */

        /* Advance the PC to continue after the instruction */
        ldr     x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3]
        add     x1, x1, #4
        str     x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3]
1:
        b       el3_exit

smc_unknown:
        /*
         * Unknown SMC call. Populate the return value with SMC_UNK and call
         * el3_exit(), which will restore the remaining architectural state,
         * i.e. the SYS, GP and PAuth registers (if any), prior to issuing the
         * ERET to the desired lower EL.
         */
        mov     x0, #SMC_UNK
        str     x0, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
        b       el3_exit

smc_prohibited:
        restore_ptw_el1_sys_regs
        ldp     x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
        ldr     x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
        mov     x0, #SMC_UNK
        exception_return

#if DEBUG
rt_svc_fw_critical_error:
        /* Switch to SP_ELx */
        msr     spsel, #MODE_SP_ELX
        no_ret  report_unhandled_exception
#endif
endfunc sync_exception_handler

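        /*
         * The interrupt type handlers invoked below are registered C
         * functions of type interrupt_type_handler_t (declared in
         * bl31/interrupt_mgmt.h), roughly:
         *
         * uint64_t (*handler)(uint32_t id, uint32_t flags, void *handle,
         *                     void *cookie);
         *
         * which is why x0-x3 are loaded with the interrupt ID, the security
         * state flags, the context pointer and a cookie before the call.
         */
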
        /* ---------------------------------------------------------------------
         * This function handles FIQ and IRQ interrupts, i.e. EL3, S-EL1 and
         * NS interrupts.
         *
         * Note that x30 has been explicitly saved and can be used here.
         * ---------------------------------------------------------------------
         */
func handle_interrupt_exception
        /*
         * Save general purpose and ARMv8.3-PAuth registers (if enabled).
         * Also save PMCR_EL0 and set the PSTATE to a known state.
         */
        bl      prepare_el3_entry

#if ENABLE_PAUTH
        /* Load and program APIAKey firmware key */
        bl      pauth_load_bl31_apiakey
#endif

        /* Save the EL3 system registers needed to return from this exception */
        mrs     x0, spsr_el3
        mrs     x1, elr_el3
        stp     x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

        /* Switch to the runtime stack, i.e. SP_EL0 */
        ldr     x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
        mov     x20, sp
        msr     spsel, #MODE_SP_EL0
        mov     sp, x2

        /*
         * Find out whether this is a valid interrupt type.
         * If the interrupt controller reports a spurious interrupt then return
         * to where we came from.
         */
        bl      plat_ic_get_pending_interrupt_type
        cmp     x0, #INTR_TYPE_INVAL
        b.eq    interrupt_exit

        /*
         * Get the registered handler for this interrupt type.
         * A NULL return value could be because of one of the following
         * conditions:
         *
         * a. An interrupt of a type was routed correctly but a handler for its
         *    type was not registered.
         *
         * b. An interrupt of a type was not routed correctly so a handler for
         *    its type was not registered.
         *
         * c. An interrupt of a type was routed correctly to EL3, but was
         *    deasserted before its pending state could be read. Another
         *    interrupt of a different type pended at the same time and its
         *    type was reported as pending instead. However, a handler for this
         *    type was not registered.
         *
         * a. and b. can only happen due to a programming error. The
         * occurrence of c. could be beyond the control of Trusted Firmware.
         * It makes sense to return from this exception instead of reporting an
         * error.
         */
        bl      get_interrupt_type_handler
        cbz     x0, interrupt_exit
        mov     x21, x0

        mov     x0, #INTR_ID_UNAVAILABLE

        /* Set the current security state in the 'flags' parameter */
        mrs     x2, scr_el3
        ubfx    x1, x2, #0, #1

        /* Restore the reference to the 'handle', i.e. SP_EL3 */
        mov     x2, x20

        /* x3 will point to a cookie (not used now) */
        mov     x3, xzr

        /* Call the interrupt type handler */
        blr     x21

interrupt_exit:
        /* Return from the exception, possibly in a different security state */
        b       el3_exit
endfunc handle_interrupt_exception

        /* ---------------------------------------------------------------------
         * The following code handles exceptions caused by BRK instructions.
         * Following a BRK instruction, the only valid course of action is
         * to print some information and panic, as the code that caused it is
         * likely in an inconsistent internal state.
         *
         * This is initially intended to be used in conjunction with
         * __builtin_trap.
         * ---------------------------------------------------------------------
         */
#ifdef MONITOR_TRAPS
func brk_handler
        /* Extract the ISS */
        mrs     x10, esr_el3
        ubfx    x10, x10, #ESR_ISS_SHIFT, #ESR_ISS_LENGTH

        /* Ensure the console is initialized */
        bl      plat_crash_console_init

        adr     x4, brk_location
        bl      asm_print_str
        mrs     x4, elr_el3
        bl      asm_print_hex
        bl      asm_print_newline

        adr     x4, brk_message
        bl      asm_print_str
        mov     x4, x10
        mov     x5, #28
        bl      asm_print_hex_bits
        bl      asm_print_newline

        no_ret  plat_panic_handler
endfunc brk_handler
#endif /* MONITOR_TRAPS */