/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <context.h>
#include <cpu_data.h>
#include <ea_handle.h>
#include <interrupt_mgmt.h>
#include <platform_def.h>
#include <runtime_svc.h>
#include <smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32

	/*
	 * Handle External Abort by delegating to the platform's EA handler.
	 * Once the platform handler returns, the macro exits EL3 and returns
	 * to where the abort was taken from.
	 *
	 * This macro assumes that x30 is available for use.
	 *
	 * 'abort_type' is a constant passed to the platform handler,
	 * indicating the cause of the External Abort.
	 */
	.macro handle_ea abort_type
	/* Save GP registers */
	bl	save_gp_registers

	/* Setup exception class and syndrome arguments for platform handler */
	mov	x0, \abort_type
	mrs	x1, esr_el3
	adr	x30, el3_exit
	b	delegate_ea
	.endm

	/* ---------------------------------------------------------------------
	 * This macro handles Synchronous exceptions.
	 * Only SMC exceptions are supported.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	mrs	x30, cntpct_el0
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	mrs	x29, tpidr_el3
	str	x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Handle SMC exceptions separately from other synchronous exceptions */
	cmp	x30, #EC_AARCH32_SMC
	b.eq	smc_handler32

	cmp	x30, #EC_AARCH64_SMC
	b.eq	smc_handler64

	/* Check for I/D aborts from lower EL */
	cmp	x30, #EC_IABORT_LOWER_EL
	b.eq	1f

	cmp	x30, #EC_DABORT_LOWER_EL
	b.ne	2f

1:
	/* Test for EA bit in the instruction syndrome */
	mrs	x30, esr_el3
	tbz	x30, #ESR_ISS_EABORT_EA_BIT, 2f
	handle_ea #ERROR_EA_SYNC

2:
	/* Other kinds of synchronous exceptions are not handled */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	b	report_unhandled_exception
	.endm


	/* ---------------------------------------------------------------------
	 * This macro handles FIQ or IRQ interrupts i.e. EL3, S-EL1 and NS
	 * interrupts.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_interrupt_exception label
	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	bl	save_gp_registers

	/* Save the EL3 system registers needed to return from this exception */
	mrs	x0, spsr_el3
	mrs	x1, elr_el3
	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Switch to the runtime stack i.e. SP_EL0 */
	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	mov	x20, sp
	msr	spsel, #0
	mov	sp, x2

	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	bl	plat_ic_get_pending_interrupt_type
	cmp	x0, #INTR_TYPE_INVAL
	b.eq	interrupt_exit_\label

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be 'cause of the following conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	bl	get_interrupt_type_handler
	cbz	x0, interrupt_exit_\label
	mov	x21, x0

	mov	x0, #INTR_ID_UNAVAILABLE

	/* Set the current security state in the 'flags' parameter */
	mrs	x2, scr_el3
	ubfx	x1, x2, #0, #1

	/* Restore the reference to the 'handle' i.e. SP_EL3 */
	mov	x2, x20

	/* x3 will point to a cookie (not used now) */
	mov	x3, xzr

	/* Call the interrupt type handler */
	blr	x21

interrupt_exit_\label:
	/* Return from exception, possibly in a different security state */
	b	el3_exit

	.endm


vector_base runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
	check_vector_size sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
	check_vector_size irq_sp_el0


vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
	check_vector_size fiq_sp_el0


vector_entry serror_sp_el0
	b	report_unhandled_exception
	check_vector_size serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
	check_vector_size sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
	check_vector_size irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
	check_vector_size fiq_sp_elx

vector_entry serror_sp_elx
	b	report_unhandled_exception
	check_vector_size serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * This exception vector will be the entry point for SMCs and traps
	 * that are unhandled at lower ELs most commonly. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	handle_sync_exception
	check_vector_size sync_exception_aarch64

vector_entry irq_aarch64
	handle_interrupt_exception irq_aarch64
	check_vector_size irq_aarch64

vector_entry fiq_aarch64
	handle_interrupt_exception fiq_aarch64
	check_vector_size fiq_aarch64

vector_entry serror_aarch64
	msr	daifclr, #DAIF_ABT_BIT

	/*
	 * Explicitly save x30 so as to free up a register and to enable
	 * branching
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	handle_ea #ERROR_EA_ASYNC
	check_vector_size serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * This exception vector will be the entry point for SMCs and traps
	 * that are unhandled at lower ELs most commonly. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	handle_sync_exception
	check_vector_size sync_exception_aarch32

vector_entry irq_aarch32
	handle_interrupt_exception irq_aarch32
	check_vector_size irq_aarch32

vector_entry fiq_aarch32
	handle_interrupt_exception fiq_aarch32
	check_vector_size fiq_aarch32

vector_entry serror_aarch32
	msr	daifclr, #DAIF_ABT_BIT

	/*
	 * Explicitly save x30 so as to free up a register and to enable
	 * branching
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	handle_ea #ERROR_EA_ASYNC
	check_vector_size serror_aarch32


	/* ---------------------------------------------------------------------
	 * This macro takes an argument in x16 that is the index in the
	 * 'rt_svc_descs_indices' array, checks that the value in the array is
	 * valid, and loads in x15 the pointer to the handler of that service.
	 * ---------------------------------------------------------------------
	 */
	.macro	load_rt_svc_desc_pointer
	/* Load descriptor index from array of indices */
	adr	x14, rt_svc_descs_indices
	ldrb	w15, [x14, x16]

#if SMCCC_MAJOR_VERSION == 1
	/* Any index greater than 127 is invalid. Check bit 7. */
	tbnz	w15, 7, smc_unknown
#elif SMCCC_MAJOR_VERSION == 2
	/* Verify that the top 3 bits of the loaded index are 0 (w15 <= 31) */
	cmp	w15, #31
	b.hi	smc_unknown
#endif /* SMCCC_MAJOR_VERSION */

	/*
	 * Get the descriptor using the index
	 * x11 = (base + off), w15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 */
	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]
	.endm

	/* ---------------------------------------------------------------------
	 * The following code handles secure monitor calls.
	 * Depending upon the execution state from where the SMC has been
	 * invoked, it frees some general purpose registers to perform the
	 * remaining tasks. They involve finding the runtime service handler
	 * that is the target of the SMC & switching to runtime stacks (SP_EL0)
	 * before calling the handler.
	 *
	 * Note that x30 has been explicitly saved and can be used here
	 * ---------------------------------------------------------------------
	 */
func smc_handler
smc_handler32:
	/* Check whether aarch32 issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

smc_handler64:
	/*
	 * Populate the parameters for the SMC handler.
	 * We already have x0-x4 in place. x5 will point to a cookie (not used
	 * now). x6 will point to the context structure (SP_EL3) and x7 will
	 * contain flags we need to pass to the handler.
	 *
	 * Save x4-x29 and sp_el0.
	 */
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	mrs	x18, sp_el0
	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]

	mov	x5, xzr
	mov	x6, sp

#if SMCCC_MAJOR_VERSION == 1

	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH

	load_rt_svc_desc_pointer

#elif SMCCC_MAJOR_VERSION == 2

	/* Bit 31 must be set */
	tbz	x0, #FUNCID_TYPE_SHIFT, smc_unknown

	/*
	 * Check MSB of namespace to decide between compatibility/vendor and
	 * SPCI/SPRT
	 */
	tbz	x0, #(FUNCID_NAMESPACE_SHIFT + 1), compat_or_vendor

	/* Namespaces SPRT and SPCI currently unimplemented */
	b	smc_unknown

compat_or_vendor:

	/* Namespace is b'00 (compatibility) or b'01 (vendor) */

	/*
	 * Add the LSB of the namespace (bit [28]) to the OEN [27:24] to create
	 * a 5-bit index into the rt_svc_descs_indices array.
	 *
	 * The low 16 entries of the rt_svc_descs_indices array correspond to
	 * OENs of the compatibility namespace and the top 16 entries of the
	 * array are assigned to the vendor namespace descriptor.
	 */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #(FUNCID_OEN_WIDTH + 1)

	load_rt_svc_desc_pointer

#endif /* SMCCC_MAJOR_VERSION */

	/*
	 * Restore the saved C runtime stack value which will become the new
	 * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
	 * structure prior to the last ERET from EL3.
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* Switch to SP_EL0 */
	msr	spsel, #0

	/*
	 * Save the SPSR_EL3, ELR_EL3, & SCR_EL3 in case there is a world
	 * switch during SMC handling.
	 * TODO: Revisit if all system registers can be saved later.
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	mrs	x18, scr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	str	x18, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
	bfi	x7, x18, #0, #1

	mov	sp, x12

	/*
	 * Call the Secure Monitor Call handler and then drop directly into
	 * el3_exit() which will program any remaining architectural state
	 * prior to issuing the ERET to the desired lower EL.
	 */
#if DEBUG
	cbz	x15, rt_svc_fw_critical_error
#endif
	blr	x15

	b	el3_exit

smc_unknown:
	/*
	 * Unknown SMC call. Populate return value with SMC_UNK, restore
	 * GP registers, and return to caller.
	 */
	mov	x0, #SMC_UNK
	str	x0, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	restore_gp_registers_eret

smc_prohibited:
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x0, #SMC_UNK
	eret

rt_svc_fw_critical_error:
	/* Switch to SP_ELx */
	msr	spsel, #1
	no_ret	report_unhandled_exception
endfunc smc_handler

/*
 * Delegate External Abort handling to platform's EA handler. This function
 * assumes that all GP registers have been saved by the caller.
 *
 * x0: EA reason
 * x1: EA syndrome
 */
func delegate_ea
	/* Save EL3 state */
	mrs	x2, spsr_el3
	mrs	x3, elr_el3
	stp	x2, x3, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/*
	 * Save ESR as handling might involve lower ELs, and returning back to
	 * EL3 from there would trample the original ESR.
	 */
	mrs	x4, scr_el3
	mrs	x5, esr_el3
	stp	x4, x5, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/*
	 * Setup rest of arguments, and call platform External Abort handler.
	 *
	 * x0: EA reason (already in place)
	 * x1: Exception syndrome (already in place).
	 * x2: Cookie (unused for now).
	 * x3: Context pointer.
	 * x4: Flags (security state from SCR for now).
	 */
	mov	x2, xzr
	mov	x3, sp
	ubfx	x4, x4, #0, #1

	/* Switch to runtime stack */
	ldr	x5, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	msr	spsel, #0
	mov	sp, x5

	/* Preserve x30 (return address) across the C call in callee-saved x29 */
	mov	x29, x30
	bl	plat_ea_handler
	mov	x30, x29

	/* Make SP point to context */
	msr	spsel, #1

	/* Restore EL3 state */
	ldp	x1, x2, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	msr	spsr_el3, x1
	msr	elr_el3, x2

	/* Restore ESR_EL3 and SCR_EL3 */
	ldp	x3, x4, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	msr	scr_el3, x3
	msr	esr_el3, x4

	ret
endfunc delegate_ea