/*
 * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
 * Copyright (c) 2023, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Dispatch synchronous system register traps from lower ELs.
 */

#include <arch_features.h>
#include <arch_helpers.h>
#include <bl31/sync_handle.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>

int handle_sysreg_trap(uint64_t esr_el3, cpu_context_t *ctx)
{
	uint64_t __unused opcode = esr_el3 & ISS_SYSREG_OPCODE_MASK;

#if ENABLE_FEAT_RNG_TRAP
	if ((opcode == ISS_SYSREG_OPCODE_RNDR) || (opcode == ISS_SYSREG_OPCODE_RNDRRS)) {
		return plat_handle_rng_trap(esr_el3, ctx);
	}
#endif

#if IMPDEF_SYSREG_TRAP
	if ((opcode & ISS_SYSREG_OPCODE_IMPDEF) == ISS_SYSREG_OPCODE_IMPDEF) {
		return plat_handle_impdef_trap(esr_el3, ctx);
	}
#endif

	return TRAP_RET_UNHANDLED;
}
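/*
 * For reference, a minimal sketch of what a platform could provide as
 * plat_handle_rng_trap(), kept in a comment since the real definition
 * belongs to the platform port. The ISS layout assumed here follows the
 * Arm ARM encoding for EC 0x18 (Rt in bits [9:5], direction in bit [0],
 * 1 for reads), the GP register context offsets assume TF-A's 8-byte
 * slots, and the constant stands in for a real platform entropy source.
 *
 *	int plat_handle_rng_trap(uint64_t esr_el3, cpu_context_t *ctx)
 *	{
 *		unsigned int rt = (unsigned int)((esr_el3 >> 5U) & 0x1fU);
 *
 *		// Only emulate reads; a read targeting XZR can be dropped.
 *		if (((esr_el3 & 1ULL) == 0ULL) || (rt == 31U)) {
 *			return TRAP_RET_UNHANDLED;
 *		}
 *
 *		// Hand the value back in the trapping world's Xt.
 *		write_ctx_reg(get_gpregs_ctx(ctx),
 *			      CTX_GPREG_X0 + (rt << 3U), 0xabcdef0123456789ULL);
 *
 *		// Skip the trapped RNDR/RNDRRS instruction on ERET.
 *		return TRAP_RET_CONTINUE;
 *	}
 */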
static bool is_tge_enabled(void)
{
	u_register_t hcr_el2 = read_hcr_el2();

	return ((read_feat_vhe_id_field() != 0U) && ((hcr_el2 & HCR_TGE_BIT) != 0U));
}

/*
 * This function ensures that an undef is never injected into a
 * non-existent S-EL2. That could otherwise happen when the trap comes
 * from S-EL{1,0} while the non-secure world runs with the TGE bit set:
 * EL3 does not save/restore EL2 registers when only one world has EL2
 * enabled, so reading HCR_EL2.TGE here would return the NS world's value.
 */
static bool is_secure_trap_without_sel2(u_register_t scr)
{
	return ((scr & (SCR_NS_BIT | SCR_EEL2_BIT)) == 0);
}

static unsigned int target_el(unsigned int from_el, u_register_t scr)
{
	if (from_el > MODE_EL1) {
		return from_el;
	} else if (is_tge_enabled() && !is_secure_trap_without_sel2(scr)) {
		return MODE_EL2;
	} else {
		return MODE_EL1;
	}
}

static u_register_t get_elr_el3(u_register_t spsr_el3, u_register_t vbar, unsigned int target_el)
{
	unsigned int outgoing_el = GET_EL(spsr_el3);
	u_register_t elr_el3 = 0;

	if (outgoing_el == target_el) {
		/*
		 * The target EL is either EL1 or EL2; the SP bit of SPSR
		 * tells us the stack pointer selection (SPSel):
		 * Thread mode  (SP_EL0): 0
		 * Handler mode (SP_ELx): 1
		 */
		if ((spsr_el3 & (MODE_SP_MASK << MODE_SP_SHIFT)) == MODE_SP_ELX) {
			elr_el3 = vbar + CURRENT_EL_SPX;
		} else {
			elr_el3 = vbar + CURRENT_EL_SP0;
		}
	} else {
		/* Vector address for a lower EL using AArch64 */
		elr_el3 = vbar + LOWER_EL_AARCH64;
	}

	return elr_el3;
}
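/*
 * Worked example of the offsets above, per the AArch64 vector table
 * layout: an undef injected back into S-EL1 for a trap taken from S-EL1
 * with SPSel == 1 vectors to VBAR_EL1 + 0x200 (current EL with SP_ELx),
 * while one injected into EL2 for a trap taken from EL0 with TGE set
 * vectors to VBAR_EL2 + 0x400 (lower EL using AArch64).
 */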
/*
 * Explicitly create all bits of SPSR to get PSTATE at exception return.
 *
 * The code is based on "AArch64.exceptions.takeexception" described in
 * DDI0602 revision 2023-06.
 * "https://developer.arm.com/documentation/ddi0602/2023-06/Shared-Pseudocode/
 * aarch64-exceptions-takeexception"
 *
 * NOTE: This piece of code must be reviewed every release to ensure that
 * we keep up with new architectural features that introduce new SPSR bits.
 */
static u_register_t create_spsr(u_register_t old_spsr, unsigned int target_el)
{
	u_register_t new_spsr = 0;
	u_register_t sctlr;

	/*
	 * For each PSTATE bit below, the old value is carried over first
	 * and then overridden when the governing feature is implemented,
	 * mirroring the pseudocode's handling of unimplemented (RES0) bits.
	 */

	/* Set M bits for target EL in AArch64 mode, also get sctlr */
	if (target_el == MODE_EL2) {
		sctlr = read_sctlr_el2();
		new_spsr |= (SPSR_M_AARCH64 << SPSR_M_SHIFT) | SPSR_M_EL2H;
	} else {
		sctlr = read_sctlr_el1();
		new_spsr |= (SPSR_M_AARCH64 << SPSR_M_SHIFT) | SPSR_M_EL1H;
	}

	/* Mask all exceptions, update DAIF bits */
	new_spsr |= SPSR_DAIF_MASK << SPSR_DAIF_SHIFT;

	/* If FEAT_BTI is present, clear BTYPE bits */
	new_spsr |= old_spsr & (SPSR_BTYPE_MASK_AARCH64 << SPSR_BTYPE_SHIFT_AARCH64);
	if (is_armv8_5_bti_present()) {
		new_spsr &= ~(SPSR_BTYPE_MASK_AARCH64 << SPSR_BTYPE_SHIFT_AARCH64);
	}

	/* If SSBS is implemented, take the value from SCTLR.DSSBS */
	new_spsr |= old_spsr & SPSR_SSBS_BIT_AARCH64;
	if (is_feat_ssbs_present()) {
		if ((sctlr & SCTLR_DSSBS_BIT) != 0U) {
			new_spsr |= SPSR_SSBS_BIT_AARCH64;
		} else {
			new_spsr &= ~SPSR_SSBS_BIT_AARCH64;
		}
	}

	/* If FEAT_NMI is implemented, ALLINT = !(SCTLR.SPINTMASK) */
	new_spsr |= old_spsr & SPSR_ALLINT_BIT_AARCH64;
	if (is_feat_nmi_present()) {
		if ((sctlr & SCTLR_SPINTMASK_BIT) != 0U) {
			new_spsr &= ~SPSR_ALLINT_BIT_AARCH64;
		} else {
			new_spsr |= SPSR_ALLINT_BIT_AARCH64;
		}
	}

	/* Clear PSTATE.IL bit explicitly */
	new_spsr &= ~SPSR_IL_BIT;

	/* Clear PSTATE.SS bit explicitly */
	new_spsr &= ~SPSR_SS_BIT;
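	/*
	 * PSTATE.PAN is only forced on entry when the target translation
	 * regime actually honours it: EL1, or EL2 with TGE in effect (the
	 * VHE "host" case), and then only if SCTLR_ELx.SPAN is clear;
	 * otherwise the old value is simply kept.
	 */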
	/* Update PSTATE.PAN bit */
	new_spsr |= old_spsr & SPSR_PAN_BIT;
	if (is_feat_pan_present() &&
	    ((target_el == MODE_EL1) || ((target_el == MODE_EL2) && is_tge_enabled())) &&
	    ((sctlr & SCTLR_SPAN_BIT) == 0U)) {
		new_spsr |= SPSR_PAN_BIT;
	}

	/* Clear UAO bit if FEAT_UAO is present */
	new_spsr |= old_spsr & SPSR_UAO_BIT_AARCH64;
	if (is_feat_uao_present()) {
		new_spsr &= ~SPSR_UAO_BIT_AARCH64;
	}

	/* DIT bits are unchanged */
	new_spsr |= old_spsr & SPSR_DIT_BIT;

	/* If FEAT_MTE2 is implemented, mask tag faults by setting the TCO bit */
	new_spsr |= old_spsr & SPSR_TCO_BIT_AARCH64;
	if (read_feat_mte_id_field() >= MTE_IMPLEMENTED_ELX) {
		new_spsr |= SPSR_TCO_BIT_AARCH64;
	}

	/* NZCV bits are unchanged */
	new_spsr |= old_spsr & SPSR_NZCV;

	/* If FEAT_EBEP is present, set the PM bit */
	new_spsr |= old_spsr & SPSR_PM_BIT_AARCH64;
	if (is_feat_ebep_present()) {
		new_spsr |= SPSR_PM_BIT_AARCH64;
	}

	/* If FEAT_SEBEP is present, clear the PPEND bit */
	new_spsr |= old_spsr & SPSR_PPEND_BIT;
	if (is_feat_sebep_present()) {
		new_spsr &= ~SPSR_PPEND_BIT;
	}

	/* If FEAT_GCS is present, update the EXLOCK bit */
	new_spsr |= old_spsr & SPSR_EXLOCK_BIT_AARCH64;
	if (is_feat_gcs_present()) {
		u_register_t gcscr;

		if (target_el == MODE_EL2) {
			gcscr = read_gcscr_el2();
		} else {
			gcscr = read_gcscr_el1();
		}
		new_spsr |= (gcscr & GCSCR_EXLOCK_EN_BIT) ? SPSR_EXLOCK_BIT_AARCH64 : 0;
	}

	return new_spsr;
}
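/*
 * Example: injecting into S-EL1 from S-EL0 yields an SPSR with
 * M[3:0] == 0b0101 (EL1h), all of DAIF set, IL and SS clear, and the
 * NZCV and DIT bits carried over unchanged from the trapping context.
 */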
/*
 * Handler for injecting an Undefined Instruction exception into a lower
 * EL when that EL accesses a system register the (older) EL3 firmware is
 * unaware of.
 *
 * This is a safety net to avoid EL3 panics caused by system register
 * accesses that trigger an exception with syndrome EC=0x18.
 */
void inject_undef64(cpu_context_t *ctx)
{
	u_register_t esr = (EC_UNKNOWN << ESR_EC_SHIFT) | ESR_IL_BIT;
	el3_state_t *state = get_el3state_ctx(ctx);
	u_register_t elr_el3 = read_ctx_reg(state, CTX_ELR_EL3);
	u_register_t old_spsr = read_ctx_reg(state, CTX_SPSR_EL3);
	u_register_t scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
	u_register_t new_spsr = 0;
	unsigned int to_el = target_el(GET_EL(old_spsr), scr_el3);

	if (to_el == MODE_EL2) {
		write_elr_el2(elr_el3);
		elr_el3 = get_elr_el3(old_spsr, read_vbar_el2(), to_el);
		write_esr_el2(esr);
		write_spsr_el2(old_spsr);
	} else {
		write_elr_el1(elr_el3);
		elr_el3 = get_elr_el3(old_spsr, read_vbar_el1(), to_el);
		write_esr_el1(esr);
		write_spsr_el1(old_spsr);
	}

	new_spsr = create_spsr(old_spsr, to_el);

	write_ctx_reg(state, CTX_SPSR_EL3, new_spsr);
	write_ctx_reg(state, CTX_ELR_EL3, elr_el3);
}
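/*
 * Typical use from an EL3 synchronous exception path (illustrative only;
 * the actual call site lives in BL31's exception handling code):
 *
 *	if (handle_sysreg_trap(esr_el3, ctx) == TRAP_RET_UNHANDLED) {
 *		// Unknown sysreg access from a lower EL: reflect it back
 *		// as an Undefined Instruction instead of panicking EL3.
 *		inject_undef64(ctx);
 *	}
 */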