/*
 * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
 * Copyright (c) 2023, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Dispatch synchronous system register traps from lower ELs.
 */

#include <arch_features.h>
#include <arch_helpers.h>
#include <bl31/sync_handle.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>

int handle_sysreg_trap(uint64_t esr_el3, cpu_context_t *ctx)
{
	uint64_t __unused opcode = esr_el3 & ISS_SYSREG_OPCODE_MASK;

#if ENABLE_FEAT_RNG_TRAP
	if ((opcode == ISS_SYSREG_OPCODE_RNDR) || (opcode == ISS_SYSREG_OPCODE_RNDRRS)) {
		return plat_handle_rng_trap(esr_el3, ctx);
	}
#endif

#if IMPDEF_SYSREG_TRAP
	if ((opcode & ISS_SYSREG_OPCODE_IMPDEF) == ISS_SYSREG_OPCODE_IMPDEF) {
		return plat_handle_impdef_trap(esr_el3, ctx);
	}
#endif

	return TRAP_RET_UNHANDLED;
}
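/*
 * Illustrative sketch, not part of the original file: one way a platform
 * might implement the plat_handle_rng_trap() hook invoked above when
 * ENABLE_FEAT_RNG_TRAP is set. The Rt and Direction fields are decoded
 * here with raw shifts following the architectural ISS layout for EC 0x18
 * traps (Rt in bits [9:5], Direction in bit [0]). example_trng_read() is a
 * hypothetical stand-in for a real platform entropy source, and the
 * example_ names are used so this sketch does not clash with an actual
 * platform implementation of the hook.
 */
static inline uint64_t example_trng_read(void)
{
	/* Placeholder value; a real implementation would query a TRNG. */
	return 0x0123456789abcdefULL;
}

static int __unused example_handle_rng_trap(uint64_t esr_el3, cpu_context_t *ctx)
{
	/* Rt: destination GPR of the trapped MRS instruction. */
	unsigned int rt = (unsigned int)((esr_el3 >> 5U) & 0x1fU);

	/* Only reads (Direction bit set) of RNDR/RNDRRS are expected here. */
	if ((esr_el3 & 1ULL) == 0ULL) {
		return TRAP_RET_UNHANDLED;
	}

	/* Results targeting XZR (Rt == 31) are simply discarded. */
	if (rt != 31U) {
		write_ctx_reg(get_gpregs_ctx(ctx),
			      CTX_GPREG_X0 + (rt << 3U), example_trng_read());
	}

	/* Tell the caller to resume after the trapped instruction. */
	return TRAP_RET_CONTINUE;
}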
static bool is_tge_enabled(void)
{
	u_register_t hcr_el2 = read_hcr_el2();

	return ((read_feat_vhe_id_field() != 0U) && ((hcr_el2 & HCR_TGE_BIT) != 0U));
}

/*
 * This function ensures that an Undefined exception is not injected into a
 * non-existent S-EL2. This can happen when the trap is taken from S-EL{1,0}
 * while the non-secure world is running with the TGE bit set: since EL3 does
 * not save/restore EL2 registers when only one world has EL2 enabled, reading
 * hcr_el2.TGE here would return the non-secure world's value.
 */
static bool is_secure_trap_without_sel2(u_register_t scr)
{
	return ((scr & (SCR_NS_BIT | SCR_EEL2_BIT)) == 0);
}

static unsigned int target_el(unsigned int from_el, u_register_t scr)
{
	if (from_el > MODE_EL1) {
		return from_el;
	} else if (is_tge_enabled() && !is_secure_trap_without_sel2(scr)) {
		return MODE_EL2;
	} else {
		return MODE_EL1;
	}
}

static u_register_t get_elr_el3(u_register_t spsr_el3, u_register_t vbar, unsigned int target_el)
{
	unsigned int outgoing_el = GET_EL(spsr_el3);
	u_register_t elr_el3 = 0;

	if (outgoing_el == target_el) {
		/*
		 * Target EL is either EL1 or EL2; the LSB of SPSR.M gives the
		 * stack pointer selection (SPSel):
		 * Thread mode  : 0
		 * Handler mode : 1
		 */
		if ((spsr_el3 & (MODE_SP_MASK << MODE_SP_SHIFT)) == MODE_SP_ELX) {
			elr_el3 = vbar + CURRENT_EL_SPX;
		} else {
			elr_el3 = vbar + CURRENT_EL_SP0;
		}
	} else {
		/* Vector address for a lower EL using AArch64 */
		elr_el3 = vbar + LOWER_EL_AARCH64;
	}

	return elr_el3;
}

/*
 * Explicitly create all bits of SPSR to get PSTATE at exception return.
 *
 * The code is based on "Aarch64.exceptions.takeexception" described in
 * DDI0602 revision 2023-06.
 * "https://developer.arm.com/documentation/ddi0602/2023-06/Shared-Pseudocode/
 * aarch64-exceptions-takeexception"
 *
 * NOTE: This piece of code must be reviewed every release to ensure that
 * we keep up with new architectural features that introduce new SPSR bits.
 */
u_register_t create_spsr(u_register_t old_spsr, unsigned int target_el)
{
	u_register_t new_spsr = 0;
	u_register_t sctlr;

	/* Set M bits for target EL in AArch64 mode, also get sctlr */
	if (target_el == MODE_EL2) {
		sctlr = read_sctlr_el2();
		new_spsr |= (SPSR_M_AARCH64 << SPSR_M_SHIFT) | SPSR_M_EL2H;
	} else {
		sctlr = read_sctlr_el1();
		new_spsr |= (SPSR_M_AARCH64 << SPSR_M_SHIFT) | SPSR_M_EL1H;
	}

	/* Mask all exceptions, update DAIF bits */
	new_spsr |= SPSR_DAIF_MASK << SPSR_DAIF_SHIFT;

	/* If FEAT_BTI is present, clear BTYPE bits */
	new_spsr |= old_spsr & (SPSR_BTYPE_MASK_AARCH64 << SPSR_BTYPE_SHIFT_AARCH64);
	if (is_armv8_5_bti_present()) {
		new_spsr &= ~(SPSR_BTYPE_MASK_AARCH64 << SPSR_BTYPE_SHIFT_AARCH64);
	}

	/* If SSBS is implemented, take the value from SCTLR.DSSBS */
	new_spsr |= old_spsr & SPSR_SSBS_BIT_AARCH64;
	if (is_feat_ssbs_present()) {
		if ((sctlr & SCTLR_DSSBS_BIT) != 0U) {
			new_spsr |= SPSR_SSBS_BIT_AARCH64;
		} else {
			new_spsr &= ~SPSR_SSBS_BIT_AARCH64;
		}
	}

	/* If FEAT_NMI is implemented, ALLINT = !(SCTLR.SPINTMASK) */
	new_spsr |= old_spsr & SPSR_ALLINT_BIT_AARCH64;
	if (is_feat_nmi_present()) {
		if ((sctlr & SCTLR_SPINTMASK_BIT) != 0U) {
			new_spsr &= ~SPSR_ALLINT_BIT_AARCH64;
		} else {
			new_spsr |= SPSR_ALLINT_BIT_AARCH64;
		}
	}

	/* Clear PSTATE.IL bit explicitly */
	new_spsr &= ~SPSR_IL_BIT;

	/* Clear PSTATE.SS bit explicitly */
	new_spsr &= ~SPSR_SS_BIT;

	/* Update PSTATE.PAN bit */
	new_spsr |= old_spsr & SPSR_PAN_BIT;
	if (is_feat_pan_present() &&
	    ((target_el == MODE_EL1) || ((target_el == MODE_EL2) && is_tge_enabled())) &&
	    ((sctlr & SCTLR_SPAN_BIT) == 0U)) {
		new_spsr |= SPSR_PAN_BIT;
	}

	/* Clear UAO bit if FEAT_UAO is present */
	new_spsr |= old_spsr & SPSR_UAO_BIT_AARCH64;
	if (is_feat_uao_present()) {
		new_spsr &= ~SPSR_UAO_BIT_AARCH64;
	}

	/* DIT bits are unchanged */
	new_spsr |= old_spsr & SPSR_DIT_BIT;

	/* If FEAT_MTE2 is implemented mask tag faults by setting TCO bit */
	new_spsr |= old_spsr & SPSR_TCO_BIT_AARCH64;
	if (is_feat_mte2_present()) {
		new_spsr |= SPSR_TCO_BIT_AARCH64;
	}

	/* NZCV bits are unchanged */
	new_spsr |= old_spsr & SPSR_NZCV;

	/* If FEAT_EBEP is present set PM bit */
	new_spsr |= old_spsr & SPSR_PM_BIT_AARCH64;
	if (is_feat_ebep_present()) {
		new_spsr |= SPSR_PM_BIT_AARCH64;
	}

	/* If FEAT_SEBEP is present clear PPEND bit */
	new_spsr |= old_spsr & SPSR_PPEND_BIT;
	if (is_feat_sebep_present()) {
		new_spsr &= ~SPSR_PPEND_BIT;
	}

	/* If FEAT_GCS is present, update EXLOCK bit */
	new_spsr |= old_spsr & SPSR_EXLOCK_BIT_AARCH64;
	if (is_feat_gcs_present()) {
		u_register_t gcscr;

		if (target_el == MODE_EL2) {
			gcscr = read_gcscr_el2();
		} else {
			gcscr = read_gcscr_el1();
		}
		new_spsr |= (gcscr & GCSCR_EXLOCK_EN_BIT) ? SPSR_EXLOCK_BIT_AARCH64 : 0;
	}

	return new_spsr;
}
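/*
 * Worked example (illustrative, assuming none of the optional PSTATE
 * features handled above are implemented): for an EL1 target, create_spsr()
 * returns 0x3c5, i.e. M[3:0] = 0b0101 (EL1h, AArch64) with D, A, I and F
 * masked; for an EL2 target the mode bits become 0b1001 (EL2h), giving
 * 0x3c9. NZCV and DIT bits set in the incoming SPSR are carried over on
 * top of that value.
 */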
/*
 * Handler for injecting an Undefined exception into a lower EL, used when a
 * lower EL accesses a system register that EL3 firmware is unaware of.
 *
 * This is a safety net to avoid EL3 panics caused by system register accesses
 * that trigger an exception with syndrome EC=0x18.
 */
void inject_undef64(cpu_context_t *ctx)
{
	u_register_t esr = (EC_UNKNOWN << ESR_EC_SHIFT) | ESR_IL_BIT;
	el3_state_t *state = get_el3state_ctx(ctx);
	u_register_t elr_el3 = read_ctx_reg(state, CTX_ELR_EL3);
	u_register_t old_spsr = read_ctx_reg(state, CTX_SPSR_EL3);
	u_register_t scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
	u_register_t new_spsr = 0;
	unsigned int to_el = target_el(GET_EL(old_spsr), scr_el3);

	if (to_el == MODE_EL2) {
		write_elr_el2(elr_el3);
		elr_el3 = get_elr_el3(old_spsr, read_vbar_el2(), to_el);
		write_esr_el2(esr);
		write_spsr_el2(old_spsr);
	} else {
		write_elr_el1(elr_el3);
		elr_el3 = get_elr_el3(old_spsr, read_vbar_el1(), to_el);
		write_esr_el1(esr);
		write_spsr_el1(old_spsr);
	}

	new_spsr = create_spsr(old_spsr, to_el);

	write_ctx_reg(state, CTX_SPSR_EL3, new_spsr);
	write_ctx_reg(state, CTX_ELR_EL3, elr_el3);
}
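/*
 * Worked example (illustrative): a trap taken from NS EL1 (HCR_EL2.TGE
 * clear) at address 0x80001000 with SPSR.M = EL1h and VBAR_EL1 = 0x81000000
 * results in inject_undef64() programming ELR_EL1 = 0x80001000,
 * ESR_EL1 = 0x02000000 (EC = 0x0 "Unknown reason", IL = 1) and SPSR_EL1 set
 * to the original SPSR_EL3, while redirecting the EL3 exception return to
 * the "current EL with SP_ELx" vector at 0x81000200 with a freshly
 * constructed SPSR.
 */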