/*
 * Copyright (c) 2022-2025, Arm Limited. All rights reserved.
 * Copyright (c) 2023, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Dispatch synchronous system register traps from lower ELs.
 */

#include <arch_features.h>
#include <arch_helpers.h>
#include <bl31/sync_handle.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/extensions/idte3.h>

int handle_sysreg_trap(uint64_t esr_el3, cpu_context_t *ctx,
                       u_register_t flags __unused)
{
        uint64_t __unused opcode = esr_el3 & ISS_SYSREG_OPCODE_MASK;

#if ENABLE_FEAT_IDTE3
        /*
         * Handle traps for system registers with the following encoding:
         * op0 == 3, op1 == 0/1, CRn == 0 (Group 3 & Group 5 ID registers).
         */
        if ((esr_el3 & ISS_IDREG_OPCODE_MASK) == ISS_SYSREG_OPCODE_IDREG) {
                return handle_idreg_trap(esr_el3, ctx, flags);
        }
#endif

#if ENABLE_FEAT_RNG_TRAP
        if ((opcode == ISS_SYSREG_OPCODE_RNDR) || (opcode == ISS_SYSREG_OPCODE_RNDRRS)) {
                return plat_handle_rng_trap(esr_el3, ctx);
        }
#endif

#if IMPDEF_SYSREG_TRAP
        if ((opcode & ISS_SYSREG_OPCODE_IMPDEF) == ISS_SYSREG_OPCODE_IMPDEF) {
                return plat_handle_impdef_trap(esr_el3, ctx);
        }
#endif

        return TRAP_RET_UNHANDLED;
}

static bool is_tge_enabled(void)
{
        u_register_t hcr_el2 = read_hcr_el2();

        return ((is_feat_vhe_present()) && ((hcr_el2 & HCR_TGE_BIT) != 0U));
}

/*
 * This function ensures that an Undefined exception is not injected into a
 * non-existent S-EL2. This could happen when the trap originates from S-EL1/0
 * while the non-secure world is running with the TGE bit set: EL3 does not
 * save/restore EL2 registers if only one world has EL2 enabled, so reading
 * hcr_el2.TGE would return the NS world's value.
 */
static bool is_secure_trap_without_sel2(u_register_t scr)
{
        return ((scr & (SCR_NS_BIT | SCR_EEL2_BIT)) == 0);
}

static unsigned int target_el(unsigned int from_el, u_register_t scr)
{
        if (from_el > MODE_EL1) {
                return from_el;
        } else if (is_tge_enabled() && !is_secure_trap_without_sel2(scr)) {
                return MODE_EL2;
        } else {
                return MODE_EL1;
        }
}

static u_register_t get_elr_el3(u_register_t spsr_el3, u_register_t vbar, unsigned int target_el)
{
        unsigned int outgoing_el = GET_EL(spsr_el3);
        u_register_t elr_el3 = 0;

        if (outgoing_el == target_el) {
                /*
                 * Target EL is either EL1 or EL2; the LSB tells us the SPSel:
                 *   Thread mode  : 0
                 *   Handler mode : 1
                 */
                if ((spsr_el3 & (MODE_SP_MASK << MODE_SP_SHIFT)) == MODE_SP_ELX) {
                        elr_el3 = vbar + CURRENT_EL_SPX;
                } else {
                        elr_el3 = vbar + CURRENT_EL_SP0;
                }
        } else {
                /* Vector address for lower EL using AArch64 */
                elr_el3 = vbar + LOWER_EL_AARCH64;
        }

        return elr_el3;
}

/*
 * Explicitly create all bits of SPSR to get PSTATE at exception return.
 *
 * The code is based on "Aarch64.exceptions.takeexception" described in
 * DDI0602 revision 2025-03.
 * "https://developer.arm.com/documentation/ddi0602/2025-03/Shared-Pseudocode/
 * aarch64-exceptions-takeexception"
 *
 * NOTE: This piece of code must be reviewed every release against the latest
 * takeexception sequence to ensure that we keep up with new arch features that
 * affect the PSTATE.
 *
 * TF-A 2.13 release review
 *
 * Review of version 2025-03 indicates we are missing support for one feature:
 * - FEAT_UINJ (2024 extension)
 */
u_register_t create_spsr(u_register_t old_spsr, unsigned int target_el)
{
        u_register_t new_spsr = 0;
        u_register_t sctlr;

        /* Set M bits for target EL in AArch64 mode, also get sctlr */
        if (target_el == MODE_EL2) {
                sctlr = read_sctlr_el2();
                new_spsr |= (SPSR_M_AARCH64 << SPSR_M_SHIFT) | SPSR_M_EL2H;
        } else {
                sctlr = read_sctlr_el1();
                new_spsr |= (SPSR_M_AARCH64 << SPSR_M_SHIFT) | SPSR_M_EL1H;
        }

        /* Mask all exceptions, update DAIF bits */
        new_spsr |= SPSR_DAIF_MASK << SPSR_DAIF_SHIFT;

        /* If FEAT_BTI is present, clear BTYPE bits */
        new_spsr |= old_spsr & (SPSR_BTYPE_MASK_AARCH64 << SPSR_BTYPE_SHIFT_AARCH64);
        if (is_feat_bti_present()) {
                new_spsr &= ~(SPSR_BTYPE_MASK_AARCH64 << SPSR_BTYPE_SHIFT_AARCH64);
        }

        /* If SSBS is implemented, take the value from SCTLR.DSSBS */
        new_spsr |= old_spsr & SPSR_SSBS_BIT_AARCH64;
        if (is_feat_ssbs_present()) {
                if ((sctlr & SCTLR_DSSBS_BIT) != 0U) {
                        new_spsr |= SPSR_SSBS_BIT_AARCH64;
                } else {
                        new_spsr &= ~SPSR_SSBS_BIT_AARCH64;
                }
        }

        /* If FEAT_NMI is implemented, ALLINT = !(SCTLR.SPINTMASK) */
        new_spsr |= old_spsr & SPSR_ALLINT_BIT_AARCH64;
        if (is_feat_nmi_present()) {
                if ((sctlr & SCTLR_SPINTMASK_BIT) != 0U) {
                        new_spsr &= ~SPSR_ALLINT_BIT_AARCH64;
                } else {
                        new_spsr |= SPSR_ALLINT_BIT_AARCH64;
                }
        }

        /* Clear PSTATE.IL bit explicitly */
        new_spsr &= ~SPSR_IL_BIT;

        /* Clear PSTATE.SS bit explicitly */
        new_spsr &= ~SPSR_SS_BIT;

        /* Update PSTATE.PAN bit */
        new_spsr |= old_spsr & SPSR_PAN_BIT;
        if (is_feat_pan_present() &&
            ((target_el == MODE_EL1) || ((target_el == MODE_EL2) && is_tge_enabled())) &&
            ((sctlr & SCTLR_SPAN_BIT) == 0U)) {
                new_spsr |= SPSR_PAN_BIT;
        }

        /* Clear UAO bit if FEAT_UAO is present */
        new_spsr |= old_spsr & SPSR_UAO_BIT_AARCH64;
        if (is_feat_uao_present()) {
                new_spsr &= ~SPSR_UAO_BIT_AARCH64;
        }

        /* DIT bits are unchanged */
        new_spsr |= old_spsr & SPSR_DIT_BIT;

        /* If FEAT_MTE2 is implemented, mask tag faults by setting the TCO bit */
        new_spsr |= old_spsr & SPSR_TCO_BIT_AARCH64;
        if (is_feat_mte2_present()) {
                new_spsr |= SPSR_TCO_BIT_AARCH64;
        }

        /* NZCV bits are unchanged */
        new_spsr |= old_spsr & SPSR_NZCV;

        /* If FEAT_EBEP is present, set the PM bit */
        new_spsr |= old_spsr & SPSR_PM_BIT_AARCH64;
        if (is_feat_ebep_present()) {
                new_spsr |= SPSR_PM_BIT_AARCH64;
        }

        /* If FEAT_SEBEP is present, clear the PPEND bit */
        new_spsr |= old_spsr & SPSR_PPEND_BIT;
        if (is_feat_sebep_present()) {
                new_spsr &= ~SPSR_PPEND_BIT;
        }

        /* If FEAT_GCS is present, update the EXLOCK bit */
        new_spsr |= old_spsr & SPSR_EXLOCK_BIT_AARCH64;
        if (is_feat_gcs_present()) {
                u_register_t gcscr;

                if (target_el == MODE_EL2) {
                        gcscr = read_gcscr_el2();
                } else {
                        gcscr = read_gcscr_el1();
                }
                new_spsr |= (gcscr & GCSCR_EXLOCK_EN_BIT) ? SPSR_EXLOCK_BIT_AARCH64 : 0;
        }

        /* If FEAT_PAUTH_LR is present, clear the PACM bit */
        new_spsr |= old_spsr & SPSR_PACM_BIT_AARCH64;
        if (is_feat_pauth_lr_present()) {
                new_spsr &= ~SPSR_PACM_BIT_AARCH64;
        }

        return new_spsr;
}

/*
 * Handler for injecting an Undefined exception into a lower EL when that EL
 * accesses system registers of which (older) EL3 firmware is unaware.
 *
 * This is a safety net to avoid EL3 panics caused by system register accesses
 * that trigger an exception with syndrome EC=0x18.
 */
void inject_undef64(cpu_context_t *ctx)
{
        u_register_t esr = (EC_UNKNOWN << ESR_EC_SHIFT) | ESR_IL_BIT;
        el3_state_t *state = get_el3state_ctx(ctx);
        u_register_t elr_el3 = read_ctx_reg(state, CTX_ELR_EL3);
        u_register_t old_spsr = read_ctx_reg(state, CTX_SPSR_EL3);
        u_register_t scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
        u_register_t new_spsr = 0;
        unsigned int to_el = target_el(GET_EL(old_spsr), scr_el3);

        /*
         * Give the target EL the trapped instruction's address as its
         * preferred exception return address, then redirect EL3's exception
         * return to the target EL's vector entry and hand it the synthesized
         * syndrome and the original PSTATE.
         */
        if (to_el == MODE_EL2) {
                write_elr_el2(elr_el3);
                elr_el3 = get_elr_el3(old_spsr, read_vbar_el2(), to_el);
                write_esr_el2(esr);
                write_spsr_el2(old_spsr);
        } else {
                write_elr_el1(elr_el3);
                elr_el3 = get_elr_el3(old_spsr, read_vbar_el1(), to_el);
                write_esr_el1(esr);
                write_spsr_el1(old_spsr);
        }

        new_spsr = create_spsr(old_spsr, to_el);

        write_ctx_reg(state, CTX_SPSR_EL3, new_spsr);
        write_ctx_reg(state, CTX_ELR_EL3, elr_el3);
}