/*
 * Copyright (c) 2022-2025, Arm Limited. All rights reserved.
 * Copyright (c) 2023, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Dispatch synchronous system register traps from lower ELs.
 */

#include <arch_features.h>
#include <arch_helpers.h>
#include <bl31/sync_handle.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/extensions/idte3.h>

int handle_sysreg_trap(uint64_t esr_el3, cpu_context_t *ctx,
		       u_register_t flags __unused)
{
	uint64_t __unused opcode = esr_el3 & ISS_SYSREG_OPCODE_MASK;
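
	/*
	 * Note: ISS_SYSREG_OPCODE_MASK excludes the ISS Rt and direction
	 * fields, so the opcode comparisons below match an access to a given
	 * register regardless of the transfer register or the read/write
	 * direction.
	 */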

#if ENABLE_FEAT_IDTE3
	/*
	 * Handle trap for system registers with the following encoding
	 * op0 == 3, op1 == 0/1, CRn == 0 (Group 3 & Group 5 ID registers)
	 */
	if ((esr_el3 & ISS_IDREG_OPCODE_MASK) == ISS_SYSREG_OPCODE_IDREG) {
		return handle_idreg_trap(esr_el3, ctx, flags);
	}
#endif

#if ENABLE_FEAT_RNG_TRAP
	if ((opcode == ISS_SYSREG_OPCODE_RNDR) || (opcode == ISS_SYSREG_OPCODE_RNDRRS)) {
		return plat_handle_rng_trap(esr_el3, ctx);
	}
#endif

#if IMPDEF_SYSREG_TRAP
	if ((opcode & ISS_SYSREG_OPCODE_IMPDEF) == ISS_SYSREG_OPCODE_IMPDEF) {
		return plat_handle_impdef_trap(esr_el3, ctx);
	}
#endif

	return TRAP_RET_UNHANDLED;
}
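
/*
 * Illustrative sketch, not part of this file: a platform hooking
 * plat_handle_rng_trap() above would typically extract the destination
 * register from the ISS, write its entropy into the saved GP-register
 * context, and return TRAP_RET_CONTINUE so the trapping instruction is
 * skipped on return. The helpers write_gpreg() and plat_get_entropy() are
 * placeholders here, not existing TF-A APIs:
 *
 *	int plat_handle_rng_trap(uint64_t esr_el3, cpu_context_t *ctx)
 *	{
 *		unsigned int rt = (esr_el3 & ISS_SYSREG_REG_MASK) >>
 *				  ISS_SYSREG_REG_SHIFT;
 *
 *		// Only emulate reads, and ignore accesses targeting XZR
 *		// (Rt == 31); RNDR/RNDRRS are read-only anyway.
 *		if (((esr_el3 & ISS_SYSREG_DIRECTION_MASK) != 0U) &&
 *		    (rt != 31U)) {
 *			write_gpreg(ctx, rt, plat_get_entropy());
 *		}
 *		return TRAP_RET_CONTINUE;
 *	}
 */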

static bool is_tge_enabled(void)
{
	u_register_t hcr_el2 = read_hcr_el2();

	return ((is_feat_vhe_present()) && ((hcr_el2 & HCR_TGE_BIT) != 0U));
}

/*
 * This function ensures that an Undefined exception is never injected into
 * non-existent S-EL2. That could otherwise happen when the trap originates
 * from S-EL{1,0} while the non-secure world is running with the TGE bit set:
 * EL3 does not save/restore EL2 registers if only one world has EL2 enabled,
 * so reading hcr_el2.TGE would return the non-secure world's value.
 */
static bool is_secure_trap_without_sel2(u_register_t scr)
{
	return ((scr & (SCR_NS_BIT | SCR_EEL2_BIT)) == 0);
}

static unsigned int target_el(unsigned int from_el, u_register_t scr)
{
	if (from_el > MODE_EL1) {
		return from_el;
	} else if (is_tge_enabled() && !is_secure_trap_without_sel2(scr)) {
		return MODE_EL2;
	} else {
		return MODE_EL1;
	}
}
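
/*
 * Example: a trap taken from NS EL0 while HCR_EL2.TGE == 1 is routed to EL2,
 * mirroring the architectural rule that exceptions from EL0 are taken to EL2
 * when TGE is set. A trap from S-EL0 with SCR_EL3.EEL2 == 0 goes to S-EL1
 * instead, since S-EL2 does not exist in that configuration.
 */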

static u_register_t get_elr_el3(u_register_t spsr_el3, u_register_t vbar, unsigned int target_el)
{
	unsigned int outgoing_el = GET_EL(spsr_el3);
	u_register_t elr_el3 = 0;

	if (outgoing_el == target_el) {
		/*
		 * The target EL is either EL1 or EL2; the LSB of SPSR.M
		 * gives us the SPSel:
		 * Thread mode  : 0
		 * Handler mode : 1
		 */
		if ((spsr_el3 & (MODE_SP_MASK << MODE_SP_SHIFT)) == MODE_SP_ELX) {
			elr_el3 = vbar + CURRENT_EL_SPX;
		} else {
			elr_el3 = vbar + CURRENT_EL_SP0;
		}
	} else {
		/* Vector address for a lower EL using AArch64 */
		elr_el3 = vbar + LOWER_EL_AARCH64;
	}

	return elr_el3;
}
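
/*
 * The offsets used above follow the architectural vector table layout
 * (in TF-A: CURRENT_EL_SP0 == 0x0, CURRENT_EL_SPX == 0x200,
 * LOWER_EL_AARCH64 == 0x400). For example, an exception injected back into
 * EL1 handler mode vectors to vbar_el1 + 0x200.
 */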

/*
 * Explicitly create all bits of SPSR to get PSTATE at exception return.
 *
 * The code is based on "Aarch64.exceptions.takeexception" described in
 * DDI0602 revision 2025-03.
 * "https://developer.arm.com/documentation/ddi0602/2025-03/Shared-Pseudocode/
 * aarch64-exceptions-takeexception"
 *
 * NOTE: This piece of code must be reviewed every release against the latest
 * takeexception sequence to ensure that we keep up with new arch features
 * that affect the PSTATE.
 *
 * TF-A 2.13 release review:
 *
 * Review of version 2025-03 indicates we are missing support for one feature:
 * - FEAT_UINJ (2024 extension)
 */
u_register_t create_spsr(u_register_t old_spsr, unsigned int target_el)
{
	u_register_t new_spsr = 0;
	u_register_t sctlr;
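
	/*
	 * Most steps below follow the same pattern as the takeexception
	 * pseudocode: carry the old bit value over unchanged, then override
	 * it when the controlling feature is implemented. A few bits (DAIF,
	 * IL, SS) are forced unconditionally.
	 */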

	/* Set M bits for target EL in AArch64 mode, also get sctlr */
	if (target_el == MODE_EL2) {
		sctlr = read_sctlr_el2();
		new_spsr |= (SPSR_M_AARCH64 << SPSR_M_SHIFT) | SPSR_M_EL2H;
	} else {
		sctlr = read_sctlr_el1();
		new_spsr |= (SPSR_M_AARCH64 << SPSR_M_SHIFT) | SPSR_M_EL1H;
	}

	/* Mask all exceptions, update DAIF bits */
	new_spsr |= SPSR_DAIF_MASK << SPSR_DAIF_SHIFT;

	/* If FEAT_BTI is present, clear BTYPE bits */
	new_spsr |= old_spsr & (SPSR_BTYPE_MASK_AARCH64 << SPSR_BTYPE_SHIFT_AARCH64);
	if (is_feat_bti_present()) {
		new_spsr &= ~(SPSR_BTYPE_MASK_AARCH64 << SPSR_BTYPE_SHIFT_AARCH64);
	}

	/* If SSBS is implemented, take the value from SCTLR.DSSBS */
	new_spsr |= old_spsr & SPSR_SSBS_BIT_AARCH64;
	if (is_feat_ssbs_present()) {
		if ((sctlr & SCTLR_DSSBS_BIT) != 0U) {
			new_spsr |= SPSR_SSBS_BIT_AARCH64;
		} else {
			new_spsr &= ~SPSR_SSBS_BIT_AARCH64;
		}
	}

	/* If FEAT_NMI is implemented, ALLINT = !(SCTLR.SPINTMASK) */
	new_spsr |= old_spsr & SPSR_ALLINT_BIT_AARCH64;
	if (is_feat_nmi_present()) {
		if ((sctlr & SCTLR_SPINTMASK_BIT) != 0U) {
			new_spsr &= ~SPSR_ALLINT_BIT_AARCH64;
		} else {
			new_spsr |= SPSR_ALLINT_BIT_AARCH64;
		}
	}

	/* Clear PSTATE.IL bit explicitly */
	new_spsr &= ~SPSR_IL_BIT;

	/* Clear PSTATE.SS bit explicitly */
	new_spsr &= ~SPSR_SS_BIT;

	/* Update PSTATE.PAN bit */
	new_spsr |= old_spsr & SPSR_PAN_BIT;
	if (is_feat_pan_present() &&
	    ((target_el == MODE_EL1) || ((target_el == MODE_EL2) && is_tge_enabled())) &&
	    ((sctlr & SCTLR_SPAN_BIT) == 0U)) {
		new_spsr |= SPSR_PAN_BIT;
	}

	/* Clear UAO bit if FEAT_UAO is present */
	new_spsr |= old_spsr & SPSR_UAO_BIT_AARCH64;
	if (is_feat_uao_present()) {
		new_spsr &= ~SPSR_UAO_BIT_AARCH64;
	}

	/* DIT bits are unchanged */
	new_spsr |= old_spsr & SPSR_DIT_BIT;

	/* If FEAT_MTE2 is implemented, mask tag faults by setting the TCO bit */
	new_spsr |= old_spsr & SPSR_TCO_BIT_AARCH64;
	if (is_feat_mte2_present()) {
		new_spsr |= SPSR_TCO_BIT_AARCH64;
	}

	/* NZCV bits are unchanged */
	new_spsr |= old_spsr & SPSR_NZCV;

	/* If FEAT_EBEP is present, set the PM bit */
	new_spsr |= old_spsr & SPSR_PM_BIT_AARCH64;
	if (is_feat_ebep_present()) {
		new_spsr |= SPSR_PM_BIT_AARCH64;
	}

	/* If FEAT_SEBEP is present, clear the PPEND bit */
	new_spsr |= old_spsr & SPSR_PPEND_BIT;
	if (is_feat_sebep_present()) {
		new_spsr &= ~SPSR_PPEND_BIT;
	}

	/* If FEAT_GCS is present, update the EXLOCK bit */
	new_spsr |= old_spsr & SPSR_EXLOCK_BIT_AARCH64;
	if (is_feat_gcs_present()) {
		u_register_t gcscr;

		if (target_el == MODE_EL2) {
			gcscr = read_gcscr_el2();
		} else {
			gcscr = read_gcscr_el1();
		}
		new_spsr |= (gcscr & GCSCR_EXLOCK_EN_BIT) ? SPSR_EXLOCK_BIT_AARCH64 : 0;
	}

	/* If FEAT_PAUTH_LR is present, zero the PACM bit */
	new_spsr |= old_spsr & SPSR_PACM_BIT_AARCH64;
	if (is_feat_pauth_lr_present()) {
		new_spsr &= ~SPSR_PACM_BIT_AARCH64;
	}

	return new_spsr;
}

/*
 * Handler for injecting an Undefined exception into a lower EL when that EL
 * accesses system registers of which (older) EL3 firmware is unaware.
 *
 * This is a safety net to avoid EL3 panics caused by system register
 * accesses that trigger an exception with syndrome EC=0x18.
 */
void inject_undef64(cpu_context_t *ctx)
{
	u_register_t esr = (EC_UNKNOWN << ESR_EC_SHIFT) | ESR_IL_BIT;
	el3_state_t *state = get_el3state_ctx(ctx);
	u_register_t elr_el3 = read_ctx_reg(state, CTX_ELR_EL3);
	u_register_t old_spsr = read_ctx_reg(state, CTX_SPSR_EL3);
	u_register_t scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
	u_register_t new_spsr = 0;
	unsigned int to_el = target_el(GET_EL(old_spsr), scr_el3);

	if (to_el == MODE_EL2) {
		write_elr_el2(elr_el3);
		elr_el3 = get_elr_el3(old_spsr, read_vbar_el2(), to_el);
		write_esr_el2(esr);
		write_spsr_el2(old_spsr);
	} else {
		write_elr_el1(elr_el3);
		elr_el3 = get_elr_el3(old_spsr, read_vbar_el1(), to_el);
		write_esr_el1(esr);
		write_spsr_el1(old_spsr);
	}

	new_spsr = create_spsr(old_spsr, to_el);

	write_ctx_reg(state, CTX_SPSR_EL3, new_spsr);
	write_ctx_reg(state, CTX_ELR_EL3, elr_el3);
}
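
/*
 * Illustrative use (the surrounding dispatcher is not part of this file):
 * an EL3 synchronous exception path would typically try the trap handler
 * first and fall back to injecting an Undefined exception, along the
 * lines of:
 *
 *	if (handle_sysreg_trap(esr_el3, ctx, flags) == TRAP_RET_UNHANDLED) {
 *		inject_undef64(ctx);
 *	}
 */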