xref: /rk3399_ARM-atf/bl31/bl31_traps.c (revision e7e231d39c68083e870cdaaa89ecc4e5045fdd64)
/*
 * Copyright (c) 2022-2026, Arm Limited. All rights reserved.
 * Copyright (c) 2023, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Dispatch synchronous system register traps from lower ELs.
 */

#include <arch_features.h>
#include <arch_helpers.h>
#include <bl31/sync_handle.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/extensions/idte3.h>

int handle_sysreg_trap(uint64_t esr_el3, cpu_context_t *ctx, u_register_t flags)
{
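	/*
	 * Strip the direction (read/write) and Rt fields from the ISS so that
	 * reads and writes of the same register yield the same opcode value.
	 */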
	uint64_t opcode = EXTRACT(ESR_ISS, esr_el3) & ~(MASK(ISS_SYS64_DIR) | MASK(ISS_SYS64_RT));
	uint8_t rt = EXTRACT(ISS_SYS64_RT, esr_el3);

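	/*
	 * With FEAT_IDTE3, reads of the ID register space (and GMID_EL1) can
	 * be trapped to EL3; emulate them via handle_idreg_trap().
	 */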
	if (is_feat_idte3_supported() &&
	    ((opcode >= ISS_SYSREG_OPCODE_IDREG_MIN &&
	      opcode <= ISS_SYSREG_OPCODE_IDREG_MAX) ||
	      opcode == ISS_SYSREG_OPCODE_GMID_EL1)) {
		return handle_idreg_trap(rt, opcode, ctx, flags);
	}

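	/*
	 * FEAT_RNG_TRAP: RNDR/RNDRRS accesses trap to EL3, so the platform
	 * can supply the random value itself.
	 */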
	if (is_feat_rng_trap_supported() &&
	    (opcode == ISS_SYSREG_OPCODE_RNDR ||
	     opcode == ISS_SYSREG_OPCODE_RNDRRS)) {
		/* Ignore XZR accesses and writes to the register */
		if (rt == ISS_SYSREG_RT_XZR || !EXTRACT(ISS_SYS64_DIR, esr_el3)) {
			return TRAP_RET_CONTINUE;
		}

		return plat_handle_rng_trap(rt, opcode == ISS_SYSREG_OPCODE_RNDRRS, ctx);
	}

#if IMPDEF_SYSREG_TRAP
	/* isolate selected bits and check they are all set */
	if ((opcode & ISS_SYSREG_OPCODE_IMPDEF_MASK) == ISS_SYSREG_OPCODE_IMPDEF_MASK) {
		return plat_handle_impdef_trap(esr_el3, ctx);
	}
#endif

	return TRAP_RET_UNHANDLED;
}

static bool is_tge_enabled(void)
{
	u_register_t hcr_el2 = read_hcr_el2();

	return ((is_feat_vhe_present()) && ((hcr_el2 & HCR_TGE_BIT) != 0U));
}

/*
 * This function ensures that an undef injection does not target a
 * non-existent S-EL2. This could happen when a trap is taken from S-EL{1,0}
 * while the non-secure world runs with the TGE bit set: EL3 does not
 * save/restore EL2 registers if only one world has EL2 enabled, so reading
 * hcr_el2.TGE would return the NS world's value.
 */
static bool is_secure_trap_without_sel2(u_register_t scr)
{
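	/* NS == 0 and EEL2 == 0 means Secure state with no S-EL2 enabled */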
	return ((scr & (SCR_NS_BIT | SCR_EEL2_BIT)) == 0);
}

static unsigned int target_el(unsigned int from_el, u_register_t scr)
{
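	/*
	 * Exceptions from EL2 stay at that EL. From EL{0,1} they are routed
	 * to EL2 when HCR_EL2.TGE is set and the trapping world has a usable
	 * EL2, and to EL1 otherwise.
	 */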
	if (from_el > MODE_EL1) {
		return from_el;
	} else if (is_tge_enabled() && !is_secure_trap_without_sel2(scr)) {
		return MODE_EL2;
	} else {
		return MODE_EL1;
	}
}

static u_register_t get_elr_el3(u_register_t spsr_el3, u_register_t vbar, unsigned int target_el)
{
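	/*
	 * AArch64 vector table offsets: 0x000 Current EL with SP_EL0,
	 * 0x200 Current EL with SP_ELx, 0x400 Lower EL using AArch64,
	 * 0x600 Lower EL using AArch32.
	 */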
	unsigned int outgoing_el = GET_EL(spsr_el3);
	u_register_t elr_el3 = 0;

	if (outgoing_el == target_el) {
		/*
		 * Target EL is either EL1 or EL2, lsb can tell us the SPsel
		 *  Thread mode  : 0
		 *  Handler mode : 1
		 */
		if ((spsr_el3 & (MODE_SP_MASK << MODE_SP_SHIFT)) == MODE_SP_ELX) {
			elr_el3 = vbar + CURRENT_EL_SPX;
		} else {
			elr_el3 = vbar + CURRENT_EL_SP0;
		}
	} else {
		/* Vector address for Lower EL using AArch64 */
		elr_el3 = vbar + LOWER_EL_AARCH64;
	}

	return elr_el3;
}

/*
 * Explicitly create all bits of SPSR to get PSTATE at exception return.
 *
 * The code is based on "Aarch64.exceptions.takeexception" described in
 * DDI0602 revision 2026-03.
 * "https://developer.arm.com/documentation/ddi0602/2026-03/Shared-Pseudocode/
 * aarch64-exceptions-takeexception"
 *
 * NOTE: This piece of code must be reviewed every release against the latest
 * takeexception sequence to ensure that we keep up with new arch features that
 * affect the PSTATE.
 *
 * Next review: TF-A 2.16 release
 *
 * FEAT_NV3 has an impact but is not implemented in EL3 yet.
 * TODO: this should create a BRBE exception record
 */
u_register_t create_spsr(u_register_t old_spsr, unsigned int target_el)
{
	u_register_t new_spsr = 0;
	u_register_t sctlr;

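	/*
	 * Most fields below follow one pattern: carry the bit over from the
	 * old SPSR when its controlling feature is absent, otherwise force it
	 * to the value the takeexception pseudocode prescribes.
	 */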
	/* Set M bits for target EL in AArch64 mode, also get sctlr */
	if (target_el == MODE_EL2) {
		sctlr = read_sctlr_el2();
		new_spsr |= (SPSR_M_AARCH64 << SPSR_M_SHIFT) | SPSR_M_EL2H;
	} else {
		sctlr = read_sctlr_el1();
		new_spsr |= (SPSR_M_AARCH64 << SPSR_M_SHIFT) | SPSR_M_EL1H;
	}

	/* Mask all exceptions, update DAIF bits */
	new_spsr |= SPSR_DAIF_MASK << SPSR_DAIF_SHIFT;

	/* If FEAT_BTI is present, clear BTYPE bits */
	new_spsr |= old_spsr & (SPSR_BTYPE_MASK_AARCH64 << SPSR_BTYPE_SHIFT_AARCH64);
	if (is_feat_bti_present()) {
		new_spsr &= ~(SPSR_BTYPE_MASK_AARCH64 << SPSR_BTYPE_SHIFT_AARCH64);
	}

	/* If SSBS is implemented, take the value from SCTLR.DSSBS */
	new_spsr |= old_spsr & SPSR_SSBS_BIT_AARCH64;
	if (is_feat_ssbs_present()) {
		if ((sctlr & SCTLR_DSSBS_BIT) != 0U) {
			new_spsr |= SPSR_SSBS_BIT_AARCH64;
		} else {
			new_spsr &= ~SPSR_SSBS_BIT_AARCH64;
		}
	}

	/* If FEAT_NMI is implemented, ALLINT = !(SCTLR.SPINTMASK) */
	new_spsr |= old_spsr & SPSR_ALLINT_BIT_AARCH64;
	if (is_feat_nmi_present()) {
		if ((sctlr & SCTLR_SPINTMASK_BIT) != 0U) {
			new_spsr &= ~SPSR_ALLINT_BIT_AARCH64;
		} else {
			new_spsr |= SPSR_ALLINT_BIT_AARCH64;
		}
	}

	/* Clear PSTATE.IL bit explicitly */
	new_spsr &= ~SPSR_IL_BIT;

	/* Clear PSTATE.SS bit explicitly */
	new_spsr &= ~SPSR_SS_BIT;

	/* Update PSTATE.PAN bit */
	new_spsr |= old_spsr & SPSR_PAN_BIT;
	if (is_feat_pan_present() &&
	    ((target_el == MODE_EL1) || ((target_el == MODE_EL2) && is_tge_enabled())) &&
	    ((sctlr & SCTLR_SPAN_BIT) == 0U)) {
		new_spsr |= SPSR_PAN_BIT;
	}

	/* Clear UAO bit if FEAT_UAO is present */
	new_spsr |= old_spsr & SPSR_UAO_BIT_AARCH64;
	if (is_feat_uao_present()) {
		new_spsr &= ~SPSR_UAO_BIT_AARCH64;
	}

	/* DIT bits are unchanged */
	new_spsr |= old_spsr & SPSR_DIT_BIT;

	/* If FEAT_MTE2 is implemented mask tag faults by setting TCO bit */
	new_spsr |= old_spsr & SPSR_TCO_BIT_AARCH64;
	if (is_feat_mte2_present()) {
		new_spsr |= SPSR_TCO_BIT_AARCH64;
	}

	/* NZCV bits are unchanged */
	new_spsr |= old_spsr & SPSR_NZCV;

	/* UINJ bit is unchanged */
	new_spsr |= old_spsr & SPSR_UINJ_BIT;

	/* If FEAT_EBEP is present set PM bit */
	new_spsr |= old_spsr & SPSR_PM_BIT_AARCH64;
	if (is_feat_ebep_present()) {
		new_spsr |= SPSR_PM_BIT_AARCH64;
	}

	/* If FEAT_SEBEP is present clear PPEND bit */
	new_spsr |= old_spsr & SPSR_PPEND_BIT;
	if (is_feat_sebep_present()) {
		new_spsr &= ~SPSR_PPEND_BIT;
	}

	/* If FEAT_GCS is present, update EXLOCK bit */
	new_spsr |= old_spsr & SPSR_EXLOCK_BIT_AARCH64;
	if (is_feat_gcs_present()) {
		u_register_t gcscr;

		if (target_el == MODE_EL2) {
			gcscr = read_gcscr_el2();
		} else {
			gcscr = read_gcscr_el1();
		}
		new_spsr |= (gcscr & GCSCR_EXLOCK_EN_BIT) ? SPSR_EXLOCK_BIT_AARCH64 : 0;
	}

	/* If FEAT_PAUTH_LR is present, zero the PACM bit */
	new_spsr |= old_spsr & SPSR_PACM_BIT_AARCH64;
	if (is_feat_pauth_lr_present()) {
		new_spsr &= ~SPSR_PACM_BIT_AARCH64;
	}

	return new_spsr;
}

/*
 * Handler for injecting an Undefined Instruction exception into a lower EL
 * when that EL accesses a system register that the (older) EL3 firmware is
 * unaware of.
 *
 * This is a safety net to avoid EL3 panics caused by system register accesses
 * that trigger an exception with syndrome EC=0x18.
 */
void inject_undef64(cpu_context_t *ctx)
{
	el3_state_t *state = get_el3state_ctx(ctx);
	u_register_t old_spsr = read_ctx_reg(state, CTX_SPSR_EL3);
	u_register_t scr_el3 = 0U;
	unsigned int to_el = 0U;
	u_register_t esr = 0U;
	u_register_t elr_el3 = 0U;
	u_register_t new_spsr = 0U;

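	/*
	 * With FEAT_UINJ, setting SPSR_EL3.UINJ is enough: the exception
	 * return itself injects the Undefined Instruction exception at the
	 * lower EL, so no manual vector redirection is needed.
	 */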
	if (is_feat_uinj_supported()) {
		new_spsr = old_spsr | SPSR_UINJ_BIT;
		write_ctx_reg(state, CTX_SPSR_EL3, new_spsr);
		return;
	}

	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
	to_el = target_el(GET_EL(old_spsr), scr_el3);
	esr = (EC_UNKNOWN << ESR_EC_SHIFT) | ESR_IL_BIT;
	elr_el3 = read_ctx_reg(state, CTX_ELR_EL3);

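	/*
	 * Emulate exception entry at the target EL: the trapping instruction's
	 * address goes to ELR_ELx, the syndrome to ESR_ELx and the interrupted
	 * PSTATE to SPSR_ELx, while ELR_EL3 is redirected to the target EL's
	 * vector table so that ERET from EL3 lands in its exception handler.
	 */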
	if (to_el == MODE_EL2) {
		write_elr_el2(elr_el3);
		elr_el3 = get_elr_el3(old_spsr, read_vbar_el2(), to_el);
		write_esr_el2(esr);
		write_spsr_el2(old_spsr);
	} else {
		write_elr_el1(elr_el3);
		elr_el3 = get_elr_el3(old_spsr, read_vbar_el1(), to_el);
		write_esr_el1(esr);
		write_spsr_el1(old_spsr);
	}

	new_spsr = create_spsr(old_spsr, to_el);

	write_ctx_reg(state, CTX_SPSR_EL3, new_spsr);
	write_ctx_reg(state, CTX_ELR_EL3, elr_el3);
}
277