xref: /rk3399_ARM-atf/bl31/bl31_traps.c (revision 82a97355ab429988641d166a794ccabf36ce41b0)
/*
 * Copyright (c) 2022-2025, Arm Limited. All rights reserved.
 * Copyright (c) 2023, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Dispatch synchronous system register traps from lower ELs.
 */

#include <arch_features.h>
#include <arch_helpers.h>
#include <bl31/sync_handle.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>

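/*
 * Dispatch a trapped system register access (exception syndrome EC=0x18) to
 * the matching platform handler, based on the opcode bits extracted from
 * ESR_EL3. Returns TRAP_RET_UNHANDLED if no handler claims the access.
 */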
int handle_sysreg_trap(uint64_t esr_el3, cpu_context_t *ctx,
			u_register_t flags __unused)
{
	uint64_t __unused opcode = esr_el3 & ISS_SYSREG_OPCODE_MASK;

#if ENABLE_FEAT_RNG_TRAP
	if ((opcode == ISS_SYSREG_OPCODE_RNDR) || (opcode == ISS_SYSREG_OPCODE_RNDRRS)) {
		return plat_handle_rng_trap(esr_el3, ctx);
	}
#endif

#if IMPDEF_SYSREG_TRAP
	if ((opcode & ISS_SYSREG_OPCODE_IMPDEF) == ISS_SYSREG_OPCODE_IMPDEF) {
		return plat_handle_impdef_trap(esr_el3, ctx);
	}
#endif

	return TRAP_RET_UNHANDLED;
}

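/* Report whether HCR_EL2.TGE is set (only considered when FEAT_VHE is present). */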
static bool is_tge_enabled(void)
{
	u_register_t hcr_el2 = read_hcr_el2();

	return ((is_feat_vhe_present()) && ((hcr_el2 & HCR_TGE_BIT) != 0U));
}

/*
 * This function ensures that an Undefined exception is never injected into a
 * non-existent S-EL2. That could otherwise happen when the trap originates
 * from S-EL1/S-EL0 while the non-secure world runs with HCR_EL2.TGE set:
 * EL3 does not save/restore EL2 registers when only one world has EL2
 * enabled, so reading HCR_EL2.TGE here would return the NS world's value.
 */
static bool is_secure_trap_without_sel2(u_register_t scr)
{
	return ((scr & (SCR_NS_BIT | SCR_EEL2_BIT)) == 0);
}

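/*
 * Select the EL that should receive the injected exception: the same EL if the
 * trap came from EL2 or above, EL2 if TGE is in force and the trap is not from
 * a secure world without S-EL2, otherwise EL1.
 */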
static unsigned int target_el(unsigned int from_el, u_register_t scr)
{
	if (from_el > MODE_EL1) {
		return from_el;
	} else if (is_tge_enabled() && !is_secure_trap_without_sel2(scr)) {
		return MODE_EL2;
	} else {
		return MODE_EL1;
	}
}

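/*
 * Compute the vector entry address for the injected exception: when the
 * exception stays at the same EL, pick the SP0 or SPx offset based on
 * SPSR.SP; otherwise use the "lower EL using AArch64" offset of the
 * target's VBAR.
 */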
static u_register_t get_elr_el3(u_register_t spsr_el3, u_register_t vbar, unsigned int target_el)
{
	unsigned int outgoing_el = GET_EL(spsr_el3);
	u_register_t elr_el3 = 0;

	if (outgoing_el == target_el) {
		/*
		 * Target EL is either EL1 or EL2, LSB can tell us the SPSel
		 *  Thread mode  : 0
		 *  Handler mode : 1
		 */
		if ((spsr_el3 & (MODE_SP_MASK << MODE_SP_SHIFT)) == MODE_SP_ELX) {
			elr_el3 = vbar + CURRENT_EL_SPX;
		} else {
			elr_el3 = vbar + CURRENT_EL_SP0;
		}
	} else {
		/* Vector address for lower EL using AArch64 */
		elr_el3 = vbar + LOWER_EL_AARCH64;
	}

	return elr_el3;
}

/*
 * Explicitly create all bits of SPSR to get PSTATE at exception return.
 *
 * The code is based on "AArch64.exceptions.takeexception" described in
 * DDI0602 revision 2025-03.
 * "https://developer.arm.com/documentation/ddi0602/2025-03/Shared-Pseudocode/
 * aarch64-exceptions-takeexception"
 *
 * NOTE: This piece of code must be reviewed every release against the latest
 * takeexception sequence to ensure that we keep up with new arch features that
 * affect the PSTATE.
 *
 * TF-A 2.13 release review
 *
 * Review of version 2025-03 indicates we are missing support for one feature.
 *  - FEAT_UINJ (2024 extension)
 */
u_register_t create_spsr(u_register_t old_spsr, unsigned int target_el)
{
	u_register_t new_spsr = 0;
	u_register_t sctlr;

	/* Set M bits for target EL in AArch64 mode, also get sctlr */
	if (target_el == MODE_EL2) {
		sctlr = read_sctlr_el2();
		new_spsr |= (SPSR_M_AARCH64 << SPSR_M_SHIFT) | SPSR_M_EL2H;
	} else {
		sctlr = read_sctlr_el1();
		new_spsr |= (SPSR_M_AARCH64 << SPSR_M_SHIFT) | SPSR_M_EL1H;
	}

	/* Mask all exceptions, update DAIF bits */
	new_spsr |= SPSR_DAIF_MASK << SPSR_DAIF_SHIFT;

	/* If FEAT_BTI is present, clear BTYPE bits */
	new_spsr |= old_spsr & (SPSR_BTYPE_MASK_AARCH64 << SPSR_BTYPE_SHIFT_AARCH64);
	if (is_feat_bti_present()) {
		new_spsr &= ~(SPSR_BTYPE_MASK_AARCH64 << SPSR_BTYPE_SHIFT_AARCH64);
	}

	/* If SSBS is implemented, take the value from SCTLR.DSSBS */
	new_spsr |= old_spsr & SPSR_SSBS_BIT_AARCH64;
	if (is_feat_ssbs_present()) {
		if ((sctlr & SCTLR_DSSBS_BIT) != 0U) {
			new_spsr |= SPSR_SSBS_BIT_AARCH64;
		} else {
			new_spsr &= ~SPSR_SSBS_BIT_AARCH64;
		}
	}

	/* If FEAT_NMI is implemented, ALLINT = !(SCTLR.SPINTMASK) */
	new_spsr |= old_spsr & SPSR_ALLINT_BIT_AARCH64;
	if (is_feat_nmi_present()) {
		if ((sctlr & SCTLR_SPINTMASK_BIT) != 0U) {
			new_spsr &= ~SPSR_ALLINT_BIT_AARCH64;
		} else {
			new_spsr |= SPSR_ALLINT_BIT_AARCH64;
		}
	}

	/* Clear PSTATE.IL bit explicitly */
	new_spsr &= ~SPSR_IL_BIT;

	/* Clear PSTATE.SS bit explicitly */
	new_spsr &= ~SPSR_SS_BIT;

	/* Update PSTATE.PAN bit */
	new_spsr |= old_spsr & SPSR_PAN_BIT;
	if (is_feat_pan_present() &&
	    ((target_el == MODE_EL1) || ((target_el == MODE_EL2) && is_tge_enabled())) &&
	    ((sctlr & SCTLR_SPAN_BIT) == 0U)) {
		new_spsr |= SPSR_PAN_BIT;
	}

	/* Clear UAO bit if FEAT_UAO is present */
	new_spsr |= old_spsr & SPSR_UAO_BIT_AARCH64;
	if (is_feat_uao_present()) {
		new_spsr &= ~SPSR_UAO_BIT_AARCH64;
	}

	/* DIT bits are unchanged */
	new_spsr |= old_spsr & SPSR_DIT_BIT;

	/* If FEAT_MTE2 is implemented mask tag faults by setting TCO bit */
	new_spsr |= old_spsr & SPSR_TCO_BIT_AARCH64;
	if (is_feat_mte2_present()) {
		new_spsr |= SPSR_TCO_BIT_AARCH64;
	}

	/* NZCV bits are unchanged */
	new_spsr |= old_spsr & SPSR_NZCV;

	/* If FEAT_EBEP is present set PM bit */
	new_spsr |= old_spsr & SPSR_PM_BIT_AARCH64;
	if (is_feat_ebep_present()) {
		new_spsr |= SPSR_PM_BIT_AARCH64;
	}

	/* If FEAT_SEBEP is present clear PPEND bit */
	new_spsr |= old_spsr & SPSR_PPEND_BIT;
	if (is_feat_sebep_present()) {
		new_spsr &= ~SPSR_PPEND_BIT;
	}

	/* If FEAT_GCS is present, update EXLOCK bit */
	new_spsr |= old_spsr & SPSR_EXLOCK_BIT_AARCH64;
	if (is_feat_gcs_present()) {
		u_register_t gcscr;
		if (target_el == MODE_EL2) {
			gcscr = read_gcscr_el2();
		} else {
			gcscr = read_gcscr_el1();
		}
		new_spsr |= (gcscr & GCSCR_EXLOCK_EN_BIT) ? SPSR_EXLOCK_BIT_AARCH64 : 0;
	}

	/* If FEAT_PAUTH_LR present then zero the PACM bit. */
	new_spsr |= old_spsr & SPSR_PACM_BIT_AARCH64;
	if (is_feat_pauth_lr_present()) {
		new_spsr &= ~SPSR_PACM_BIT_AARCH64;
	}

	return new_spsr;
}

/*
 * Handler for injecting an Undefined exception into a lower EL when that EL
 * accesses a system register that the (older) EL3 firmware is unaware of.
 *
 * This is a safety net to avoid EL3 panics caused by system register accesses
 * that trigger an exception with syndrome EC=0x18.
 */
void inject_undef64(cpu_context_t *ctx)
{
	u_register_t esr = (EC_UNKNOWN << ESR_EC_SHIFT) | ESR_IL_BIT;
	el3_state_t *state = get_el3state_ctx(ctx);
	u_register_t elr_el3 = read_ctx_reg(state, CTX_ELR_EL3);
	u_register_t old_spsr = read_ctx_reg(state, CTX_SPSR_EL3);
	u_register_t scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
	u_register_t new_spsr = 0;
	unsigned int to_el = target_el(GET_EL(old_spsr), scr_el3);

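	/*
	 * Populate the target EL's ELR/ESR/SPSR as hardware would when taking
	 * the exception there, then redirect EL3's return address to the
	 * target EL's vector entry.
	 */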
	if (to_el == MODE_EL2) {
		write_elr_el2(elr_el3);
		elr_el3 = get_elr_el3(old_spsr, read_vbar_el2(), to_el);
		write_esr_el2(esr);
		write_spsr_el2(old_spsr);
	} else {
		write_elr_el1(elr_el3);
		elr_el3 = get_elr_el3(old_spsr, read_vbar_el1(), to_el);
		write_esr_el1(esr);
		write_spsr_el1(old_spsr);
	}

	new_spsr = create_spsr(old_spsr, to_el);

	write_ctx_reg(state, CTX_SPSR_EL3, new_spsr);
	write_ctx_reg(state, CTX_ELR_EL3, elr_el3);
}
249