/*
 * Copyright (c) 2013-2026, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/interrupt_mgmt.h>
#include <bl31/sync_handle.h>
#include <bl31/ea_handle.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <lib/cpus/cpu_ops.h>
#include <plat/common/platform.h>

/*******************************************************************************
 * The 'rt_svc_descs' array holds the runtime service descriptors exported by
 * services by placing them in the 'rt_svc_descs' linker section.
 * The 'rt_svc_descs_indices' array holds the index of a descriptor in the
 * 'rt_svc_descs' array. When an SMC arrives, the OEN[29:24] bits and the call
 * type[31] bit in the function id are combined to get an index into the
 * 'rt_svc_descs_indices' array. This gives the index of the descriptor in the
 * 'rt_svc_descs' array which contains the SMC handler.
 ******************************************************************************/
uint8_t rt_svc_descs_indices[MAX_RT_SVCS];

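/*
 * Services populate that section with DECLARE_RT_SVC(). A sketch, with
 * illustrative names for the service and its callbacks:
 *
 *   DECLARE_RT_SVC(my_svc, OEN_OEM_START, OEN_OEM_END, SMC_TYPE_FAST,
 *                  my_svc_setup, my_svc_smc_handler);
 */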
void __dead2 report_unhandled_exception(void);

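/* Number of descriptors placed in the 'rt_svc_descs' linker section. */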
#define RT_SVC_DECS_NUM	((RT_SVC_DESCS_END - RT_SVC_DESCS_START)\
				/ sizeof(rt_svc_desc_t))

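/*
 * Look up the handler registered for 'smc_fid'. Returns false if no runtime
 * service claims the OEN/call-type combination of this function ID.
 */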
static bool get_handler_for_smc_fid(uint32_t smc_fid, rt_svc_handle_t *handler)
{
	unsigned int index;
	unsigned int idx;
	const rt_svc_desc_t *rt_svc_descs;

	idx = get_unique_oen_from_smc_fid(smc_fid);
	assert(idx < MAX_RT_SVCS);
	index = rt_svc_descs_indices[idx];

	if (index >= RT_SVC_DECS_NUM) {
		return false;
	}

	rt_svc_descs = (rt_svc_desc_t *) RT_SVC_DESCS_START;
	assert(handler != NULL);
	*handler = rt_svc_descs[index].handle;
	assert(*handler != NULL);

	return true;
}

#if __aarch64__
#include <lib/extensions/ras_arch.h>

#if FFH_SUPPORT
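/*
 * Delegate an External Abort taken from a lower EL to the platform handler,
 * detecting double faults across nested invocations.
 */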
static void ea_proceed(uint32_t ea_reason, u_register_t esr_el3, cpu_context_t *ctx)
{
	/*
	 * If this is a double fault, invoke the platform handler. A double
	 * fault arises when the platform is already handling a lower EL fault
	 * in plat_ea_handler() and another fault traps into EL3, which can
	 * happen because FFH_SUPPORT is enabled for the platform.
	 */
	el3_state_t *state = get_el3state_ctx(ctx);

	if (read_ctx_reg(state, CTX_DOUBLE_FAULT_ESR) != 0) {
		return plat_handle_double_fault(ea_reason, esr_el3);
	}

	/*
	 * Record ESR_EL3 in CTX_DOUBLE_FAULT_ESR so that, if another fault
	 * occurs in a lower EL, the next invocation of ea_proceed() treats it
	 * as a double fault while the original ESR_EL3 is preserved.
	 */
	write_ctx_reg(state, CTX_DOUBLE_FAULT_ESR, esr_el3);

	/* Call the platform External Abort handler. */
	plat_ea_handler(ea_reason, esr_el3, NULL, ctx, read_scr_el3() & SCR_NS_BIT);

	/* Clear the double fault storage. */
	write_ctx_reg(state, CTX_DOUBLE_FAULT_ESR, 0);
}

/*
 * This function handles SErrors from lower ELs.
 *
 * It delegates handling of the EA to the platform handler and, upon
 * successfully handling the EA, exits EL3.
 */
void handler_lower_el_async_ea(cpu_context_t *ctx)
{
	u_register_t esr_el3 = read_esr_el3();

	if (is_feat_ras_supported()) {
		/* Should only be invoked for SErrors. */
		assert(EXTRACT(ESR_EC, esr_el3) == EC_SERROR);

		/*
		 * Check for an Implementation Defined Syndrome. If so, skip
		 * checking for an Uncontainable error type in the syndrome as
		 * the format is unknown.
		 */
		if ((esr_el3 & SERROR_IDS_BIT) == 0U) {
			/*
			 * AET is only valid when DFSC is 0x11. Route to the
			 * platform fatal error handler if this is an
			 * uncontainable error type.
			 */
			if (EXTRACT(EABORT_DFSC, esr_el3) == DFSC_SERROR &&
			    EXTRACT(EABORT_AET, esr_el3) == ERROR_STATUS_UET_UC) {
				return plat_handle_uncontainable_ea();
			}
		}
	}

	return ea_proceed(ERROR_EA_ASYNC, esr_el3, ctx);
}

#endif /* FFH_SUPPORT */

/*
 * This function handles FIQ and IRQ interrupts, i.e. EL3, S-EL1 and NS
 * interrupts.
 */
void handler_interrupt_exception(cpu_context_t *ctx)
{
	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	uint32_t type = plat_ic_get_pending_interrupt_type();

	if (type == INTR_TYPE_INVAL) {
		return;
	}

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be caused by any of the following
	 * conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	interrupt_type_handler_t handler = get_interrupt_type_handler(type);

	if (handler == NULL) {
		return;
	}

	handler(INTR_ID_UNAVAILABLE, read_scr_el3() & SCR_NS_BIT, ctx, NULL);
}

static void smc_unknown(cpu_context_t *ctx)
{
	/*
	 * Unknown SMC call. Populate the return value with SMC_UNK and call
	 * el3_exit(), which will restore the remaining architectural state,
	 * i.e. the SYS, GP and PAuth registers (if any), prior to issuing the
	 * ERET to the desired lower EL.
	 */
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0, SMC_UNK);
}

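/*
 * Assemble the 'flags' argument passed to SMC handlers from the caller's
 * security state in SCR_EL3 and the SVE hint bit in the function ID.
 */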
static u_register_t get_flags(uint32_t smc_fid, u_register_t scr_el3)
{
	u_register_t flags = 0;

	/* Copy the SCR_EL3.NS bit to the flags to indicate the caller's security. */
	flags |= scr_el3 & SCR_NS_BIT;
#if ENABLE_RME
	/*
	 * Copy the SCR_EL3.NSE bit to the flags to indicate the caller's
	 * security. Shift the copied SCR_EL3.NSE bit by 5 to create space for
	 * the SCR_EL3.NS bit: bit 5 of the flags corresponds to the
	 * SCR_EL3.NSE bit.
	 */
	flags |= ((scr_el3 & SCR_NSE_BIT) >> SCR_NSE_SHIFT) << 5;
#endif /* ENABLE_RME */

	/*
	 * Per SMCCCv1.3, a caller can set the SVE hint bit in the SMC FID
	 * passed through x0. Copy the SVE hint bit to the flags; the bit is
	 * masked out of the smc_fid passed to the standard service dispatcher.
	 * A service/dispatcher can retrieve the SVE hint bit state from the
	 * flags using the appropriate helper.
	 */
	flags |= smc_fid & MASK(FUNCID_SVE_HINT);

	return flags;
}

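/*
 * Dispatch an SMC from a lower EL to the runtime service that owns its
 * function ID.
 */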
static void sync_handler(cpu_context_t *ctx, uint32_t smc_fid)
{
	u_register_t scr_el3 = read_scr_el3();
	rt_svc_handle_t handler;

	/*
	 * Per the SMCCC documentation, bits [23:17] must be zero for Fast
	 * SMCs. These values are reserved for future use. Ensure that these
	 * bits are zero; if not, report an unknown SMC.
	 */
	if (EXTRACT(FUNCID_TYPE, smc_fid) == SMC_TYPE_FAST &&
	    EXTRACT(FUNCID_FC_RESERVED, smc_fid) != 0) {
		return smc_unknown(ctx);
	}

	/* Capture the flags (including the SVE hint bit) before masking the FID. */
	u_register_t flags = get_flags(smc_fid, scr_el3);

	smc_fid &= ~MASK(FUNCID_SVE_HINT);

	/* Get the descriptor using the index. */
	if (!get_handler_for_smc_fid(smc_fid, &handler)) {
		return smc_unknown(ctx);
	}

	u_register_t x1, x2, x3, x4;

	get_smc_params_from_ctx(ctx, x1, x2, x3, x4);
	handler(smc_fid, x1, x2, x3, x4, NULL, ctx, flags);
}

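/*
 * Top-level handler for synchronous exceptions taken from lower ELs: SMCs,
 * system register traps and, with FFH support, external aborts.
 */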
void handler_sync_exception(cpu_context_t *ctx)
{
	uint32_t smc_fid = (uint32_t)read_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0);
	u_register_t esr_el3 = read_esr_el3();
	u_register_t exc_class = EXTRACT(ESR_EC, esr_el3);
	el3_state_t *state = get_el3state_ctx(ctx);

	if (exc_class == EC_AARCH32_SMC || exc_class == EC_AARCH64_SMC) {
		if (exc_class == EC_AARCH32_SMC && EXTRACT(FUNCID_CC, smc_fid) != 0) {
			return smc_unknown(ctx);
		}
		return sync_handler(ctx, smc_fid);
	} else if (exc_class == EC_AARCH64_SYS) {
		int ret = handle_sysreg_trap(esr_el3, ctx, get_flags(smc_fid, read_scr_el3()));

		/*
		 * Unhandled trap: inject an UNDEF into the lower EL. This is
		 * only supported for lower ELs in AArch64 mode.
		 */
		if (ret == TRAP_RET_UNHANDLED) {
			if (read_spsr_el3() & MASK(SPSR_M)) {
				ERROR("Trapped an instruction from AArch32 %s mode\n",
				      get_mode_str((unsigned int)GET_M32(read_spsr_el3())));
				ERROR("at address 0x%lx, reason 0x%lx\n", read_elr_el3(), read_esr_el3());
				panic();
			}
			inject_undef64(ctx);
		} else if (ret == TRAP_RET_CONTINUE) {
			/* Advance the PC to continue after the trapped instruction. */
			write_ctx_reg(state, CTX_ELR_EL3, read_ctx_reg(state, CTX_ELR_EL3) + 4);
		} /* Otherwise return to the trapping instruction, repeating it. */
		return;
	/* With FFH support, try to handle external aborts from lower ELs. */
	} else if ((exc_class == EC_IABORT_LOWER_EL || exc_class == EC_DABORT_LOWER_EL)
		   && ((read_ctx_reg(state, CTX_SCR_EL3) & SCR_EA_BIT) != 0UL)) {
#if FFH_SUPPORT
		/*
		 * Check for an Uncontainable error type. If so, route to the
		 * platform fatal error handler rather than the generic EA one.
		 * The SET field is only meaningful when the DFSC indicates a
		 * synchronous external abort.
		 */
		if (is_feat_ras_supported() &&
		    EXTRACT(EABORT_SET, esr_el3) == ERROR_STATUS_SET_UC &&
		    EXTRACT(EABORT_DFSC, esr_el3) == SYNC_EA_FSC) {
			return plat_handle_uncontainable_ea();
		}
		/* Pass the exception reason and syndrome to the platform handler. */
		return ea_proceed(ERROR_EA_SYNC, esr_el3, ctx);
#endif /* FFH_SUPPORT */
	}

	/* Synchronous exceptions other than the above are unhandled. */
	report_unhandled_exception();
}
#endif /* __aarch64__ */

/*******************************************************************************
 * Function to invoke the registered `handle` corresponding to the smc_fid in
 * AArch32 mode.
 ******************************************************************************/
uintptr_t handle_runtime_svc(uint32_t smc_fid,
			     void *cookie,
			     void *handle,
			     unsigned int flags)
{
	u_register_t x1, x2, x3, x4;
	rt_svc_handle_t handler;

	assert(handle != NULL);

	if (!get_handler_for_smc_fid(smc_fid, &handler)) {
		SMC_RET1(handle, SMC_UNK);
	}

	get_smc_params_from_ctx(handle, x1, x2, x3, x4);

	return handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags);
}

/*******************************************************************************
 * Simple routine to sanity check a runtime service descriptor before using it
 ******************************************************************************/
static int32_t validate_rt_svc_desc(const rt_svc_desc_t *desc)
{
	if (desc == NULL) {
		return -EINVAL;
	}
	if (desc->start_oen > desc->end_oen) {
		return -EINVAL;
	}
	if (desc->end_oen >= OEN_LIMIT) {
		return -EINVAL;
	}
	if ((desc->call_type != SMC_TYPE_FAST) &&
	    (desc->call_type != SMC_TYPE_YIELD)) {
		return -EINVAL;
	}
	/* A runtime service with neither an init nor a handle function makes no sense. */
	if ((desc->init == NULL) && (desc->handle == NULL)) {
		return -EINVAL;
	}
	return 0;
}

/*******************************************************************************
 * This function calls the initialisation routine in the descriptor exported by
 * a runtime service. Once a descriptor has been validated, its start & end
 * owning entity numbers and the call type are combined to form a unique oen.
 * The unique oen is used as an index into the 'rt_svc_descs_indices' array.
 * The index of the runtime service descriptor is stored at this index.
 ******************************************************************************/
void __init runtime_svc_init(void)
{
	int rc = 0;
	uint8_t index, start_idx, end_idx;
	rt_svc_desc_t *rt_svc_descs;

	/* Assert that the number of descriptors detected is below the maximum. */
	assert((RT_SVC_DESCS_END >= RT_SVC_DESCS_START) &&
	       (RT_SVC_DECS_NUM < MAX_RT_SVCS));

	/* If no runtime services are implemented then simply bail out. */
	if (RT_SVC_DECS_NUM == 0U) {
		return;
	}

	/* Initialise the internal variables to an invalid state. */
	(void)memset(rt_svc_descs_indices, -1, sizeof(rt_svc_descs_indices));

	rt_svc_descs = (rt_svc_desc_t *) RT_SVC_DESCS_START;
	for (index = 0U; index < RT_SVC_DECS_NUM; index++) {
		rt_svc_desc_t *service = &rt_svc_descs[index];

		/*
		 * An invalid descriptor is an error condition since it is
		 * difficult to predict the system behaviour in the absence
		 * of this service.
		 */
		rc = validate_rt_svc_desc(service);
		if (rc != 0) {
			ERROR("Invalid runtime service descriptor %p\n",
			      (void *) service);
			panic();
		}

		/*
		 * The runtime service may have separate rt_svc_desc_t
		 * structures for its fast SMCs and yielding SMCs. Since the
		 * service itself needs to be initialised only once, only one
		 * of them will have an initialisation routine defined. Call
		 * the initialisation routine for this runtime service, if it
		 * is defined.
		 */
		if (service->init != NULL) {
			rc = service->init();
			if (rc != 0) {
				ERROR("Error initializing runtime service %s\n",
				      service->name);
				continue;
			}
		}

		/*
		 * Fill the indices corresponding to the start and end
		 * owning entity numbers with the index of the
		 * descriptor which will handle the SMCs for this owning
		 * entity range.
		 */
		start_idx = (uint8_t)get_unique_oen(service->start_oen,
						    service->call_type);
		end_idx = (uint8_t)get_unique_oen(service->end_oen,
						  service->call_type);
		assert(start_idx <= end_idx);
		assert(end_idx < MAX_RT_SVCS);
		for (; start_idx <= end_idx; start_idx++) {
			rt_svc_descs_indices[start_idx] = index;
		}
	}
}