/*
 * Copyright (c) 2013-2026, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <string.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <bl31/ea_handle.h>
#include <bl31/interrupt_mgmt.h>
#include <bl31/sync_handle.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <lib/cpus/cpu_ops.h>
#include <plat/common/platform.h>

/*******************************************************************************
 * The 'rt_svc_descs' array holds the runtime service descriptors exported by
 * services by placing them in the 'rt_svc_descs' linker section.
 * The 'rt_svc_descs_indices' array holds the index of a descriptor in the
 * 'rt_svc_descs' array. When an SMC arrives, the OEN[29:24] bits and the call
 * type[31] bit in the function id are combined to get an index into the
 * 'rt_svc_descs_indices' array. This gives the index of the descriptor in the
 * 'rt_svc_descs' array which contains the SMC handler.
 ******************************************************************************/
uint8_t rt_svc_descs_indices[MAX_RT_SVCS];

void __dead2 report_unhandled_exception(void);

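/* Number of runtime service descriptors placed in the 'rt_svc_descs' section. */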
#define RT_SVC_DECS_NUM	((RT_SVC_DESCS_END - RT_SVC_DESCS_START)	\
				/ sizeof(rt_svc_desc_t))

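/*
 * Look up the registered SMC handler for the given function ID. Returns false
 * if no runtime service has claimed the OEN/call-type combination encoded in
 * the function ID.
 */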
static bool get_handler_for_smc_fid(uint32_t smc_fid, rt_svc_handle_t *handler)
{
	unsigned int index;
	unsigned int idx;
	const rt_svc_desc_t *rt_svc_descs;

	idx = get_unique_oen_from_smc_fid(smc_fid);
	assert(idx < MAX_RT_SVCS);
	index = rt_svc_descs_indices[idx];

	if (index >= RT_SVC_DECS_NUM) {
		return false;
	}

	rt_svc_descs = (rt_svc_desc_t *) RT_SVC_DESCS_START;
	assert(handler != NULL);
	*handler = rt_svc_descs[index].handle;
	assert(*handler != NULL);

	return true;
}

#if __aarch64__
#include <lib/extensions/ras_arch.h>

#if FFH_SUPPORT
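/*
 * Delegate an External Abort taken from a lower EL to the platform handler.
 * A non-zero ESR saved in the EL3 context marks a fault that arrived while a
 * previous one was still being handled, i.e. a double fault.
 */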
static void ea_proceed(uint32_t ea_reason, u_register_t esr_el3,
		       u_register_t scr_el3, cpu_context_t *ctx)
{
	/*
	 * If this is a double fault, invoke the platform handler. A double
	 * fault arises when the platform is handling a lower EL fault in
	 * plat_ea_handler() and another fault traps into EL3, which happens
	 * because FFH_SUPPORT is enabled for the platform.
	 */
	el3_state_t *state = get_el3state_ctx(ctx);

	if (read_ctx_reg(state, CTX_DOUBLE_FAULT_ESR) != 0) {
		return plat_handle_double_fault(ea_reason, esr_el3);
	}

	/*
	 * Save ESR_EL3 in CTX_DOUBLE_FAULT_ESR so that, if another fault
	 * happens in a lower EL, the next invocation of ea_proceed() treats
	 * it as a double fault while still preserving the original ESR_EL3.
	 */
	write_ctx_reg(state, CTX_DOUBLE_FAULT_ESR, esr_el3);

	/* Call the platform External Abort handler. */
	plat_ea_handler(ea_reason, esr_el3, NULL, ctx, scr_el3 & SCR_NS_BIT);

	/* Clear the double fault storage. */
	write_ctx_reg(state, CTX_DOUBLE_FAULT_ESR, 0);
}

/*
 * This function handles SErrors from lower ELs.
 *
 * It delegates handling of the EA to the platform handler and, once the EA
 * has been handled successfully, exits EL3.
 */
void handler_lower_el_async_ea(cpu_context_t *ctx)
{
	el3_state_t *state = get_el3state_ctx(ctx);
	u_register_t scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
	u_register_t esr_el3 = read_esr_el3();

	if (is_feat_ras_supported()) {
		/* This handler should only be invoked for SErrors. */
		assert(EXTRACT(ESR_EC, esr_el3) == EC_SERROR);

		/*
		 * Check for Implementation Defined Syndrome. If so, skip
		 * checking Uncontainable error type from the syndrome as the
		 * format is unknown.
		 */
		if ((esr_el3 & SERROR_IDS_BIT) == 0U) {
			/*
			 * AET is only valid when DFSC is 0x11. Route to the
			 * platform fatal error handler if this is an
			 * uncontainable error type.
			 */
			if (EXTRACT(EABORT_DFSC, esr_el3) == DFSC_SERROR &&
			    EXTRACT(EABORT_AET, esr_el3) == ERROR_STATUS_UET_UC) {
				return plat_handle_uncontainable_ea();
			}
		}
	}

	return ea_proceed(ERROR_EA_ASYNC, esr_el3, scr_el3, ctx);
}

#endif /* FFH_SUPPORT */

/*
 * This function handles FIQ and IRQ interrupts, i.e. EL3, S-EL1 and NS
 * interrupts.
 */
void handler_interrupt_exception(cpu_context_t *ctx)
{
	el3_state_t *state = get_el3state_ctx(ctx);
	u_register_t scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	uint32_t type = plat_ic_get_pending_interrupt_type();
	if (type == INTR_TYPE_INVAL) {
		return;
	}

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be because of the following conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	interrupt_type_handler_t handler = get_interrupt_type_handler(type);
	if (handler == NULL) {
		return;
	}

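	/*
	 * The interrupt ID is not read at this point; INTR_ID_UNAVAILABLE is
	 * passed so that the handler queries the interrupt controller itself
	 * if it needs the ID.
	 */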
	handler(INTR_ID_UNAVAILABLE, scr_el3 & SCR_NS_BIT, ctx, NULL);
}

static void smc_unknown(cpu_context_t *ctx)
{
	/*
	 * Unknown SMC call. Populate the return value with SMC_UNK and call
	 * el3_exit(), which will restore the remaining architectural state,
	 * i.e. SYS, GP and PAuth registers (if any), prior to issuing the
	 * ERET to the desired lower EL.
	 */
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0, SMC_UNK);
}

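/*
 * Assemble the 'flags' argument passed to SMC handlers from the caller's
 * SCR_EL3 value and the SVE hint bit in the function ID.
 */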
static u_register_t get_flags(uint32_t smc_fid, u_register_t scr_el3)
{
	u_register_t flags = 0;

	/* Copy the SCR_EL3.NS bit to indicate the caller's security state. */
	flags |= scr_el3 & SCR_NS_BIT;
#if ENABLE_RMM
	/*
	 * Copy the SCR_EL3.NSE bit to the flags to indicate the caller's
	 * security state. The copied bit is shifted to bit 5 of the flags,
	 * leaving bit 0 for the SCR_EL3.NS bit copied above.
	 */
	flags |= ((scr_el3 & SCR_NSE_BIT) >> SCR_NSE_SHIFT) << 5;
#endif /* ENABLE_RMM */

	/*
	 * Per SMCCCv1.3 a caller can set the SVE hint bit in the SMC FID passed
	 * through x0. Copy the SVE hint bit to flags and mask the bit in
	 * smc_fid passed to the standard service dispatcher. A
	 * service/dispatcher can retrieve the SVE hint bit state from flags
	 * using the appropriate helper.
	 */
	flags |= smc_fid & MASK(FUNCID_SVE_HINT);

	return flags;
}

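/*
 * Validate an SMC from a lower EL and dispatch it to the runtime service that
 * owns its function ID. Malformed or unclaimed calls are answered with
 * SMC_UNK.
 */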
static void sync_handler(cpu_context_t *ctx, uint32_t smc_fid,
			 u_register_t scr_el3)
{
	rt_svc_handle_t handler;

	/*
	 * Per the SMCCC documentation, bits [23:17] must be zero for Fast
	 * SMCs. Other values are reserved for future use. Ensure that these
	 * bits are zero; if not, report the call as an unknown SMC.
	 */
	if (EXTRACT(FUNCID_TYPE, smc_fid) == SMC_TYPE_FAST &&
	    EXTRACT(FUNCID_FC_RESERVED, smc_fid) != 0) {
		return smc_unknown(ctx);
	}

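	/*
	 * Capture the handler flags while the SVE hint bit is still present in
	 * the function ID, then clear the hint bit so that services see the
	 * canonical function ID.
	 */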
	u_register_t flags = get_flags(smc_fid, scr_el3);

	smc_fid &= ~MASK(FUNCID_SVE_HINT);

	/* Get the handler from the descriptor that owns this function ID. */
	if (!get_handler_for_smc_fid(smc_fid, &handler)) {
		return smc_unknown(ctx);
	}

	u_register_t x1, x2, x3, x4;
	get_smc_params_from_ctx(ctx, x1, x2, x3, x4);
	handler(smc_fid, x1, x2, x3, x4, NULL, ctx, flags);
}

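/*
 * Top-level C handler for synchronous exceptions taken from lower ELs: SMCs,
 * trapped system register accesses and, when FFH_SUPPORT is enabled,
 * synchronous External Aborts. Any other trap leads to UNDEF injection for
 * AArch64 callers or is reported as an unhandled exception for AArch32
 * callers.
 */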
void handler_sync_exception(cpu_context_t *ctx)
{
	uint32_t smc_fid = read_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0);
	u_register_t esr_el3 = read_esr_el3();
	u_register_t exc_class = EXTRACT(ESR_EC, esr_el3);
	el3_state_t *state = get_el3state_ctx(ctx);
	u_register_t scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

	if (exc_class == EC_AARCH32_SMC || exc_class == EC_AARCH64_SMC) {
		if (exc_class == EC_AARCH32_SMC && EXTRACT(FUNCID_CC, smc_fid) != 0) {
			return smc_unknown(ctx);
		}
		return sync_handler(ctx, smc_fid, scr_el3);
	} else if (exc_class == EC_AARCH64_SYS) {
		int ret = handle_sysreg_trap(esr_el3, ctx, get_flags(smc_fid, scr_el3));

		if (ret == TRAP_RET_CONTINUE) {
			/* Advance the PC to continue after the instruction. */
			write_ctx_reg(state, CTX_ELR_EL3,
				      read_ctx_reg(state, CTX_ELR_EL3) + 4);
			return;
		} else if (ret == TRAP_RET_REPEAT) {
			/* Continue at the same instruction. */
			return;
		}
#if FFH_SUPPORT
	/* With FFH support, try to handle lower EL EA exceptions. */
	} else if ((exc_class == EC_IABORT_LOWER_EL || exc_class == EC_DABORT_LOWER_EL)
		   && ((scr_el3 & SCR_EA_BIT) != 0UL)) {
		/*
		 * Check for the Uncontainable error type. If so, route to the
		 * platform fatal error handler rather than the generic EA one.
		 */
		if (is_feat_ras_supported() &&
		    (EXTRACT(EABORT_SET, esr_el3) == ERROR_STATUS_SET_UC) &&
		    (EXTRACT(EABORT_DFSC, esr_el3) == SYNC_EA_FSC)) {
			return plat_handle_uncontainable_ea();
		}
		/* Set up the exception class and syndrome arguments for the platform handler. */
		return ea_proceed(ERROR_EA_SYNC, esr_el3, scr_el3, ctx);
#endif /* FFH_SUPPORT */
	}

	/*
	 * Unhandled trap: UNDEF injection is not provided for lower ELs in
	 * AArch32 mode.
	 */
	if (read_spsr_el3() & MASK(SPSR_M)) {
		ERROR("Trapped an instruction from AArch32 %s mode\n",
		      get_mode_str((unsigned int)GET_M32(read_spsr_el3())));
		ERROR("at address 0x%lx, reason 0x%lx\n", read_elr_el3(), esr_el3);
		report_unhandled_exception();
	}

	/*
	 * UNDEF injection by default for AArch64 on any otherwise unhandled
	 * trap. This is expected to include:
	 * 0b000111 - SME/SVE instructions and registers
	 * 0b001001 - PAUTH instructions
	 * 0b001010 - FEAT_LS64/catch all instructions
	 * 0b010100 - MSRR/MRSS
	 * 0b011000 - MSR/MRS
	 * 0b011001 - SVE instructions and registers
	 * 0b011101 - SME instructions and illegal ZA execution
	 * 0b101100 - floating point
	 *
	 * Exception syndromes not listed are either unreachable or the
	 * erroneous UNDEF injection is accepted as a better alternative to a
	 * panic at EL3.
	 */
	inject_undef64(ctx);
}
#endif /* __aarch64__ */

/*******************************************************************************
 * Function to invoke the registered `handle` corresponding to the smc_fid in
 * AArch32 mode.
 ******************************************************************************/
uintptr_t handle_runtime_svc(uint32_t smc_fid,
			     void *cookie,
			     void *handle,
			     unsigned int flags)
{
	u_register_t x1, x2, x3, x4;
	rt_svc_handle_t handler;

	assert(handle != NULL);

	if (!get_handler_for_smc_fid(smc_fid, &handler)) {
		SMC_RET1(handle, SMC_UNK);
	}

	get_smc_params_from_ctx(handle, x1, x2, x3, x4);

	return handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags);
}

/*******************************************************************************
 * Simple routine to sanity check a runtime service descriptor before using it
 ******************************************************************************/
static int32_t validate_rt_svc_desc(const rt_svc_desc_t *desc)
{
	if (desc == NULL) {
		return -EINVAL;
	}
	if (desc->start_oen > desc->end_oen) {
		return -EINVAL;
	}
	if (desc->end_oen >= OEN_LIMIT) {
		return -EINVAL;
	}
	if ((desc->call_type != SMC_TYPE_FAST) &&
	    (desc->call_type != SMC_TYPE_YIELD)) {
		return -EINVAL;
	}
	/* A runtime service having no init or handle function doesn't make sense */
	if ((desc->init == NULL) && (desc->handle == NULL)) {
		return -EINVAL;
	}
	return 0;
}

/*******************************************************************************
 * This function calls the initialisation routine in the descriptor exported by
 * a runtime service. Once a descriptor has been validated, its start & end
 * owning entity numbers and the call type are combined to form a unique oen.
 * The unique oen is used as an index into the 'rt_svc_descs_indices' array.
 * The index of the runtime service descriptor is stored at this index.
 ******************************************************************************/
void __init runtime_svc_init(void)
{
	int rc = 0;
	uint8_t index, start_idx, end_idx;
	rt_svc_desc_t *rt_svc_descs;

	/* Assert that the number of descriptors detected is less than the maximum. */
	assert((RT_SVC_DESCS_END >= RT_SVC_DESCS_START) &&
	       (RT_SVC_DECS_NUM < MAX_RT_SVCS));

	/* If no runtime services are implemented then simply bail out. */
	if (RT_SVC_DECS_NUM == 0U) {
		return;
	}
	/* Initialise internal variables to an invalid state. */
	(void)memset(rt_svc_descs_indices, -1, sizeof(rt_svc_descs_indices));

	rt_svc_descs = (rt_svc_desc_t *) RT_SVC_DESCS_START;
	for (index = 0U; index < RT_SVC_DECS_NUM; index++) {
		rt_svc_desc_t *service = &rt_svc_descs[index];

		/*
		 * An invalid descriptor is an error condition since it is
		 * difficult to predict the system behaviour in the absence
		 * of this service.
		 */
		rc = validate_rt_svc_desc(service);
		if (rc != 0) {
			ERROR("Invalid runtime service descriptor %p\n",
			      (void *) service);
			panic();
		}

		/*
		 * The runtime service may have separate rt_svc_desc_t
		 * structures for its fast SMCs and yielding SMCs. Since the
		 * service itself needs to be initialised only once, only one
		 * of them will have an initialisation routine defined. Call
		 * the initialisation routine for this runtime service, if it
		 * is defined.
		 */
		if (service->init != NULL) {
			rc = service->init();
			if (rc != 0) {
				ERROR("Error initializing runtime service %s\n",
				      service->name);
				continue;
			}
		}

		/*
		 * Fill the indices corresponding to the start and end
		 * owning entity numbers with the index of the
		 * descriptor which will handle the SMCs for this owning
		 * entity range.
		 */
		start_idx = (uint8_t)get_unique_oen(service->start_oen,
						    service->call_type);
		end_idx = (uint8_t)get_unique_oen(service->end_oen,
						  service->call_type);
		assert(start_idx <= end_idx);
		assert(end_idx < MAX_RT_SVCS);
		for (; start_idx <= end_idx; start_idx++) {
			rt_svc_descs_indices[start_idx] = index;
		}
	}
}