xref: /rk3399_ARM-atf/common/runtime_svc.c (revision 430f246e58d146949d399d72294f56403672bee0)
/*
 * Copyright (c) 2013-2026, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/interrupt_mgmt.h>
#include <bl31/sync_handle.h>
#include <bl31/ea_handle.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <lib/cpus/cpu_ops.h>
#include <plat/common/platform.h>

/*******************************************************************************
 * The 'rt_svc_descs' array holds the runtime service descriptors exported by
 * services by placing them in the 'rt_svc_descs' linker section.
 * The 'rt_svc_descs_indices' array holds the index of a descriptor in the
 * 'rt_svc_descs' array. When an SMC arrives, the OEN[29:24] bits and the call
 * type[31] bit in the function id are combined to get an index into the
 * 'rt_svc_descs_indices' array. This gives the index of the descriptor in the
 * 'rt_svc_descs' array which contains the SMC handler.
 ******************************************************************************/
uint8_t rt_svc_descs_indices[MAX_RT_SVCS];
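/*
 * Entries of 'rt_svc_descs_indices' are initialised to an out-of-range value
 * (0xFF) in runtime_svc_init(), so lookups for owning entity numbers with no
 * registered service fail the bounds check in get_handler_for_smc_fid().
 */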

void __dead2 report_unhandled_exception(void);

#define RT_SVC_DECS_NUM		((RT_SVC_DESCS_END - RT_SVC_DESCS_START)\
					/ sizeof(rt_svc_desc_t))

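/*
 * Look up the handler registered for 'smc_fid'. Returns true and writes the
 * handler through 'handler' if a valid descriptor is installed for the FID's
 * owning entity, false otherwise.
 */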
static bool get_handler_for_smc_fid(uint32_t smc_fid, rt_svc_handle_t *handler)
{
	unsigned int index;
	unsigned int idx;
	const rt_svc_desc_t *rt_svc_descs;

	idx = get_unique_oen_from_smc_fid(smc_fid);
	assert(idx < MAX_RT_SVCS);
	index = rt_svc_descs_indices[idx];

	if (index >= RT_SVC_DECS_NUM) {
		return false;
	}

	rt_svc_descs = (rt_svc_desc_t *) RT_SVC_DESCS_START;
	assert(handler != NULL);
	*handler = rt_svc_descs[index].handle;
	assert(*handler != NULL);

	return true;
}

#if __aarch64__
#include <lib/extensions/ras_arch.h>

#if FFH_SUPPORT
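/*
 * Delegate a lower EL External Abort to the platform handler, recording the
 * original ESR_EL3 in the context so that a second abort taken while the
 * first is still being handled is reported as a double fault.
 */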
static void ea_proceed(uint32_t ea_reason, u_register_t esr_el3, cpu_context_t *ctx)
{
	/*
	 * If this is a double fault, invoke the platform handler. A double
	 * fault arises when the platform is handling a fault from a lower EL
	 * using plat_ea_handler() and another fault occurs which traps into
	 * EL3 because FFH_SUPPORT is enabled for the platform.
	 */
	el3_state_t *state = get_el3state_ctx(ctx);
	if (read_ctx_reg(state, CTX_DOUBLE_FAULT_ESR) != 0) {
		return plat_handle_double_fault(ea_reason, esr_el3);
	}

	/*
	 * Save CTX_DOUBLE_FAULT_ESR so that, if another fault happens in a
	 * lower EL, it is caught as a double fault in the next invocation of
	 * ea_proceed(), while preserving the original ESR_EL3.
	 */
	write_ctx_reg(state, CTX_DOUBLE_FAULT_ESR, esr_el3);

	/* Call platform External Abort handler. */
	plat_ea_handler(ea_reason, esr_el3, NULL, ctx, read_scr_el3() & SCR_NS_BIT);

	/* Clear Double Fault storage */
	write_ctx_reg(state, CTX_DOUBLE_FAULT_ESR, 0);
}

/*
 * This function handles SErrors from lower ELs.
 *
 * It delegates the handling of the EA to the platform handler and, upon
 * successfully handling the EA, exits EL3.
 */
void handler_lower_el_async_ea(cpu_context_t *ctx)
{
	u_register_t esr_el3 = read_esr_el3();

	if (is_feat_ras_supported()) {
		/* Should only be invoked for SError. */
		assert(EXTRACT(ESR_EC, esr_el3) == EC_SERROR);

		/*
		 * Check for Implementation Defined Syndrome. If so, skip
		 * checking Uncontainable error type from the syndrome as the
		 * format is unknown.
		 */
		if ((esr_el3 & SERROR_IDS_BIT) == 0) {
			/*
			 * AET is only valid when DFSC is 0x11. Route to the
			 * platform fatal error handler if it is an
			 * uncontainable error type.
			 */
			if (EXTRACT(EABORT_DFSC, esr_el3) == DFSC_SERROR &&
			    EXTRACT(EABORT_AET, esr_el3) == ERROR_STATUS_UET_UC) {
				return plat_handle_uncontainable_ea();
			}
		}
	}

	return ea_proceed(ERROR_EA_ASYNC, esr_el3, ctx);
}

#endif /* FFH_SUPPORT */

/*
 * This function handles FIQ or IRQ interrupts, i.e. EL3, S-EL1 and NS
 * interrupts.
 */
void handler_interrupt_exception(cpu_context_t *ctx)
{
	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	uint32_t type = plat_ic_get_pending_interrupt_type();
	if (type == INTR_TYPE_INVAL) {
		return;
	}

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be because of the following conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	interrupt_type_handler_t handler = get_interrupt_type_handler(type);
	if (handler == NULL) {
		return;
	}

	handler(INTR_ID_UNAVAILABLE, read_scr_el3() & SCR_NS_BIT, ctx, NULL);
}

static void smc_unknown(cpu_context_t *ctx)
{
	/*
	 * Unknown SMC call. Populate the return value with SMC_UNK and call
	 * el3_exit(), which will restore the remaining architectural state,
	 * i.e. the SYS, GP and PAuth registers (if any), prior to issuing the
	 * ERET to the desired lower EL.
	 */
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0, SMC_UNK);
}

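/*
 * Assemble the 'flags' argument passed to SMC handlers: the caller's security
 * state taken from SCR_EL3 (NS and, when RMM is enabled, NSE) plus the SMCCC
 * SVE hint bit taken from the function id.
 */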
static u_register_t get_flags(uint32_t smc_fid, u_register_t scr_el3)
{
	u_register_t flags = 0;

	/* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
	flags |= scr_el3 & SCR_NS_BIT;
#if ENABLE_RMM
	/*
	 * Copy SCR_EL3.NSE bit to the flag to indicate the caller's security
	 * state. The copied bit is placed at bit 5 of the flag, leaving space
	 * for the SCR_EL3.NS bit; bit 5 of the flag corresponds to SCR_EL3.NSE.
	 */
	flags |= ((scr_el3 & SCR_NSE_BIT) >> SCR_NSE_SHIFT) << 5;
#endif /* ENABLE_RMM */

	/*
	 * Per SMCCCv1.3 a caller can set the SVE hint bit in the SMC FID passed
	 * through x0. Copy the SVE hint bit to flags and mask the bit in
	 * smc_fid passed to the standard service dispatcher. A
	 * service/dispatcher can retrieve the SVE hint bit state from flags
	 * using the appropriate helper.
	 */
	flags |= smc_fid & MASK(FUNCID_SVE_HINT);

	return flags;
}

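/*
 * Dispatch an SMC to the runtime service registered for its function id.
 * Malformed or unrecognised function ids are answered with SMC_UNK.
 */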
static void sync_handler(cpu_context_t *ctx, uint32_t smc_fid)
{
	u_register_t scr_el3 = read_scr_el3();
	rt_svc_handle_t handler;

	/*
	 * Per the SMCCC, bits [23:17] must be zero for Fast SMCs; other values
	 * are reserved for future use. Ensure that these bits are zero and, if
	 * not, report an unknown SMC.
	 */
	if (EXTRACT(FUNCID_TYPE, smc_fid) == SMC_TYPE_FAST &&
	    EXTRACT(FUNCID_FC_RESERVED, smc_fid) != 0) {
		return smc_unknown(ctx);
	}

	/* Capture the SVE hint bit in the flags before masking it off. */
	u_register_t flags = get_flags(smc_fid, scr_el3);

	smc_fid &= ~MASK(FUNCID_SVE_HINT);

	/* Get the descriptor using the index */
	if (!get_handler_for_smc_fid(smc_fid, &handler)) {
		return smc_unknown(ctx);
	}

	u_register_t x1, x2, x3, x4;
	get_smc_params_from_ctx(ctx, x1, x2, x3, x4);
	handler(smc_fid, x1, x2, x3, x4, NULL, ctx, flags);
}

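/*
 * Top-level C handler for synchronous exceptions taken from lower ELs.
 * SMCs are routed to the runtime service dispatcher, trapped system register
 * accesses to handle_sysreg_trap(), and (with FFH_SUPPORT) external aborts to
 * the platform EA handlers; anything else results in an UNDEF injection or a
 * reported unhandled exception.
 */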
void handler_sync_exception(cpu_context_t *ctx)
{
	uint32_t smc_fid = read_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0);
	u_register_t esr_el3 = read_esr_el3();
	u_register_t exc_class = EXTRACT(ESR_EC, esr_el3);
	el3_state_t *state = get_el3state_ctx(ctx);

	if (exc_class == EC_AARCH32_SMC || exc_class == EC_AARCH64_SMC) {
		if (exc_class == EC_AARCH32_SMC && EXTRACT(FUNCID_CC, smc_fid) != 0) {
			return smc_unknown(ctx);
		}
		return sync_handler(ctx, smc_fid);
	} else if (exc_class == EC_AARCH64_SYS) {
		int ret = handle_sysreg_trap(esr_el3, ctx, get_flags(smc_fid, read_scr_el3()));
		if (ret == TRAP_RET_CONTINUE) {
			/* advance the PC to continue after the instruction */
			write_ctx_reg(state, CTX_ELR_EL3, read_ctx_reg(state, CTX_ELR_EL3) + 4);
			return;
		} else if (ret == TRAP_RET_REPEAT) {
			/* continue at the same instruction */
			return;
		}
#if FFH_SUPPORT
	/* If FFH support is enabled, try to handle lower EL EA exceptions. */
	} else if ((exc_class == EC_IABORT_LOWER_EL || exc_class == EC_DABORT_LOWER_EL)
		    && ((read_ctx_reg(state, CTX_SCR_EL3) & SCR_EA_BIT) != 0UL)) {
		/*
		 * Check for Uncontainable error type. If so, route to the
		 * platform fatal error handler rather than the generic EA one.
		 */
		if (is_feat_ras_supported() &&
		    (EXTRACT(EABORT_SET, esr_el3) == ERROR_STATUS_SET_UC) &&
		    (EXTRACT(EABORT_DFSC, esr_el3) == SYNC_EA_FSC)) {
			return plat_handle_uncontainable_ea();
		}
		/* Pass the exception reason and syndrome to the platform handler. */
		return ea_proceed(ERROR_EA_SYNC, esr_el3, ctx);
#endif /* FFH_SUPPORT */
	}

	/* Unhandled trap; UNDEF injection is not provided for lower ELs in AArch32 mode. */
	if (read_spsr_el3() & MASK(SPSR_M)) {
		ERROR("Trapped an instruction from AArch32 %s mode\n",
		      get_mode_str((unsigned int)GET_M32(read_spsr_el3())));
		ERROR("at address 0x%lx, reason 0x%lx\n", read_elr_el3(), esr_el3);
		report_unhandled_exception();
	}

	/*
	 * UNDEF injection by default for AArch64 on any otherwise unhandled
	 * trap. This is expected to include:
	 *   0b000111 - SME/SVE instructions and registers
	 *   0b001001 - PAUTH instructions
	 *   0b001010 - FEAT_LS64/catch all instructions
	 *   0b010100 - MSRR/MRSS
	 *   0b011000 - MSR/MRS
	 *   0b011001 - SVE instructions and registers
	 *   0b011101 - SME instructions and illegal ZA execution
	 *   0b101100 - floating point
	 *
	 * Exception syndromes not listed are either unreachable or the
	 * erroneous UNDEF injection is accepted as a better alternative to a
	 * panic at EL3.
	 */
	inject_undef64(ctx);
}
#endif /* __aarch64__ */

/*******************************************************************************
 * Function to invoke the registered `handle` corresponding to the smc_fid in
 * AArch32 mode.
 ******************************************************************************/
uintptr_t handle_runtime_svc(uint32_t smc_fid,
			     void *cookie,
			     void *handle,
			     unsigned int flags)
{
	u_register_t x1, x2, x3, x4;
	rt_svc_handle_t handler;

	assert(handle != NULL);

	if (!get_handler_for_smc_fid(smc_fid, &handler)) {
		SMC_RET1(handle, SMC_UNK);
	}

	get_smc_params_from_ctx(handle, x1, x2, x3, x4);

	return handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags);
}

/*******************************************************************************
 * Simple routine to sanity check a runtime service descriptor before using it
 ******************************************************************************/
static int32_t validate_rt_svc_desc(const rt_svc_desc_t *desc)
{
	if (desc == NULL) {
		return -EINVAL;
	}
	if (desc->start_oen > desc->end_oen) {
		return -EINVAL;
	}
	if (desc->end_oen >= OEN_LIMIT) {
		return -EINVAL;
	}
	if ((desc->call_type != SMC_TYPE_FAST) &&
	    (desc->call_type != SMC_TYPE_YIELD)) {
		return -EINVAL;
	}
	/* A runtime service having no init or handle function doesn't make sense */
	if ((desc->init == NULL) && (desc->handle == NULL)) {
		return -EINVAL;
	}
	return 0;
}

/*******************************************************************************
 * This function calls the initialisation routine in the descriptor exported by
 * a runtime service. Once a descriptor has been validated, its start & end
 * owning entity numbers and the call type are combined to form a unique oen.
 * The unique oen is used as an index into the 'rt_svc_descs_indices' array.
 * The index of the runtime service descriptor is stored at this index.
 ******************************************************************************/
void __init runtime_svc_init(void)
{
	int rc = 0;
	uint8_t index, start_idx, end_idx;
	rt_svc_desc_t *rt_svc_descs;

	/* Assert that the number of descriptors detected is less than the maximum */
	assert((RT_SVC_DESCS_END >= RT_SVC_DESCS_START) &&
			(RT_SVC_DECS_NUM < MAX_RT_SVCS));

	/* If no runtime services are implemented then simply bail out */
	if (RT_SVC_DECS_NUM == 0U) {
		return;
	}
	/* Initialise internal variables to invalid state */
	(void)memset(rt_svc_descs_indices, -1, sizeof(rt_svc_descs_indices));

	rt_svc_descs = (rt_svc_desc_t *) RT_SVC_DESCS_START;
	for (index = 0U; index < RT_SVC_DECS_NUM; index++) {
		rt_svc_desc_t *service = &rt_svc_descs[index];

		/*
		 * An invalid descriptor is an error condition since it is
		 * difficult to predict the system behaviour in the absence
		 * of this service.
		 */
		rc = validate_rt_svc_desc(service);
		if (rc != 0) {
			ERROR("Invalid runtime service descriptor %p\n",
				(void *) service);
			panic();
		}

		/*
		 * The runtime service may have separate rt_svc_desc_t
		 * descriptors for its fast and yielding SMCs. Since the
		 * service itself needs to be initialised only once, only one
		 * of them will have an initialisation routine defined. Call
		 * the initialisation routine for this runtime service, if it
		 * is defined.
		 */
		if (service->init != NULL) {
			rc = service->init();
			if (rc != 0) {
				ERROR("Error initializing runtime service %s\n",
						service->name);
				continue;
			}
		}

		/*
		 * Fill the indices corresponding to the start and end
		 * owning entity numbers with the index of the
		 * descriptor which will handle the SMCs for this owning
		 * entity range.
		 */
		start_idx = (uint8_t)get_unique_oen(service->start_oen,
						    service->call_type);
		end_idx = (uint8_t)get_unique_oen(service->end_oen,
						  service->call_type);
		assert(start_idx <= end_idx);
		assert(end_idx < MAX_RT_SVCS);
		for (; start_idx <= end_idx; start_idx++) {
			rt_svc_descs_indices[start_idx] = index;
		}
	}
}