/*
 * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl31/ea_handle.h>
#include <bl31/interrupt_mgmt.h>
#include <bl31/sync_handle.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <cpu_macros.S>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32

	/*
	 * Save LR to make x30 available, as most of the routines reached from
	 * the vector entries need a free register.
	 */
	.macro save_x30
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	.endm

	.macro restore_x30
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	.endm
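
	/*
	 * Note: on exception entry to EL3 the stack pointer (SP_EL3) points at
	 * the per-CPU cpu_context_t rather than a conventional stack, which is
	 * why LR is stashed in the GP-register area of that context. Each
	 * vector entry is also limited to 32 instructions (0x80 bytes), so the
	 * entries below defer to macros and functions almost immediately.
	 */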

	/*
	 * Macro that synchronizes errors (EA) and checks for a pending SError.
	 * On detecting a pending SError it either reflects it back to the lower
	 * EL (KFH) or handles it in EL3 (FFH), based on the EA routing model.
	 */
	.macro	sync_and_handle_pending_serror
	synchronize_errors
	mrs	x30, ISR_EL1
	tbz	x30, #ISR_A_SHIFT, 2f
#if FFH_SUPPORT
	mrs	x30, scr_el3
	tst	x30, #SCR_EA_BIT
	b.eq	1f
	bl	handle_pending_async_ea
	b	2f
#endif
1:
	/* This function never returns, but needs LR for decision making */
	bl	reflect_pending_async_ea_to_lower_el
2:
	.endm
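
	/*
	 * For reference, the routing decision above reduces to:
	 *
	 *  ISR_EL1.A == 0                  -> no SError pending, fall through
	 *  ISR_EL1.A == 1, SCR_EL3.EA == 1 -> FFH: handle in EL3
	 *                                     (handle_pending_async_ea)
	 *  ISR_EL1.A == 1, SCR_EL3.EA == 0 -> KFH: reflect to the lower EL
	 *                                     (reflect_pending_async_ea_to_lower_el)
	 *
	 * Without FFH_SUPPORT only the KFH path is built.
	 */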

	/* ---------------------------------------------------------------------
	 * This macro handles synchronous exceptions, most commonly SMCs. An
	 * implementation-defined EL3 exception takes a fast path; everything
	 * else is dispatched to handler_sync_exception.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	per_cpu_cur percpu_data, x29, x30
	mrs	x30, cntpct_el0
	str	x30, [x29, #CPU_DATA_CPU_DATA_PMF_TS]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

	/* Extract the EC field before comparing against EC values */
	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Fast paths that have a minimal environment */
	cmp	x30, #EC_IMP_DEF_EL3
	b.eq	imp_def_el3_handler

	/* Set up the full environment */
	bl	prepare_el3_entry

#if ENABLE_ASSERTIONS
	/* Save the stack pointer */
	mov	x28, sp
#endif

	bl	handler_sync_exception

	/* Validate that the stack has been completely unwound */
#if ENABLE_ASSERTIONS
	mov	x27, sp
	cmp	x28, x27
	ASM_ASSERT(eq)
#endif

	no_ret	el3_exit
	.endm
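
	/*
	 * For reference, the EC field lives in ESR_EL3[31:26]. For example, an
	 * SMC from AArch64 state reports EC 0x17, so with a zero ISS the full
	 * register reads 0x5E000000 (EC 0x17 at bit 26 plus the IL bit); the
	 * ubfx above reduces this to the bare EC value so it can be compared
	 * against constants such as EC_IMP_DEF_EL3.
	 */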

.macro handle_lower_el_async_ea
	bl	prepare_el3_entry

	bl	handler_lower_el_async_ea

	no_ret	el3_exit
.endm

	/* ---------------------------------------------------------------------
	 * This macro handles FIQ and IRQ interrupts, i.e. EL3, S-EL1 and NS
	 * interrupts.
	 * ---------------------------------------------------------------------
	 */
.macro handle_interrupt_exception
	bl	prepare_el3_entry

	bl	handler_interrupt_exception

	/* Return from exception, possibly in a different security state */
	no_ret	el3_exit
.endm
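
	/*
	 * Note: handler_interrupt_exception hands the interrupt over to
	 * whichever handler the owning security state registered with the EL3
	 * interrupt management framework (see interrupt_mgmt.h and
	 * register_interrupt_type_handler), which is why one macro can serve
	 * the IRQ and FIQ vectors of every origin alike.
	 */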

vector_base runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
#ifdef MONITOR_TRAPS
	stp x29, x30, [sp, #-16]!

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Check for BRK */
	cmp	x30, #EC_BRK
	b.eq	brk_handler

	ldp x29, x30, [sp], #16
#endif /* MONITOR_TRAPS */

	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
end_vector_entry irq_sp_el0


vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_el0


vector_entry serror_sp_el0
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_elx

vector_entry serror_sp_elx
#if FFH_SUPPORT
	/*
	 * This will trigger if the exception was taken due to an SError in EL3,
	 * or because asynchronous external aborts pending from a lower EL were
	 * triggered by implicit/explicit error synchronization during EL3 entry
	 * (SCR_EL3.EA = 1). For the former case we continue with
	 * "plat_handle_el3_ea". The latter case occurs when the PSTATE.A bit is
	 * cleared in "handle_pending_async_ea", which means we are taking a
	 * nested exception in EL3. Call the handler for async EA, which will
	 * eret back to the original EL3 handler if it is a nested exception.
	 * Also unmask EA so that we catch any further EAs that arise while
	 * handling this nested exception at EL3.
	 */
	save_x30
	ldr	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	cbz	x30, 1f
	/*
	 * This is nested exception handling; clear the flag to avoid taking
	 * this path for further exceptions caused by EA handling.
	 */
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	unmask_async_ea

	handle_lower_el_async_ea
1:
	restore_x30
#endif
	no_ret	plat_handle_el3_ea

end_vector_entry serror_sp_elx
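
	/*
	 * Nested-EA flow in FFH mode, for reference:
	 *
	 * 1. An exception from a lower EL enters EL3 and synchronize_errors
	 *    pends an SError (ISR_EL1.A becomes set).
	 * 2. handle_pending_async_ea sets CTX_NESTED_EA_FLAG and clears
	 *    PSTATE.A, so the pending SError is taken immediately to
	 *    serror_sp_elx above.
	 * 3. serror_sp_elx sees the flag, clears it, unmasks EA again and runs
	 *    the lower-EL async EA handler, which erets back into the original
	 *    EL3 handler.
	 *
	 * An SError taken here without the flag set is a genuine EL3 error and
	 * goes straight to plat_handle_el3_ea.
	 */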

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * This exception vector is the entry point for SMCs and, most commonly,
	 * for traps that are unhandled at lower ELs. SP_EL3 should point to a
	 * valid cpu context where the general purpose and system register state
	 * can be saved.
	 */
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	handle_sync_exception
end_vector_entry sync_exception_aarch64

vector_entry irq_aarch64
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	handle_interrupt_exception
end_vector_entry irq_aarch64

vector_entry fiq_aarch64
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	handle_interrupt_exception
end_vector_entry fiq_aarch64

	/*
	 * Errors can arrive in bursts, so any outstanding SError must be
	 * synchronized here as well. Reuse the sync mechanism to catch any
	 * further errors that are still pending.
	 */
vector_entry serror_aarch64
#if FFH_SUPPORT
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	handle_lower_el_async_ea
#else
	b	report_unhandled_exception
#endif
end_vector_entry serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * This exception vector is the entry point for SMCs and, most commonly,
	 * for traps that are unhandled at lower ELs. SP_EL3 should point to a
	 * valid cpu context where the general purpose and system register state
	 * can be saved.
	 */
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	handle_sync_exception
end_vector_entry sync_exception_aarch32

vector_entry irq_aarch32
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	handle_interrupt_exception
end_vector_entry irq_aarch32

vector_entry fiq_aarch32
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	handle_interrupt_exception
end_vector_entry fiq_aarch32

	/*
	 * Errors can arrive in bursts, so any outstanding SError must be
	 * synchronized here as well. Reuse the sync mechanism to catch any
	 * further errors that are still pending.
	 */
vector_entry serror_aarch32
#if FFH_SUPPORT
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	handle_lower_el_async_ea
#else
	b	report_unhandled_exception
#endif
end_vector_entry serror_aarch32

#ifdef MONITOR_TRAPS
	.section .rodata.brk_string, "aS"
brk_location:
	.asciz "Error at instruction 0x"
brk_message:
	.asciz "Unexpected BRK instruction with value 0x"
#endif /* MONITOR_TRAPS */

func imp_def_el3_handler
	/* Save GP registers */
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]

	/* Get the cpu_ops pointer */
	bl	get_cpu_ops_ptr

	/* Get the cpu_ops exception handler */
	ldr	x0, [x0, #CPU_E_HANDLER_FUNC]

	/*
	 * If the reserved function pointer is NULL, this CPU does not have an
	 * implementation-defined exception handler function.
	 */
	cbz	x0, el3_handler_exit
	mrs	x1, esr_el3
	ubfx	x1, x1, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	blr	x0
el3_handler_exit:
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	restore_x30
	no_ret	report_unhandled_exception
endfunc imp_def_el3_handler
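
/*
 * A CPU opts in to the hook above by declaring its ops with the
 * declare_cpu_ops_eh macro from cpu_macros.S, which populates the
 * CPU_E_HANDLER_FUNC slot; the handler is then invoked with the exception
 * class (EC) in x1. For illustration only (names are placeholders, assuming
 * the declare_cpu_ops_eh signature in cpu_macros.S):
 *
 *	declare_cpu_ops_eh my_cpu, MY_CPU_MIDR, \
 *		my_cpu_reset_func, \
 *		my_cpu_el3_exception_handler, \
 *		my_cpu_core_pwr_dwn
 *
 * CPUs declared with plain declare_cpu_ops leave the slot NULL and fall
 * through to el3_handler_exit.
 */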

/*
 * Handler for async EA from lower EL synchronized at EL3 entry in KFH mode.
 *
 * This scenario may arise when there is an error (EA) in the system which is
 * not yet signaled to the PE while executing in a lower EL. During entry into
 * EL3, the errors are synchronized either implicitly or explicitly, causing
 * an async EA to pend at EL3.
 *
 * On detecting the pending EA (via ISR_EL1.A), and if the EA routing model is
 * KFH (SCR_EL3.EA = 0), this handler reflects the error back to the lower EL.
 *
 * This function assumes x30 has been saved.
 */
func reflect_pending_async_ea_to_lower_el
	/*
	 * As the original exception was not handled, we need to ensure that we
	 * return to the instruction which caused it. To achieve that, eret to
	 * "elr-4" (label "subtract_elr_el3") for SMC, or simply eret otherwise
	 * (label "skip_smc_check").
	 *
	 * LIMITATION: The async EA may be masked at the target exception level,
	 * or its priority with respect to the EL3/secure interrupt may be
	 * lower, causing a back-and-forth between the lower EL and EL3. In that
	 * case we can track the loop count in "CTX_NESTED_EA_FLAG" and leverage
	 * the previous ELR in "CTX_SAVED_ELR_EL3" to detect the cycle and panic
	 * to flag the problem (label "check_loop_ctr"). While in this cycle the
	 * loop counter retains its value, but a normal el3_exit clears the
	 * flag. However, setting SCR_EL3.IESB = 1 should give priority to
	 * SError handling, as per the AArch64.TakeException pseudocode in the
	 * Arm ARM.
	 *
	 * TODO: In future, if EL3 gains the capability to inject a virtual
	 * SError to lower ELs, we can remove the el3_panic, handle the original
	 * exception first and inject the SError to the lower EL before ereting
	 * back.
	 */
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x29, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
	mrs	x28, elr_el3
	cmp	x29, x28
	b.eq	check_loop_ctr
	str	x28, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
	/* Zero the loop counter */
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	b	skip_loop_ctr
check_loop_ctr:
	ldr	x29, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	add	x29, x29, #1
	str	x29, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	cmp	x29, #ASYNC_EA_REPLAY_COUNTER
	b.ge	el3_panic
skip_loop_ctr:
	/*
	 * Logic to distinguish whether we came from an SMC or any other
	 * exception, using offsets within the vector table. Each 0x200 block
	 * of the table holds one set of entries: offsets "0x0-0x80" serve the
	 * synchronous exception and "0x80-0x200" the asynchronous ones. Use
	 * the vector base address (vbar_el3) and the vector-entry return
	 * address in LR to check whether the address we came from lies in
	 * "0x0-0x80", "0x200-0x280", "0x400-0x480" or "0x600-0x680".
	 */
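	/*
	 * Worked example: an SMC from AArch64 vectors to sync_exception_aarch64
	 * at vbar_el3 + 0x400. If LR holds vbar_el3 + 0x412, the subtraction
	 * and the AND with 0x1ff leave 0x12, which is below 0x80 and therefore
	 * a synchronous entry; an IRQ entry address such as vbar_el3 + 0x480
	 * would leave 0x80 and take the skip_smc_check branch.
	 */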
	mrs	x29, vbar_el3
	sub	x30, x30, x29
	and	x30, x30, #0x1ff
	cmp	x30, #0x80
	b.ge	skip_smc_check
	/* It's a synchronous exception; now check whether it is an SMC */
	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x30, #EC_AARCH32_SMC
	b.eq	subtract_elr_el3
	cmp	x30, #EC_AARCH64_SMC
	b.eq	subtract_elr_el3
	b	skip_smc_check
subtract_elr_el3:
	sub	x28, x28, #4
skip_smc_check:
	msr	elr_el3, x28
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	exception_return
endfunc reflect_pending_async_ea_to_lower_el

	/* ---------------------------------------------------------------------
	 * The following code handles exceptions caused by BRK instructions.
	 * Following a BRK instruction, the only valid course of action is to
	 * print some information and panic, as the code that caused it is
	 * likely in an inconsistent internal state.
	 *
	 * This is initially intended to be used in conjunction with
	 * __builtin_trap.
	 * ---------------------------------------------------------------------
	 */
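
	/*
	 * For reference: compilers lower __builtin_trap() to a BRK instruction.
	 * A BRK executed in AArch64 state reports EC 0x3c (EC_BRK), and the
	 * instruction's 16-bit immediate lands in ISS[15:0], which is the value
	 * printed after brk_message below.
	 */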
#ifdef MONITOR_TRAPS
func brk_handler
	/* Extract the ISS */
	mrs	x10, esr_el3
	ubfx	x10, x10, #ESR_ISS_SHIFT, #ESR_ISS_LENGTH

	/* Ensure the console is initialized */
	bl	plat_crash_console_init

	adr	x4, brk_location
	bl	asm_print_str
	mrs	x4, elr_el3
	bl	asm_print_hex
	bl	asm_print_newline

	adr	x4, brk_message
	bl	asm_print_str
	mov	x4, x10
	mov	x5, #28
	bl	asm_print_hex_bits
	bl	asm_print_newline

	no_ret	plat_panic_handler
endfunc brk_handler
#endif /* MONITOR_TRAPS */