xref: /rk3399_ARM-atf/bl31/aarch64/runtime_exceptions.S (revision f87e54f73cfee5042df526af6185ac6d9653a8f5)
/*
 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl31/ea_handle.h>
#include <bl31/interrupt_mgmt.h>
#include <bl31/sync_handle.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32

	/*
	 * Save LR to the context and make x30 available; most of the
	 * routines in the vector entries need a free register.
	 */
	.macro save_x30
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	.endm

	.macro restore_x30
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	.endm
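
	/*
	 * Both macros assume SP_EL3 points to a valid cpu_context_t, whose
	 * GP-register area provides the LR slot used as scratch storage.
	 */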

	/*
	 * Macro that synchronizes errors (EA) and checks for pending SError.
	 * On detecting a pending SError it either reflects it back to the
	 * lower EL (Kernel First Handling, KFH) or handles it in EL3
	 * (Firmware First Handling, FFH), based on the EA routing model.
	 */
	.macro	sync_and_handle_pending_serror
	synchronize_errors
	mrs	x30, ISR_EL1
	tbz	x30, #ISR_A_SHIFT, 2f
#if FFH_SUPPORT
	mrs	x30, scr_el3
	tst	x30, #SCR_EA_BIT
	b.eq	1f
	bl	handle_pending_async_ea
	b	2f
#endif
1:
	/* This function never returns, but needs LR for decision making */
	bl	reflect_pending_async_ea_to_lower_el
2:
	.endm
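
	/*
	 * ISR_EL1.A (read above) is set while an SError is pending. The
	 * vector entries below invoke this macro with PSTATE.A still masked
	 * and only unmask asynchronous EAs (unmask_async_ea) once the
	 * pending-SError check has completed.
	 */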

	/* ---------------------------------------------------------------------
	 * This macro handles synchronous exceptions.
	 * Only SMC exceptions and system register traps are supported; any
	 * other synchronous exception is assumed to be an external abort.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	mrs	x30, cntpct_el0
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	mrs	x29, tpidr_el3
	str	x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Handle SMC exceptions separately from other synchronous exceptions */
	cmp	x30, #EC_AARCH32_SMC
	b.eq	smc_handler32

	cmp	x30, #EC_AARCH64_SMC
	b.eq	sync_handler64

	cmp	x30, #EC_AARCH64_SYS
	b.eq	sync_handler64

	/* Synchronous exceptions other than the above are assumed to be EA */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	b	handle_lower_el_sync_ea
	.endm

vector_base runtime_exceptions
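
	/*
	 * The AArch64 vector table below consists of four groups of four
	 * 0x80-byte entries, one group per exception origin and one entry
	 * each for synchronous, IRQ, FIQ and SError exceptions.
	 */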

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
#ifdef MONITOR_TRAPS
	stp	x29, x30, [sp, #-16]!

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Check for BRK */
	cmp	x30, #EC_BRK
	b.eq	brk_handler

	ldp	x29, x30, [sp], #16
#endif /* MONITOR_TRAPS */

	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
end_vector_entry irq_sp_el0

vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_el0

vector_entry serror_sp_el0
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_elx

vector_entry serror_sp_elx
#if FFH_SUPPORT
	/*
	 * This will trigger if the exception was taken due to an SError in
	 * EL3, or because of a pending asynchronous external abort from a
	 * lower EL that fired due to implicit/explicit synchronization in
	 * EL3 (SCR_EL3.EA=1) during EL3 entry. In the former case we
	 * continue with "plat_handle_el3_ea". The latter case occurs when
	 * the PSTATE.A bit is cleared in "handle_pending_async_ea", which
	 * means we are taking a nested exception in EL3. Call the async EA
	 * handler, which erets back to the original EL3 handler if this is
	 * a nested exception. Also unmask EAs so that we catch any further
	 * EA that arises while handling this nested exception at EL3.
	 */
	save_x30
	ldr	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	cbz	x30, 1f
	/*
	 * This is nested exception handling; clear the flag to avoid taking
	 * this path for further exceptions caused by EA handling.
	 */
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	unmask_async_ea
	b	handle_lower_el_async_ea
1:
	restore_x30
#endif
	no_ret	plat_handle_el3_ea

end_vector_entry serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * This exception vector is, most commonly, the entry point for SMCs
	 * and for traps that are unhandled at lower ELs. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system
	 * register state can be saved.
	 */
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch64

vector_entry irq_aarch64
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry irq_aarch64

vector_entry fiq_aarch64
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry fiq_aarch64

	/*
	 * We need to synchronize any outstanding SError since errors can
	 * arrive in bursts. Reuse the synchronization mechanism to catch any
	 * further errors that are pending.
	 */
vector_entry serror_aarch64
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_lower_el_async_ea
end_vector_entry serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * This exception vector is, most commonly, the entry point for SMCs
	 * and for traps that are unhandled at lower ELs. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system
	 * register state can be saved.
	 */
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch32

vector_entry irq_aarch32
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry irq_aarch32

vector_entry fiq_aarch32
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry fiq_aarch32

	/*
	 * We need to synchronize any outstanding SError since errors can
	 * arrive in bursts. Reuse the synchronization mechanism to catch any
	 * further errors that are pending.
	 */
vector_entry serror_aarch32
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_lower_el_async_ea
end_vector_entry serror_aarch32

#ifdef MONITOR_TRAPS
	.section .rodata.brk_string, "aS"
brk_location:
	.asciz "Error at instruction 0x"
brk_message:
	.asciz "Unexpected BRK instruction with value 0x"
#endif /* MONITOR_TRAPS */

	/* ---------------------------------------------------------------------
	 * The following code handles secure monitor calls.
	 * Depending upon the execution state from which the SMC has been
	 * invoked, it frees some general purpose registers to perform the
	 * remaining tasks. They involve finding the runtime service handler
	 * that is the target of the SMC and switching to the runtime stack
	 * (SP_EL0) before calling the handler.
	 *
	 * Note that x30 has been explicitly saved and can be used here.
	 * ---------------------------------------------------------------------
	 */
func sync_exception_handler
smc_handler32:
	/* Check whether AArch32 issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

sync_handler64:
	/* NOTE: The code below must preserve x0-x4 */

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * Also save PMCR_EL0 and set the PSTATE to a known state.
	 */
	bl	prepare_el3_entry

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/*
	 * Populate the parameters for the SMC handler.
	 * We already have x0-x4 in place. x5 will point to a cookie (not used
	 * now). x6 will point to the context structure (SP_EL3) and x7 will
	 * contain flags we need to pass to the handler.
	 */
	mov	x5, xzr
	mov	x6, sp

	/*
	 * Restore the saved C runtime stack value which will become the new
	 * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
	 * structure prior to the last ERET from EL3.
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* Switch to SP_EL0 */
	msr	spsel, #MODE_SP_EL0

	/*
	 * Save the SPSR_EL3 and ELR_EL3 in case there is a world
	 * switch during SMC handling.
	 * TODO: Revisit if all system registers can be saved later.
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Load SCR_EL3 */
	mrs	x18, scr_el3

	/* Check for system register traps */
	mrs	x16, esr_el3
	ubfx	x17, x16, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x17, #EC_AARCH64_SYS
	b.eq	sysreg_handler64

	/* Clear flag register */
	mov	x7, xzr

#if ENABLE_RME
	/* Copy SCR_EL3.NSE bit to the flag to indicate caller's security */
	ubfx	x7, x18, #SCR_NSE_SHIFT, #1

	/*
	 * Shift copied SCR_EL3.NSE bit by 5 to create space for
	 * SCR_EL3.NS bit. Bit 5 of the flag corresponds to
	 * the SCR_EL3.NSE bit.
	 */
	lsl	x7, x7, #5
#endif /* ENABLE_RME */

	/* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
	bfi	x7, x18, #0, #1
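
	/*
	 * x7 now encodes the caller's security state: bit 0 holds
	 * SCR_EL3.NS and, with RME enabled, bit 5 holds SCR_EL3.NSE. This
	 * matches the flags encoding the SMCCC helpers expect.
	 */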

	mov	sp, x12

	/*
	 * Per SMCCC documentation, bits [23:17] must be zero for Fast
	 * SMCs. Other values are reserved for future use. Ensure that
	 * these bits are zero; if they are not, report the call as an
	 * unknown SMC.
	 */
	tbz	x0, #FUNCID_TYPE_SHIFT, 2f	/* Skip check if it's a Yield Call */
	tst	x0, #(FUNCID_FC_RESERVED_MASK << FUNCID_FC_RESERVED_SHIFT)
	b.ne	smc_unknown

	/*
	 * Per SMCCC v1.3 a caller can set the SVE hint bit in the SMC FID
	 * passed through x0. Copy the SVE hint bit to flags and mask the
	 * bit in smc_fid passed to the standard service dispatcher.
	 * A service/dispatcher can retrieve the SVE hint bit state from
	 * flags using the appropriate helper.
	 */
2:
	and	x16, x0, #(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT)
	orr	x7, x7, x16
	bic	x0, x0, #(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT)

	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH
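
	/*
	 * x16 now holds the unique OEN: the owning entity number with the
	 * call type (fast/yielding) folded in above it, so fast and
	 * yielding calls index separate entries of the table below.
	 */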

	/* Load descriptor index from array of indices */
	adrp	x14, rt_svc_descs_indices
	add	x14, x14, :lo12:rt_svc_descs_indices
	ldrb	w15, [x14, x16]

	/* Any index greater than 127 is invalid. Check bit 7. */
	tbnz	w15, 7, smc_unknown

	/*
	 * Get the descriptor using the index
	 * x11 = (base + off), w15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 */
	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]

	/*
	 * Call the Secure Monitor Call handler and then drop directly into
	 * el3_exit() which will program any remaining architectural state
	 * prior to issuing the ERET to the desired lower EL.
	 */
#if DEBUG
	cbz	x15, rt_svc_fw_critical_error
#endif
	blr	x15

	b	el3_exit

sysreg_handler64:
	mov	x0, x16		/* ESR_EL3, containing syndrome information */
	mov	x1, x6		/* lower EL's context */
	mov	x19, x6		/* save context pointer for after the call */
	mov	sp, x12		/* EL3 runtime stack, as loaded above */

	/* int handle_sysreg_trap(uint64_t esr_el3, cpu_context_t *ctx); */
	bl	handle_sysreg_trap
	/*
	 * returns:
	 *   -1: unhandled trap, panic
	 *    0: handled trap, return to the trapping instruction (repeating it)
	 *    1: handled trap, return to the next instruction
	 */

	tst	w0, w0
	b.mi	elx_panic	/* negative return value: panic */
	b.eq	1f		/* zero: do not change ELR_EL3 */

	/* advance the PC to continue after the instruction */
	ldr	x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3]
	add	x1, x1, #4
	str	x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3]
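	/*
	 * Every AArch64 instruction is 4 bytes wide, so ELR_EL3 + 4 points
	 * at the instruction following the trapped one.
	 */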
1:
	b	el3_exit

smc_unknown:
	/*
	 * Unknown SMC call. Populate the return value with SMC_UNK and call
	 * el3_exit(), which will restore the remaining architectural state,
	 * i.e. the SYS, GP and PAuth registers (if any), prior to issuing
	 * the ERET to the desired lower EL.
	 */
	mov	x0, #SMC_UNK
	str	x0, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	el3_exit

smc_prohibited:
	restore_ptw_el1_sys_regs
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x0, #SMC_UNK
	exception_return

#if DEBUG
rt_svc_fw_critical_error:
	/* Switch to SP_ELx */
	msr	spsel, #MODE_SP_ELX
	no_ret	report_unhandled_exception
#endif
endfunc sync_exception_handler

	/* ---------------------------------------------------------------------
	 * The following function handles FIQ and IRQ interrupts, i.e. EL3,
	 * S-EL1 and NS interrupts.
	 *
	 * Note that x30 has been explicitly saved and can be used here.
	 * ---------------------------------------------------------------------
	 */
func handle_interrupt_exception
	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * Also save PMCR_EL0 and set the PSTATE to a known state.
	 */
	bl	prepare_el3_entry

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/* Save the EL3 system registers needed to return from this exception */
	mrs	x0, spsr_el3
	mrs	x1, elr_el3
	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Switch to the runtime stack i.e. SP_EL0 */
	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	mov	x20, sp
	msr	spsel, #MODE_SP_EL0
	mov	sp, x2
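
	/*
	 * The callee-saved register x20 preserves the pointer to the saved
	 * cpu context (the old SP_EL3) across the C calls below; it is
	 * passed to the interrupt type handler as the 'handle' argument.
	 */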

	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	bl	plat_ic_get_pending_interrupt_type
	cmp	x0, #INTR_TYPE_INVAL
	b.eq	interrupt_exit

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be because of the following conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	bl	get_interrupt_type_handler
	cbz	x0, interrupt_exit
	mov	x21, x0

	mov	x0, #INTR_ID_UNAVAILABLE

	/* Set the current security state in the 'flags' parameter */
	mrs	x2, scr_el3
	ubfx	x1, x2, #0, #1
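	/*
	 * SCR_EL3.NS is bit 0, so x1 now indicates whether the interrupt
	 * was taken from the secure (0) or non-secure (1) world; this is
	 * the security state encoding the interrupt framework expects.
	 */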

	/* Restore the reference to the 'handle' i.e. SP_EL3 */
	mov	x2, x20

	/* x3 will point to a cookie (not used now) */
	mov	x3, xzr

	/* Call the interrupt type handler */
	blr	x21

interrupt_exit:
	/* Return from exception, possibly in a different security state */
	b	el3_exit
endfunc handle_interrupt_exception

	/* ---------------------------------------------------------------------
	 * The following code handles exceptions caused by BRK instructions.
	 * Following a BRK instruction, the only valid course of action is to
	 * print some information and panic, as the code that caused it is
	 * likely in an inconsistent internal state.
	 *
	 * This is initially intended to be used in conjunction with
	 * __builtin_trap.
	 * ---------------------------------------------------------------------
	 */
#ifdef MONITOR_TRAPS
func brk_handler
	/* Extract the ISS */
	mrs	x10, esr_el3
	ubfx	x10, x10, #ESR_ISS_SHIFT, #ESR_ISS_LENGTH

	/* Ensure the console is initialized */
	bl	plat_crash_console_init

	adr	x4, brk_location
	bl	asm_print_str
	mrs	x4, elr_el3
	bl	asm_print_hex
	bl	asm_print_newline

	adr	x4, brk_message
	bl	asm_print_str
	mov	x4, x10
	mov	x5, #28
	bl	asm_print_hex_bits
	bl	asm_print_newline

	no_ret	plat_panic_handler
endfunc brk_handler
#endif /* MONITOR_TRAPS */
621