xref: /rk3399_ARM-atf/bl31/aarch64/runtime_exceptions.S (revision d04c04a4e8d968f9f82de810a3c763474e3faeb7)
/*
 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl31/ea_handle.h>
#include <bl31/interrupt_mgmt.h>
#include <bl31/sync_handle.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32

	/*
	 * Save LR to make x30 available, as most of the routines in the vector
	 * entries need a free register.
	 */
	.macro save_x30
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	.endm

	.macro restore_x30
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	.endm
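
	/*
	 * Note: while executing with SP_EL3 selected, sp points at the current
	 * cpu_context structure, so CTX_GPREGS_OFFSET + CTX_GPREG_LR is the
	 * per-context save slot for x30.
	 */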

	/*
	 * Macro that synchronizes errors (EA) and checks for a pending SError.
	 * On detecting one, it either reflects it back to the lower EL
	 * (KFH, Kernel First Handling) or handles it in EL3 (FFH, Firmware
	 * First Handling), depending on the EA routing model.
	 */
	.macro	sync_and_handle_pending_serror
	dsb	sy
	isb
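	/*
	 * The barriers above force any outstanding asynchronous external abort
	 * to become architecturally pending, so that the ISR_EL1.A check below
	 * reliably observes it.
	 */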
	mrs	x30, ISR_EL1
	tbz	x30, #ISR_A_SHIFT, 2f
#if HANDLE_EA_EL3_FIRST_NS
	mrs	x30, scr_el3
	tst	x30, #SCR_EA_BIT
	b.eq	1f
	bl	handle_pending_async_ea
	b	2f
#endif
1:
	/* This function never returns, but needs LR for decision making */
	bl	reflect_pending_async_ea_to_lower_el
2:
	.endm

	/* ---------------------------------------------------------------------
	 * This macro handles Synchronous exceptions.
	 * SMCs and system register traps are dispatched to their handlers;
	 * all other synchronous exceptions are treated as external aborts.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	mrs	x30, cntpct_el0
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	mrs	x29, tpidr_el3
	str	x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Handle SMC exceptions separately from other synchronous exceptions */
	cmp	x30, #EC_AARCH32_SMC
	b.eq	smc_handler32

	cmp	x30, #EC_AARCH64_SMC
	b.eq	sync_handler64

	cmp	x30, #EC_AARCH64_SYS
	b.eq	sync_handler64

	/* Synchronous exceptions other than the above are assumed to be EA */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	b	handle_lower_el_sync_ea
	.endm
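
	/*
	 * Note: smc_handler32 and sync_handler64 are labels within
	 * sync_exception_handler further below, so this macro effectively
	 * tail-calls into that function.
	 */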

vector_base runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
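	/*
	 * Per the AArch64 vector table layout, each vector_entry below spans
	 * 0x80 bytes, and the entries come in groups of four (Synchronous,
	 * IRQ, FIQ, SError), one group per combination of exception source
	 * and execution state.
	 */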
vector_entry sync_exception_sp_el0
#ifdef MONITOR_TRAPS
	stp	x29, x30, [sp, #-16]!

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Check for BRK */
	cmp	x30, #EC_BRK
	b.eq	brk_handler

	ldp	x29, x30, [sp], #16
#endif /* MONITOR_TRAPS */

	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
end_vector_entry irq_sp_el0

vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_el0

vector_entry serror_sp_el0
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_elx

vector_entry serror_sp_elx
#if HANDLE_EA_EL3_FIRST_NS
	/*
	 * This vector triggers either because an SError was taken in EL3, or
	 * because an asynchronous external abort from a lower EL was pending
	 * and got triggered by implicit/explicit synchronization in EL3
	 * (SCR_EL3.EA=1) during EL3 entry. In the former case we continue with
	 * "plat_handle_el3_ea". The latter case occurs when the PSTATE.A bit
	 * is cleared in "handle_pending_async_ea", which means we are taking a
	 * nested exception in EL3. Call the handler for async EAs, which erets
	 * back to the original EL3 handler if this is a nested exception.
	 * Also, unmask EAs so that we catch any further EA that arises while
	 * handling this nested exception at EL3.
	 */
	save_x30
	ldr	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	cbz	x30, 1f
	/*
	 * This is nested exception handling; clear the flag to avoid taking
	 * this path for further exceptions caused by EA handling.
	 */
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	unmask_async_ea
	b	handle_lower_el_async_ea
1:
	restore_x30
#endif
	no_ret	plat_handle_el3_ea

end_vector_entry serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * This exception vector is most commonly the entry point for SMCs and
	 * for traps that are unhandled at lower ELs. SP_EL3 should point to a
	 * valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch64

vector_entry irq_aarch64
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry irq_aarch64

vector_entry fiq_aarch64
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry fiq_aarch64

	/*
	 * SErrors can arrive in bursts, so synchronize any outstanding SError
	 * here and reuse the sync mechanism to catch any further errors that
	 * are still pending.
	 */
vector_entry serror_aarch64
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_lower_el_async_ea
end_vector_entry serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * This exception vector is most commonly the entry point for SMCs and
	 * for traps that are unhandled at lower ELs. SP_EL3 should point to a
	 * valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch32

vector_entry irq_aarch32
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry irq_aarch32

vector_entry fiq_aarch32
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry fiq_aarch32

	/*
	 * SErrors can arrive in bursts, so synchronize any outstanding SError
	 * here and reuse the sync mechanism to catch any further errors that
	 * are still pending.
	 */
vector_entry serror_aarch32
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_lower_el_async_ea
end_vector_entry serror_aarch32

#ifdef MONITOR_TRAPS
	.section .rodata.brk_string, "aS"
brk_location:
	.asciz "Error at instruction 0x"
brk_message:
	.asciz "Unexpected BRK instruction with value 0x"
#endif /* MONITOR_TRAPS */

	/* ---------------------------------------------------------------------
	 * The following code handles secure monitor calls.
	 * Depending on the execution state from which the SMC was invoked, it
	 * frees some general purpose registers to perform the remaining tasks.
	 * These involve finding the runtime service handler that is the target
	 * of the SMC and switching to the runtime stack (SP_EL0) before
	 * calling the handler.
	 *
	 * Note that x30 has been explicitly saved and can be used here.
	 * ---------------------------------------------------------------------
	 */
func sync_exception_handler
smc_handler32:
	/* Check whether AArch32 issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

sync_handler64:
	/* NOTE: The code below must preserve x0-x4 */

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * Also save PMCR_EL0 and set the PSTATE to a known state.
	 */
	bl	prepare_el3_entry

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/*
	 * Populate the parameters for the SMC handler.
	 * We already have x0-x4 in place. x5 will point to a cookie (not used
	 * now). x6 will point to the context structure (SP_EL3) and x7 will
	 * contain flags we need to pass to the handler.
	 */
	mov	x5, xzr
	mov	x6, sp

	/*
	 * Restore the saved C runtime stack value which will become the new
	 * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
	 * structure prior to the last ERET from EL3.
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* Switch to SP_EL0 */
	msr	spsel, #MODE_SP_EL0

	/*
	 * Save SPSR_EL3 and ELR_EL3 in case there is a world switch during
	 * SMC handling.
	 * TODO: Revisit if all system registers can be saved later.
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Load SCR_EL3 */
	mrs	x18, scr_el3

	/* Check for system register traps */
	mrs	x16, esr_el3
	ubfx	x17, x16, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x17, #EC_AARCH64_SYS
	b.eq	sysreg_handler64

	/* Clear the flags register */
	mov	x7, xzr

#if ENABLE_RME
	/* Copy SCR_EL3.NSE bit to the flag to indicate caller's security */
	ubfx	x7, x18, #SCR_NSE_SHIFT, #1

	/*
	 * Shift the copied SCR_EL3.NSE bit by 5 to create space for the
	 * SCR_EL3.NS bit. Bit 5 of the flag corresponds to the SCR_EL3.NSE
	 * bit.
	 */
	lsl	x7, x7, #5
#endif /* ENABLE_RME */

	/* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
	bfi	x7, x18, #0, #1
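	/*
	 * At this point x7 carries the caller's security state in the flags:
	 * bit 0 holds SCR_EL3.NS and, with RME enabled, bit 5 holds
	 * SCR_EL3.NSE. The SVE hint bit is OR-ed in further below.
	 */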

	mov	sp, x12

	/*
	 * Per the SMCCC documentation, bits [23:17] must be zero for Fast
	 * SMCs; other values are reserved for future use. Ensure these bits
	 * are zero and, if not, report the SMC as unknown.
	 */
	tbz	x0, #FUNCID_TYPE_SHIFT, 2f	/* Skip the check if it is a Yield call */
	tst	x0, #(FUNCID_FC_RESERVED_MASK << FUNCID_FC_RESERVED_SHIFT)
	b.ne	smc_unknown

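	/*
	 * For reference, the SMCCC function identifier layout in x0:
	 * bit [31] is the call type (1 = Fast, 0 = Yield), bit [30] the
	 * calling convention (0 = SMC32, 1 = SMC64), bits [29:24] the owning
	 * entity number, bits [23:17] are reserved, bit [16] is the SVE hint
	 * (SMCCC v1.3+) and bits [15:0] the function number.
	 */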
	/*
	 * Per SMCCC v1.3, a caller can set the SVE hint bit in the SMC FID
	 * passed through x0. Copy the SVE hint bit to the flags and mask the
	 * bit out of the smc_fid passed to the standard service dispatcher.
	 * A service/dispatcher can retrieve the SVE hint bit state from the
	 * flags using the appropriate helper.
	 */
2:
	and	x16, x0, #(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT)
	orr	x7, x7, x16
	bic	x0, x0, #(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT)
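	/*
	 * Clearing the hint from the FID ensures that calls made with and
	 * without the SVE hint dispatch to the same service handler.
	 */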

	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH
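	/*
	 * x16 now holds (type << FUNCID_OEN_WIDTH) | OEN, a 7-bit index into
	 * rt_svc_descs_indices, so valid values lie in the range 0-127.
	 */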

	/* Load descriptor index from array of indices */
	adrp	x14, rt_svc_descs_indices
	add	x14, x14, :lo12:rt_svc_descs_indices
	ldrb	w15, [x14, x16]

	/* Any index greater than 127 is invalid. Check bit 7. */
	tbnz	w15, 7, smc_unknown

	/*
	 * Get the descriptor using the index
	 * x11 = (base + off), w15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 */
	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]

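	/*
	 * In C terms, x15 is now roughly
	 * ((rt_svc_desc_t *)__RT_SVC_DESCS_START__)[index].handle.
	 */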
	/*
	 * Call the Secure Monitor Call handler and then drop directly into
	 * el3_exit() which will program any remaining architectural state
	 * prior to issuing the ERET to the desired lower EL.
	 */
#if DEBUG
	cbz	x15, rt_svc_fw_critical_error
#endif
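	/*
	 * The handler is invoked as (smc_fid, x1, x2, x3, x4, cookie, handle,
	 * flags), matching the rt_svc_handle_t prototype in
	 * common/runtime_svc.h.
	 */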
	blr	x15

	b	el3_exit

sysreg_handler64:
	mov	x0, x16		/* ESR_EL3, containing syndrome information */
	mov	x1, x6		/* lower EL's context */
	mov	x19, x6		/* save context pointer for after the call */
	mov	sp, x12		/* EL3 runtime stack, as loaded above */

	/* int handle_sysreg_trap(uint64_t esr_el3, cpu_context_t *ctx); */
	bl	handle_sysreg_trap
	/*
	 * returns:
	 *   -1: unhandled trap, panic
	 *    0: handled trap, return to the trapping instruction (repeating it)
	 *    1: handled trap, return to the next instruction
	 */

	tst	w0, w0
	b.mi	elx_panic	/* negative return value: panic */
	b.eq	1f		/* zero: do not change ELR_EL3 */

	/* advance the PC to continue after the instruction */
	ldr	x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3]
	add	x1, x1, #4
	str	x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3]
1:
	b	el3_exit

smc_unknown:
	/*
	 * Unknown SMC call. Populate the return value with SMC_UNK and call
	 * el3_exit(), which will restore the remaining architectural state,
	 * i.e. the SYS, GP and PAuth registers (if any), prior to issuing the
	 * ERET to the desired lower EL.
	 */
	mov	x0, #SMC_UNK
	str	x0, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	el3_exit

smc_prohibited:
	restore_ptw_el1_sys_regs
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x0, #SMC_UNK
	exception_return

#if DEBUG
rt_svc_fw_critical_error:
	/* Switch to SP_ELx */
	msr	spsel, #MODE_SP_ELX
	no_ret	report_unhandled_exception
#endif
endfunc sync_exception_handler

	/* ---------------------------------------------------------------------
	 * This function handles FIQ and IRQ interrupts, i.e. EL3, S-EL1 and NS
	 * interrupts.
	 *
	 * Note that x30 has been explicitly saved and can be used here.
	 * ---------------------------------------------------------------------
	 */
func handle_interrupt_exception
	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * Also save PMCR_EL0 and set the PSTATE to a known state.
	 */
	bl	prepare_el3_entry

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/* Save the EL3 system registers needed to return from this exception */
	mrs	x0, spsr_el3
	mrs	x1, elr_el3
	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Switch to the runtime stack, i.e. SP_EL0 */
	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	mov	x20, sp
	msr	spsel, #MODE_SP_EL0
	mov	sp, x2

	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	bl	plat_ic_get_pending_interrupt_type
	cmp	x0, #INTR_TYPE_INVAL
	b.eq	interrupt_exit

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be caused by any of the following
	 * conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	bl	get_interrupt_type_handler
	cbz	x0, interrupt_exit
	mov	x21, x0

	mov	x0, #INTR_ID_UNAVAILABLE

	/* Set the current security state in the 'flags' parameter */
	mrs	x2, scr_el3
	ubfx	x1, x2, #0, #1

	/* Restore the reference to the 'handle' i.e. SP_EL3 */
	mov	x2, x20

	/* x3 will point to a cookie (not used now) */
	mov	x3, xzr
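	/*
	 * The registered handler is invoked as (id, flags, handle, cookie),
	 * matching the interrupt_type_handler_t prototype in
	 * bl31/interrupt_mgmt.h.
	 */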

	/* Call the interrupt type handler */
	blr	x21

interrupt_exit:
	/* Return from exception, possibly in a different security state */
	b	el3_exit
endfunc handle_interrupt_exception

	/* ---------------------------------------------------------------------
	 * The following code handles exceptions caused by BRK instructions.
	 * Following a BRK instruction, the only valid course of action is to
	 * print some information and panic, as the code that caused it is
	 * likely in an inconsistent internal state.
	 *
	 * This is initially intended to be used in conjunction with
	 * __builtin_trap.
	 * ---------------------------------------------------------------------
	 */
#ifdef MONITOR_TRAPS
func brk_handler
	/* Extract the ISS */
	mrs	x10, esr_el3
	ubfx	x10, x10, #ESR_ISS_SHIFT, #ESR_ISS_LENGTH

	/* Ensure the console is initialized */
	bl	plat_crash_console_init

	adr	x4, brk_location
	bl	asm_print_str
	mrs	x4, elr_el3
	bl	asm_print_hex
	bl	asm_print_newline

	adr	x4, brk_message
	bl	asm_print_str
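	/*
	 * The 25-bit ISS (which carries the BRK comment field) is printed as
	 * 28 bits, i.e. 7 hex digits: the smallest nibble-aligned width that
	 * covers it.
	 */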
	mov	x4, x10
	mov	x5, #28
	bl	asm_print_hex_bits
	bl	asm_print_newline

	no_ret	plat_panic_handler
endfunc brk_handler
#endif /* MONITOR_TRAPS */