/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl31/ea_handle.h>
#include <bl31/interrupt_mgmt.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32

	/*
	 * Macro that prepares entry to EL3 upon taking an exception.
	 *
	 * With RAS_EXTENSION, this macro synchronizes pending errors with an
	 * ESB instruction. When an error is thus synchronized, the handling is
	 * delegated to the platform EA handler.
	 *
	 * Without RAS_EXTENSION, this macro just saves x30 and unmasks
	 * asynchronous External Aborts (SError interrupts).
	 */
	.macro check_and_unmask_ea
#if RAS_EXTENSION
	/* Synchronize pending External Aborts */
	esb

	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/*
	 * Explicitly save x30 so that it is free for use as a scratch
	 * register and as the link register for the 'bl' calls below
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

	/* Check for SErrors synchronized by the ESB instruction */
	mrs	x30, DISR_EL1
	tbz	x30, #DISR_A_BIT, 1f

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * If Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable Cycle Counter.
	 */
	bl	save_gp_pmcr_pauth_regs

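	/* Delegate the deferred error to the platform EA handler */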
	bl	handle_lower_el_ea_esb

	/* Restore general purpose, PMCR_EL0 and ARMv8.3-PAuth registers */
	bl	restore_gp_pmcr_pauth_regs
1:
#else
	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
#endif
	.endm

	/* ---------------------------------------------------------------------
	 * This macro handles Synchronous exceptions.
	 * Only SMC exceptions are supported; any other synchronous exception
	 * is treated as an External Abort.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	mrs	x30, cntpct_el0
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	mrs	x29, tpidr_el3
	str	x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

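	/* Extract the exception class (EC) field from ESR_EL3 */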
	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Handle SMC exceptions separately from other synchronous exceptions */
	cmp	x30, #EC_AARCH32_SMC
	b.eq	smc_handler32

	cmp	x30, #EC_AARCH64_SMC
	b.eq	smc_handler64

	/* Synchronous exceptions other than the above are assumed to be EA */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	b	enter_lower_el_sync_ea
	.endm


	/* ---------------------------------------------------------------------
	 * This macro handles FIQ and IRQ interrupts, i.e. EL3, S-EL1 and NS
	 * interrupts.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_interrupt_exception label

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * If Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable Cycle Counter.
	 */
	bl	save_gp_pmcr_pauth_regs

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/* Save the EL3 system registers needed to return from this exception */
	mrs	x0, spsr_el3
	mrs	x1, elr_el3
	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Switch to the runtime stack i.e. SP_EL0 */
	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
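	/* Keep a copy of the context pointer (SP_EL3) in x20 for the handler */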
	mov	x20, sp
	msr	spsel, #MODE_SP_EL0
	mov	sp, x2

	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	bl	plat_ic_get_pending_interrupt_type
	cmp	x0, #INTR_TYPE_INVAL
	b.eq	interrupt_exit_\label

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be because of the following conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	bl	get_interrupt_type_handler
	cbz	x0, interrupt_exit_\label
	mov	x21, x0

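	/*
	 * Pass INTR_ID_UNAVAILABLE to the handler; if it needs the interrupt
	 * ID, the handler is expected to query the interrupt controller itself.
	 */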
	mov	x0, #INTR_ID_UNAVAILABLE

	/* Set the current security state in the 'flags' parameter */
	mrs	x2, scr_el3
	ubfx	x1, x2, #0, #1

	/* Restore the reference to the 'handle' i.e. SP_EL3 */
	mov	x2, x20

	/* x3 will point to a cookie (not used now) */
	mov	x3, xzr

	/* Call the interrupt type handler */
	blr	x21

interrupt_exit_\label:
	/* Return from exception, possibly in a different security state */
	b	el3_exit

	.endm


vector_base runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
#ifdef MONITOR_TRAPS
	stp	x29, x30, [sp, #-16]!

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Check for BRK */
	cmp	x30, #EC_BRK
	b.eq	brk_handler

	ldp	x29, x30, [sp], #16
#endif /* MONITOR_TRAPS */

	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
end_vector_entry irq_sp_el0


vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_el0


vector_entry serror_sp_el0
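	/* An SError taken from EL3 itself is delegated to the platform handler */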
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_elx

vector_entry serror_sp_elx
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * This exception vector will most commonly be the entry point for
	 * SMCs and traps that are unhandled at lower ELs. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch64

vector_entry irq_aarch64
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_interrupt_exception irq_aarch64
end_vector_entry irq_aarch64

vector_entry fiq_aarch64
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_interrupt_exception fiq_aarch64
end_vector_entry fiq_aarch64

vector_entry serror_aarch64
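	/* SErrors from lower ELs are handled as asynchronous External Aborts */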
	apply_at_speculative_wa
	msr	daifclr, #DAIF_ABT_BIT
	b	enter_lower_el_async_ea
end_vector_entry serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * This exception vector will most commonly be the entry point for
	 * SMCs and traps that are unhandled at lower ELs. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch32

vector_entry irq_aarch32
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_interrupt_exception irq_aarch32
end_vector_entry irq_aarch32

vector_entry fiq_aarch32
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_interrupt_exception fiq_aarch32
end_vector_entry fiq_aarch32

vector_entry serror_aarch32
	apply_at_speculative_wa
	msr	daifclr, #DAIF_ABT_BIT
	b	enter_lower_el_async_ea
end_vector_entry serror_aarch32

#ifdef MONITOR_TRAPS
	.section .rodata.brk_string, "aS"
brk_location:
	.asciz "Error at instruction 0x"
brk_message:
	.asciz "Unexpected BRK instruction with value 0x"
#endif /* MONITOR_TRAPS */

	/* ---------------------------------------------------------------------
	 * The following code handles secure monitor calls.
	 * Depending upon the execution state from where the SMC has been
	 * invoked, it frees some general purpose registers to perform the
	 * remaining tasks. They involve finding the runtime service handler
	 * that is the target of the SMC and switching to the runtime stack
	 * (SP_EL0) before calling the handler.
	 *
	 * Note that x30 has been explicitly saved and can be used here.
	 * ---------------------------------------------------------------------
	 */
func smc_handler
smc_handler32:
	/* Check whether an AArch32 caller issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

smc_handler64:
	/* NOTE: The code below must preserve x0-x4 */

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * If Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable Cycle Counter.
	 */
	bl	save_gp_pmcr_pauth_regs

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/*
	 * Populate the parameters for the SMC handler.
	 * We already have x0-x4 in place. x5 will point to a cookie (not used
	 * now). x6 will point to the context structure (SP_EL3) and x7 will
	 * contain flags we need to pass to the handler.
	 */
	mov	x5, xzr
	mov	x6, sp

	/*
	 * Restore the saved C runtime stack value which will become the new
	 * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
	 * structure prior to the last ERET from EL3.
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* Switch to SP_EL0 */
	msr	spsel, #MODE_SP_EL0

	/*
	 * Save the SPSR_EL3, ELR_EL3 and SCR_EL3 in case there is a world
	 * switch during SMC handling.
	 * TODO: Revisit if all system registers can be saved later.
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	mrs	x18, scr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	str	x18, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/* Copy the SCR_EL3.NS bit to the flags to indicate caller's security */
	bfi	x7, x18, #0, #1

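	/* Switch to the C runtime stack loaded into x12 above */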
	mov	sp, x12

	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH
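	/*
	 * x16 now combines the SMC call type (fast or yielding) with the OEN,
	 * so each (type, OEN) pair resolves to its own descriptor index.
	 */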

	/* Load descriptor index from array of indices */
	adrp	x14, rt_svc_descs_indices
	add	x14, x14, :lo12:rt_svc_descs_indices
	ldrb	w15, [x14, x16]

	/* Any index greater than 127 is invalid. Check bit 7. */
	tbnz	w15, 7, smc_unknown

	/*
	 * Get the descriptor using the index
	 * x11 = (base + off), w15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 */
	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]

	/*
	 * Call the Secure Monitor Call handler and then drop directly into
	 * el3_exit() which will program any remaining architectural state
	 * prior to issuing the ERET to the desired lower EL.
	 */
#if DEBUG
	cbz	x15, rt_svc_fw_critical_error
#endif
	blr	x15

	b	el3_exit

smc_unknown:
	/*
	 * Unknown SMC call. Populate the return value with SMC_UNK and call
	 * el3_exit(), which will restore the remaining architectural state,
	 * i.e. the SYS, GP and PAuth registers (if any), prior to issuing
	 * the ERET to the desired lower EL.
	 */
	mov	x0, #SMC_UNK
	str	x0, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	el3_exit

smc_prohibited:
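	/* AArch32 callers must not use the SMC64 calling convention */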
	restore_ptw_el1_sys_regs
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x0, #SMC_UNK
	exception_return

#if DEBUG
rt_svc_fw_critical_error:
	/* Switch to SP_ELx */
	msr	spsel, #MODE_SP_ELX
	no_ret	report_unhandled_exception
#endif
endfunc smc_handler

	/* ---------------------------------------------------------------------
	 * The following code handles exceptions caused by BRK instructions.
	 * Following a BRK instruction, the only real valid course of action is
	 * to print some information and panic, as the code that caused it is
	 * likely in an inconsistent internal state.
	 *
	 * This is initially intended to be used in conjunction with
	 * __builtin_trap.
	 * ---------------------------------------------------------------------
	 */
#ifdef MONITOR_TRAPS
func brk_handler
	/* Extract the ISS */
	mrs	x10, esr_el3
	ubfx	x10, x10, #ESR_ISS_SHIFT, #ESR_ISS_LENGTH

	/* Ensure the console is initialized */
	bl	plat_crash_console_init

	adr	x4, brk_location
	bl	asm_print_str
	mrs	x4, elr_el3
	bl	asm_print_hex
	bl	asm_print_newline

	adr	x4, brk_message
	bl	asm_print_str
	mov	x4, x10
	mov	x5, #28
	bl	asm_print_hex_bits
	bl	asm_print_newline

	no_ret	plat_panic_handler
endfunc brk_handler
#endif /* MONITOR_TRAPS */
