/*
 * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl31/ea_handle.h>
#include <bl31/interrupt_mgmt.h>
#include <bl31/sync_handle.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <cpu_macros.S>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32

	/*
	 * Save LR to make x30 available, as most of the routines in the
	 * vector entries need a free register.
	 */
	.macro save_x30
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	.endm

	.macro restore_x30
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	.endm
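
	/*
	 * Note: the save/restore macros above rely on SP still selecting
	 * SP_EL3 on exception entry, which BL31 keeps pointing at the current
	 * world's 'cpu_context' structure, so LR lands in that context's GP
	 * register area.
	 */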

	/*
	 * Macro that synchronizes errors (EA) and checks for pending SError.
	 * On detecting a pending SError it either reflects it back to the
	 * lower EL (KFH) or handles it in EL3 (FFH), based on the EA routing
	 * model.
	 */
	.macro	sync_and_handle_pending_serror
	synchronize_errors
	mrs	x30, ISR_EL1
	tbz	x30, #ISR_A_SHIFT, 2f
#if FFH_SUPPORT
	mrs	x30, scr_el3
	tst	x30, #SCR_EA_BIT
	b.eq	1f
	bl	handle_pending_async_ea
	b	2f
#endif
1:
	/* This function never returns, but LR is needed for decision making */
	bl	reflect_pending_async_ea_to_lower_el
2:
	.endm
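
	/*
	 * For reference, a C-like sketch of the routing decision made by the
	 * macro above (names follow this file; treat it as illustrative
	 * pseudocode, not a real function):
	 *
	 *	synchronize_errors();
	 *	if (ISR_EL1 & ISR_A_BIT) {
	 *		if (FFH_SUPPORT && (SCR_EL3 & SCR_EA_BIT))
	 *			handle_pending_async_ea();	/- FFH: handle in EL3
	 *		else
	 *			reflect_pending_async_ea_to_lower_el(); /- KFH
	 *	}
	 */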

	/* ---------------------------------------------------------------------
	 * This macro handles Synchronous exceptions.
	 * Besides SMCs, it also dispatches system register traps,
	 * implementation defined EL3 exceptions and (with FFH_SUPPORT) lower
	 * EL external aborts; anything else is unhandled.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	mrs	x30, cntpct_el0
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	mrs	x29, tpidr_el3
	str	x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Handle SMC exceptions separately from other synchronous exceptions */
	cmp	x30, #EC_AARCH32_SMC
	b.eq	smc_handler32

	cmp	x30, #EC_AARCH64_SMC
	b.eq	sync_handler64

	cmp	x30, #EC_AARCH64_SYS
	b.eq	sync_handler64

	cmp	x30, #EC_IMP_DEF_EL3
	b.eq	imp_def_el3_handler

	/* If FFH support is enabled, try to handle lower EL EA exceptions. */
#if FFH_SUPPORT
	mrs	x30, scr_el3
	tst	x30, #SCR_EA_BIT
	b.eq	1f
	b	handle_lower_el_sync_ea
#endif
1:
	/* Synchronous exceptions other than the above are unhandled */
	b	report_unhandled_exception
	.endm
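
	/*
	 * Quick reference for the dispatch above (EC values are the ESR_EL3
	 * exception classes used in this file):
	 *
	 *	EC_AARCH32_SMC / EC_AARCH64_SMC -> SMC handling
	 *	EC_AARCH64_SYS                  -> sysreg trap handling
	 *	EC_IMP_DEF_EL3                  -> CPU-specific handler
	 *	anything else                   -> lower EL EA (FFH only) or panic
	 */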

vector_base runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
#ifdef MONITOR_TRAPS
	stp	x29, x30, [sp, #-16]!

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Check for BRK */
	cmp	x30, #EC_BRK
	b.eq	brk_handler

	ldp	x29, x30, [sp], #16
#endif /* MONITOR_TRAPS */

	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
end_vector_entry irq_sp_el0


vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_el0


vector_entry serror_sp_el0
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_elx

vector_entry serror_sp_elx
#if FFH_SUPPORT
	/*
	 * This will trigger if the exception was taken due to an SError in
	 * EL3, or because of pending asynchronous external aborts from a
	 * lower EL that were synchronized, implicitly or explicitly, on EL3
	 * entry (SCR_EL3.EA = 1). For the former case we continue with
	 * "plat_handle_el3_ea". The latter case occurs when the PSTATE.A bit
	 * is cleared in "handle_pending_async_ea", which means we are taking
	 * a nested exception in EL3. Call the async EA handler, which will
	 * eret back to the original EL3 handler if this is a nested
	 * exception. Also unmask EA so that we catch any further EAs that
	 * arise while handling this nested exception at EL3.
	 */
	save_x30
	ldr	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	cbz	x30, 1f
	/*
	 * This is nested exception handling; clear the flag to avoid taking
	 * this path for further exceptions caused by EA handling.
	 */
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	unmask_async_ea
	b	handle_lower_el_async_ea
1:
	restore_x30
#endif
	no_ret	plat_handle_el3_ea

end_vector_entry serror_sp_elx
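
	/*
	 * Illustrative flow for the FFH path above (pseudocode, assuming the
	 * CTX_NESTED_EA_FLAG semantics described in the comment):
	 *
	 *	if (ctx->nested_ea_flag) {		/- SError taken while
	 *		ctx->nested_ea_flag = 0;	/- handling an EA at EL3
	 *		unmask_async_ea();
	 *		handle_lower_el_async_ea();	/- erets back when done
	 *	} else {
	 *		plat_handle_el3_ea();	/- genuine EL3 SError: no return
	 *	}
	 */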

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * This exception vector is most commonly the entry point for SMCs and
	 * for traps that are unhandled at lower ELs. SP_EL3 should point to a
	 * valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch64

vector_entry irq_aarch64
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry irq_aarch64

vector_entry fiq_aarch64
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry fiq_aarch64

	/*
	 * Need to synchronize any outstanding SErrors, since we can get a
	 * burst of errors; so reuse the synchronization mechanism to catch
	 * any further errors that are pending.
	 */
vector_entry serror_aarch64
#if FFH_SUPPORT
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_lower_el_async_ea
#else
	b	report_unhandled_exception
#endif
end_vector_entry serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * This exception vector is most commonly the entry point for SMCs and
	 * for traps that are unhandled at lower ELs. SP_EL3 should point to a
	 * valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch32

vector_entry irq_aarch32
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry irq_aarch32

vector_entry fiq_aarch32
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry fiq_aarch32

	/*
	 * Need to synchronize any outstanding SErrors, since we can get a
	 * burst of errors; so reuse the synchronization mechanism to catch
	 * any further errors that are pending.
	 */
vector_entry serror_aarch32
#if FFH_SUPPORT
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_lower_el_async_ea
#else
	b	report_unhandled_exception
#endif
end_vector_entry serror_aarch32

#ifdef MONITOR_TRAPS
	.section .rodata.brk_string, "aS"
brk_location:
	.asciz "Error at instruction 0x"
brk_message:
	.asciz "Unexpected BRK instruction with value 0x"
#endif /* MONITOR_TRAPS */

	/* ---------------------------------------------------------------------
	 * The following code handles secure monitor calls.
	 * Depending upon the execution state from where the SMC has been
	 * invoked, it frees some general purpose registers to perform the
	 * remaining tasks. They involve finding the runtime service handler
	 * that is the target of the SMC and switching to runtime stacks
	 * (SP_EL0) before calling the handler.
	 *
	 * Note that x30 has been explicitly saved and can be used here.
	 * ---------------------------------------------------------------------
	 */
func sync_exception_handler
smc_handler32:
	/* Check whether aarch32 issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

sync_handler64:
	/* NOTE: The code below must preserve x0-x4 */

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * Also save PMCR_EL0 and set the PSTATE to a known state.
	 */
	bl	prepare_el3_entry

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/*
	 * Populate the parameters for the SMC handler.
	 * We already have x0-x4 in place. x5 will point to a cookie (not used
	 * now). x6 will point to the context structure (SP_EL3) and x7 will
	 * contain flags we need to pass to the handler.
	 */
	mov	x5, xzr
	mov	x6, sp

	/*
	 * Restore the saved C runtime stack value which will become the new
	 * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
	 * structure prior to the last ERET from EL3.
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* Switch to SP_EL0 */
	msr	spsel, #MODE_SP_EL0

	/*
	 * Save the SPSR_EL3 and ELR_EL3 in case there is a world
	 * switch during SMC handling.
	 * TODO: Revisit if all system registers can be saved later.
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Load SCR_EL3 */
	mrs	x18, scr_el3

	/* Check for system register traps */
	mrs	x16, esr_el3
	ubfx	x17, x16, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x17, #EC_AARCH64_SYS
	b.eq	sysreg_handler64

	/* Clear flag register */
	mov	x7, xzr

#if ENABLE_RME
	/* Copy SCR_EL3.NSE bit to the flag to indicate caller's security */
	ubfx	x7, x18, #SCR_NSE_SHIFT, #1

	/*
	 * Shift copied SCR_EL3.NSE bit by 5 to create space for
	 * SCR_EL3.NS bit. Bit 5 of the flag corresponds to
	 * the SCR_EL3.NSE bit.
	 */
	lsl	x7, x7, #5
#endif /* ENABLE_RME */

	/* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
	bfi	x7, x18, #0, #1

	mov	sp, x12

	/*
	 * Per the SMCCC documentation, bits [23:17] must be zero for Fast
	 * SMCs. Other values are reserved for future use. Ensure that
	 * these bits are zeroes; if not, report as an unknown SMC.
	 */
	tbz	x0, #FUNCID_TYPE_SHIFT, 2f	/* Skip check if it's a Yield Call */
	tst	x0, #(FUNCID_FC_RESERVED_MASK << FUNCID_FC_RESERVED_SHIFT)
	b.ne	smc_unknown

	/*
	 * Per SMCCCv1.3 a caller can set the SVE hint bit in the SMC FID
	 * passed through x0. Copy the SVE hint bit to flags and mask the
	 * bit in smc_fid passed to the standard service dispatcher.
	 * A service/dispatcher can retrieve the SVE hint bit state from
	 * flags using the appropriate helper.
	 */
2:
	and	x16, x0, #(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT)
	orr	x7, x7, x16
	bic	x0, x0, #(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT)
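
	/*
	 * At this point the 'flags' argument in x7 carries, as built above
	 * (the NS/NSE positions follow the code; the SVE hint keeps its FID
	 * position, which SMCCCv1.3 defines as bit 16):
	 *
	 *	bit 0  : SCR_EL3.NS  - caller's security state
	 *	bit 5  : SCR_EL3.NSE - only when ENABLE_RME
	 *	bit 16 : SVE hint    - copied from the FID
	 */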

	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH
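
	/*
	 * Worked example (assuming the usual 6-bit OEN field): a Fast Call
	 * (type = 1) owned by the Standard Secure Service (OEN = 4) yields
	 * x16 = 4 | (1 << FUNCID_OEN_WIDTH) = 0x44, which is the slot looked
	 * up in rt_svc_descs_indices below.
	 */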

	/* Load descriptor index from array of indices */
	adrp	x14, rt_svc_descs_indices
	add	x14, x14, :lo12:rt_svc_descs_indices
	ldrb	w15, [x14, x16]

	/* Any index greater than 127 is invalid. Check bit 7. */
	tbnz	w15, 7, smc_unknown

	/*
	 * Get the descriptor using the index
	 * x11 = (base + off), w15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 */
	adr_l	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]
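
	/*
	 * Roughly the C equivalent of the lookup above (rt_svc_desc_t and its
	 * 'handle' member are assumed from runtime_svc.h):
	 *
	 *	rt_svc_desc_t *descs = (rt_svc_desc_t *)__RT_SVC_DESCS_START__;
	 *	handler = descs[index].handle;
	 */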
456cc485e27SMadhukar Pappireddy
457a6ef4393SDouglas Raillard	/*
458a6ef4393SDouglas Raillard	 * Call the Secure Monitor Call handler and then drop directly into
459a6ef4393SDouglas Raillard	 * el3_exit() which will program any remaining architectural state
460a6ef4393SDouglas Raillard	 * prior to issuing the ERET to the desired lower EL.
461caa84939SJeenu Viswambharan	 */
462caa84939SJeenu Viswambharan#if DEBUG
463caa84939SJeenu Viswambharan	cbz	x15, rt_svc_fw_critical_error
464caa84939SJeenu Viswambharan#endif
465caa84939SJeenu Viswambharan	blr	x15
466caa84939SJeenu Viswambharan
467bbf8f6f9SYatharth Kochar	b	el3_exit
4684f6ad66aSAchin Gupta
sysreg_handler64:
	mov	x0, x16		/* ESR_EL3, containing syndrome information */
	mov	x1, x6		/* lower EL's context */
	mov	x19, x6		/* save context pointer for after the call */
	mov	sp, x12		/* EL3 runtime stack, as loaded above */

	/* int handle_sysreg_trap(uint64_t esr_el3, cpu_context_t *ctx); */
	bl	handle_sysreg_trap
	/*
	 * returns:
	 *   -1: unhandled trap, UNDEF injection into lower EL
	 *    0: handled trap, return to the trapping instruction (repeating it)
	 *    1: handled trap, return to the next instruction
	 */

	tst	w0, w0
	b.mi	2f	/* negative: undefined exception injection */

	b.eq	1f	/* zero: do not change ELR_EL3 */
	/* positive: advance the PC to continue after the instruction */
	ldr	x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3]
	add	x1, x1, #4
	str	x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3]
1:
	b	el3_exit
2:
	/*
	 * UNDEF injection into the lower EL: support is only provided for a
	 * lower EL in AArch64 mode; for AArch32 mode it will do elx_panic as
	 * before.
	 */
	mrs	x0, spsr_el3
	tst	x0, #(SPSR_M_MASK << SPSR_M_SHIFT)
	b.ne	elx_panic
	/* Pass context pointer as an argument to inject_undef64 */
	mov	x0, x19
	bl	inject_undef64
	b	el3_exit

smc_unknown:
	/*
	 * Unknown SMC call. Populate return value with SMC_UNK and call
	 * el3_exit(), which will restore the remaining architectural state,
	 * i.e. SYS, GP and PAuth registers (if any), prior to issuing the
	 * ERET to the desired lower EL.
	 */
	mov	x0, #SMC_UNK
	str	x0, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	el3_exit

smc_prohibited:
	restore_ptw_el1_sys_regs
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x0, #SMC_UNK
	exception_return

#if DEBUG
rt_svc_fw_critical_error:
	/* Switch to SP_ELx */
	msr	spsel, #MODE_SP_ELX
	no_ret	report_unhandled_exception
#endif
endfunc sync_exception_handler

	/* ---------------------------------------------------------------------
	 * This function handles FIQ and IRQ interrupts, i.e. EL3, S-EL1 and
	 * NS interrupts.
	 *
	 * Note that x30 has been explicitly saved and can be used here
	 * ---------------------------------------------------------------------
	 */
func handle_interrupt_exception
	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * Also save PMCR_EL0 and set the PSTATE to a known state.
	 */
	bl	prepare_el3_entry

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/* Save the EL3 system registers needed to return from this exception */
	mrs	x0, spsr_el3
	mrs	x1, elr_el3
	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Switch to the runtime stack i.e. SP_EL0 */
	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	mov	x20, sp
	msr	spsel, #MODE_SP_EL0
	mov	sp, x2

	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	bl	plat_ic_get_pending_interrupt_type
	cmp	x0, #INTR_TYPE_INVAL
	b.eq	interrupt_exit
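
	/*
	 * plat_ic_get_pending_interrupt_type() is expected to return one of
	 * INTR_TYPE_EL3, INTR_TYPE_S_EL1, INTR_TYPE_NS or INTR_TYPE_INVAL
	 * (see interrupt_mgmt.h); only the last is treated as spurious.
	 */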

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be because of the following conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	bl	get_interrupt_type_handler
	cbz	x0, interrupt_exit
	mov	x21, x0

	mov	x0, #INTR_ID_UNAVAILABLE

	/* Set the current security state in the 'flags' parameter */
	mrs	x2, scr_el3
	ubfx	x1, x2, #0, #1

	/* Restore the reference to the 'handle' i.e. SP_EL3 */
	mov	x2, x20

	/* x3 will point to a cookie (not used now) */
	mov	x3, xzr

	/* Call the interrupt type handler */
	blr	x21

interrupt_exit:
	/* Return from exception, possibly in a different security state */
	b	el3_exit
endfunc handle_interrupt_exception
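
	/*
	 * For reference, the registered handler called above is expected to
	 * match the interrupt_type_handler_t prototype from interrupt_mgmt.h,
	 * along the lines of:
	 *
	 *	uint64_t (*handler)(uint32_t id, uint32_t flags,
	 *			    void *handle, void *cookie);
	 *
	 * with x0-x3 populated as shown above.
	 */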

func imp_def_el3_handler
	/* Save GP registers */
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]

	/* Get the cpu_ops pointer */
	bl	get_cpu_ops_ptr

	/* Get the cpu_ops exception handler */
	ldr	x0, [x0, #CPU_E_HANDLER_FUNC]

	/*
	 * If the reserved function pointer is NULL, this CPU does not have an
	 * implementation-defined exception handler function.
	 */
	cbz	x0, el3_handler_exit
	mrs	x1, esr_el3
	ubfx	x1, x1, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	blr	x0
el3_handler_exit:
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	restore_x30
	no_ret	report_unhandled_exception
endfunc imp_def_el3_handler

/*
 * Handler for async EA from lower EL synchronized at EL3 entry in KFH mode.
 *
 * This scenario may arise when there is an error (EA) in the system which is
 * not yet signaled to the PE while executing in a lower EL. During entry into
 * EL3, the errors are synchronized either implicitly or explicitly, causing
 * async EA to pend at EL3.
 *
 * On detecting the pending EA (via ISR_EL1.A), and if the EA routing model is
 * KFH (SCR_EL3.EA = 0), this handler reflects the error back to the lower EL.
 *
 * This function assumes x30 has been saved.
 */
func reflect_pending_async_ea_to_lower_el
	/*
	 * As the original exception was not handled, we need to ensure that
	 * we return back to the instruction which caused the exception. To
	 * achieve that, eret to "elr-4" (label "subtract_elr_el3") for SMCs,
	 * or simply eret otherwise (label "skip_smc_check").
	 *
	 * LIMITATION: It could be that the async EA is masked at the target
	 * exception level, or that the priority of the async EA with respect
	 * to the EL3/secure interrupt is lower, which causes back and forth
	 * between the lower EL and EL3. In that case we can track the loop
	 * count in "CTX_NESTED_EA_FLAG" and leverage the previous ELR in
	 * "CTX_SAVED_ELR_EL3" to detect this cycle and panic to indicate a
	 * problem here (label "check_loop_ctr"). If we are in this cycle, the
	 * loop counter retains its value, but if we do a normal el3_exit this
	 * flag gets cleared. However, setting SCR_EL3.IESB = 1 should give
	 * priority to SError handling, as per the AArch64.TakeException pseudo
	 * code in the Arm ARM.
	 *
	 * TODO: In future, if EL3 gains the capability to inject a virtual
	 * SError into lower ELs, we can remove the el3_panic, handle the
	 * original exception first and inject an SError into the lower EL
	 * before ereting back.
	 */
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x29, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
	mrs	x28, elr_el3
	cmp	x29, x28
	b.eq	check_loop_ctr
	str	x28, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
	/* Zero the loop counter */
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	b	skip_loop_ctr
check_loop_ctr:
	ldr	x29, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	add	x29, x29, #1
	str	x29, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	cmp	x29, #ASYNC_EA_REPLAY_COUNTER
	b.ge	el3_panic
skip_loop_ctr:
	/*
	 * Logic to distinguish whether we came from an SMC or any other
	 * exception. Use the offset within the vector table to find out which
	 * exception we are handling. In each 0x200 group of vector entries
	 * (each entry being 0x80 bytes), offset "0x0-0x80" holds the sync
	 * exception entry and "0x80-0x200" the async exception entries.
	 * Use the vector base address (vbar_el3) and the exception address
	 * (LR) to calculate whether the address we came from is in any of:
	 * "0x0-0x80", "0x200-0x280", "0x400-0x480" or "0x600-0x680".
	 */
	mrs	x29, vbar_el3
	sub	x30, x30, x29
	and	x30, x30, #0x1ff
	cmp	x30, #0x80
	b.ge	skip_smc_check
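
	/*
	 * Worked example: the "bl" in sync_and_handle_pending_serror leaves
	 * LR pointing into the vector entry we came through. For
	 * sync_exception_aarch64 (0x400-0x480) this gives
	 * (LR - vbar_el3) & 0x1ff < 0x80, so the SMC check below runs; for
	 * irq_aarch64 (0x480-0x500) it gives >= 0x80, so it is skipped.
	 */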
	/* It is a synchronous exception. Now check whether it is an SMC. */
	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x30, #EC_AARCH32_SMC
	b.eq	subtract_elr_el3
	cmp	x30, #EC_AARCH64_SMC
	b.eq	subtract_elr_el3
	b	skip_smc_check
subtract_elr_el3:
	sub	x28, x28, #4
skip_smc_check:
	msr	elr_el3, x28
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	exception_return
endfunc reflect_pending_async_ea_to_lower_el

	/* ---------------------------------------------------------------------
	 * The following code handles exceptions caused by BRK instructions.
	 * Following a BRK instruction, the only real valid course of action is
	 * to print some information and panic, as the code that caused it is
	 * likely in an inconsistent internal state.
	 *
	 * This is initially intended to be used in conjunction with
	 * __builtin_trap.
	 * ---------------------------------------------------------------------
	 */
#ifdef MONITOR_TRAPS
func brk_handler
	/* Extract the ISS */
	mrs	x10, esr_el3
	ubfx	x10, x10, #ESR_ISS_SHIFT, #ESR_ISS_LENGTH

	/* Ensure the console is initialized */
	bl	plat_crash_console_init

	adr	x4, brk_location
	bl	asm_print_str
	mrs	x4, elr_el3
	bl	asm_print_hex
	bl	asm_print_newline

	adr	x4, brk_message
	bl	asm_print_str
	mov	x4, x10
	mov	x5, #28
	bl	asm_print_hex_bits
	bl	asm_print_newline

	no_ret	plat_panic_handler
endfunc brk_handler
#endif /* MONITOR_TRAPS */