xref: /rk3399_ARM-atf/bl31/aarch64/runtime_exceptions.S (revision caa84939a4d8b1189dea8619ccc57bdb3026b125)
/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <runtime_svc.h>
#include <platform.h>
#include <context.h>
#include "cm_macros.S"
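	/*
	 * cm_macros.S is expected to provide the
	 * handle_sync_exception and handle_async_exception
	 * macros invoked by the vectors below.
	 */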

	.globl	runtime_exceptions
	.globl	el3_exit
	.globl	get_exception_stack

	.section	.vectors, "ax"; .align 11
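	/* -----------------------------------------------------
	 * The vector table must be 2KB aligned, since VBAR_EL3
	 * ignores the low 11 bits of the programmed base. Each
	 * vector below is 128 bytes (32 instructions), hence
	 * the .align 7 preceding every entry.
	 * -----------------------------------------------------
	 */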

	.align	7
runtime_exceptions:
	/* -----------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
sync_exception_sp_el0:
	/* -----------------------------------------------------
	 * We don't expect any synchronous exceptions from EL3
	 * -----------------------------------------------------
	 */
	wfi
	b	sync_exception_sp_el0

	.align	7
	/* -----------------------------------------------------
	 * EL3 code is non-reentrant. Any asynchronous exception
	 * is a serious error. Loop infinitely.
	 * -----------------------------------------------------
	 */
irq_sp_el0:
	handle_async_exception IRQ_SP_EL0
	b	irq_sp_el0

	.align	7
fiq_sp_el0:
	handle_async_exception FIQ_SP_EL0
	b	fiq_sp_el0

	.align	7
serror_sp_el0:
	handle_async_exception SERROR_SP_EL0
	b	serror_sp_el0

	/* -----------------------------------------------------
	 * Current EL with SPx: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7
sync_exception_sp_elx:
	/* -----------------------------------------------------
	 * This exception will trigger if anything went wrong
	 * during a previous exception entry or exit, or while
	 * handling an earlier unexpected synchronous exception.
	 * In any case we cannot rely on SP_EL3. Switching to a
	 * known safe area of memory would corrupt at least one
	 * register. It is best to enter a wfi loop, as that
	 * preserves the system state for later analysis through
	 * a debugger.
	 * -----------------------------------------------------
	 */
	wfi
	b	sync_exception_sp_elx

	/* -----------------------------------------------------
	 * As mentioned in the previous comment, all bets are
	 * off if SP_EL3 cannot be relied upon. Report the
	 * occurrence of these exceptions by looping forever.
	 * -----------------------------------------------------
	 */
	.align	7
irq_sp_elx:
	b	irq_sp_elx
	.align	7
fiq_sp_elx:
	b	fiq_sp_elx
	.align	7
serror_sp_elx:
	b	serror_sp_elx

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7
sync_exception_aarch64:
	/* -----------------------------------------------------
	 * This exception vector is most commonly the entry
	 * point for SMCs; it also catches traps that are
	 * unhandled at lower ELs. SP_EL3 should point to a
	 * valid cpu context where the general purpose and
	 * system register state can be saved.
	 * -----------------------------------------------------
	 */
	handle_sync_exception

	.align	7
	/* -----------------------------------------------------
	 * Asynchronous exceptions from lower ELs are not
	 * currently supported. Report their occurrence.
	 * -----------------------------------------------------
	 */
irq_aarch64:
	handle_async_exception IRQ_AARCH64
	b	irq_aarch64

	.align	7
fiq_aarch64:
	handle_async_exception FIQ_AARCH64
	b	fiq_aarch64

	.align	7
serror_aarch64:
	handle_async_exception SERROR_AARCH64
	b	serror_aarch64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7
sync_exception_aarch32:
	/* -----------------------------------------------------
	 * This exception vector is most commonly the entry
	 * point for SMCs; it also catches traps that are
	 * unhandled at lower ELs. SP_EL3 should point to a
	 * valid cpu context where the general purpose and
	 * system register state can be saved.
	 * -----------------------------------------------------
	 */
	handle_sync_exception

	.align	7
	/* -----------------------------------------------------
	 * Asynchronous exceptions from lower ELs are not
	 * currently supported. Report their occurrence.
	 * -----------------------------------------------------
	 */
irq_aarch32:
	handle_async_exception IRQ_AARCH32
	b	irq_aarch32

	.align	7
fiq_aarch32:
	handle_async_exception FIQ_AARCH32
	b	fiq_aarch32

	.align	7
serror_aarch32:
	handle_async_exception SERROR_AARCH32
	b	serror_aarch32
	.align	7

	.section	.text, "ax"
	/* -----------------------------------------------------
	 * The following code handles secure monitor calls.
	 * Depending upon the execution state from which the SMC
	 * has been invoked, it frees up some general purpose
	 * registers to perform the remaining tasks: finding the
	 * runtime service handler that is the target of the SMC
	 * and switching to the runtime stack (SP_EL0) before
	 * calling that handler.
	 *
	 * Note that x30 has been explicitly saved and can be
	 * used here.
	 * -----------------------------------------------------
	 */
smc_handler32:
	/* Check whether aarch32 issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited
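	/*
	 * The FUNCID_CC_SHIFT bit of the function id selects
	 * the SMC64 calling convention, which an AArch32 caller
	 * cannot legitimately use.
	 */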

	/* -----------------------------------------------------
	 * Since we are coming from aarch32, x8-x18 need to be
	 * saved as per the SMC32 calling convention. If a lower
	 * EL in aarch64 is making an SMC32 call then it must
	 * have saved x8-x17 already.
	 * -----------------------------------------------------
	 */
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]

	/* x4-x7, x18 and sp_el0 are saved below */

smc_handler64:
	/* -----------------------------------------------------
	 * Populate the parameters for the SMC handler. We
	 * already have x0-x4 in place. x5 will point to a
	 * cookie (not used now), x6 will point to the context
	 * structure (SP_EL3) and x7 will contain flags we need
	 * to pass to the handler. Hence save x5-x7. Note that
	 * x4 only needs to be preserved for AArch32 callers,
	 * but we do it for AArch64 callers as well for
	 * convenience.
	 * -----------------------------------------------------
	 */
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]

	mov	x5, xzr
	mov	x6, sp

	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH
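	/*
	 * x16 now holds an index that is unique for each
	 * {call type, OEN} pair; it is used below to index the
	 * array of descriptor indices.
	 */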

	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)

	/* Load descriptor index from array of indices */
	adr	x14, rt_svc_descs_indices
	ldrb	w15, [x14, x16]
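	/*
	 * w15 is the index of the descriptor registered for
	 * this service. Entries with bit 7 set (checked below)
	 * belong to services that were never registered.
	 */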

	/* Save x18 and SP_EL0 */
	mrs	x17, sp_el0
	stp	x18, x17, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]

	/* -----------------------------------------------------
	 * Restore the saved C runtime stack value which will
	 * become the new SP_EL0 i.e. the EL3 runtime stack. It
	 * was saved in the 'cpu_context' structure prior to the
	 * last ERET from EL3.
	 * -----------------------------------------------------
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/*
	 * Any index greater than 127 is invalid. Check bit 7
	 * for a valid index.
	 */
	tbnz	w15, 7, smc_unknown

	/* Switch to SP_EL0 */
	msr	spsel, #0
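	/*
	 * sp now aliases SP_EL0, which still holds the caller's
	 * value until the runtime stack is installed below;
	 * meanwhile the context is addressed through x6.
	 */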

	/* -----------------------------------------------------
	 * Get the descriptor using the index
	 * x11 = (base + off), x15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 * -----------------------------------------------------
	 */
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]

	/* -----------------------------------------------------
	 * Save SPSR_EL3, ELR_EL3 and SCR_EL3 in case there is
	 * a world switch during SMC handling.
	 * TODO: Revisit if all system registers can be saved
	 * later.
	 * -----------------------------------------------------
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	mrs	x18, scr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	stp	x18, xzr, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
	bfi	x7, x18, #0, #1
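	/*
	 * Bit 0 of the flags in x7 now indicates whether the
	 * SMC was issued from the non-secure world.
	 */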

	mov	sp, x12

	/* -----------------------------------------------------
	 * Call the Secure Monitor Call handler and then drop
	 * directly into el3_exit() which will program any
	 * remaining architectural state prior to issuing the
	 * ERET to the desired lower EL.
	 * -----------------------------------------------------
	 */
#if DEBUG
	cbz	x15, rt_svc_fw_critical_error
#endif
	blr	x15

	/* -----------------------------------------------------
	 * This routine assumes that SP_EL3 is pointing to a
	 * valid context structure from which the gp regs and
	 * other special registers can be retrieved.
	 * -----------------------------------------------------
	 */
el3_exit: ; .type el3_exit, %function
	/* -----------------------------------------------------
	 * Save the current SP_EL0, i.e. the EL3 runtime stack,
	 * which will be used for handling the next SMC. Then
	 * switch to SP_EL3.
	 * -----------------------------------------------------
	 */
	mov	x17, sp
	msr	spsel, #1
	str	x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* -----------------------------------------------------
	 * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
	 * -----------------------------------------------------
	 */
	ldp	x18, xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	ldp	x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	msr	scr_el3, x18
	msr	spsr_el3, x16
	msr	elr_el3, x17

	/* Restore saved general purpose registers and return */
	bl	restore_scratch_registers
	ldp	x30, xzr, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	eret

smc_unknown:
	/*
	 * Here we restore x4-x18 regardless of where we came from. AArch32
	 * callers will find the register contents unchanged, but AArch64
	 * callers will find the registers modified (with stale earlier NS
	 * content). Either way, we aren't leaking any secure information
	 * through them.
	 */
	bl	restore_scratch_registers_callee
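	/* Fall through to return SMC_UNK to the caller */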

smc_prohibited:
	ldp	x30, xzr, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	w0, #SMC_UNK
	eret

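	/*
	 * Only reached in DEBUG builds, when a registered
	 * runtime service carries a NULL handler. Loop so that
	 * the state is preserved for a debugger.
	 */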
rt_svc_fw_critical_error:
	b	rt_svc_fw_critical_error

	/* -----------------------------------------------------
	 * The following functions are used to save and restore
	 * all the caller-saved registers as per the aapcs_64.
	 * They are not macros, to ensure their invocation fits
	 * within the 32 instructions per exception vector.
	 * -----------------------------------------------------
	 */
save_scratch_registers: ; .type save_scratch_registers, %function
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	mrs	x17, sp_el0
	stp	x18, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	ret

restore_scratch_registers: ; .type restore_scratch_registers, %function
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]

restore_scratch_registers_callee:
	ldp	x18, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
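	/*
	 * x17 temporarily carries the saved SP_EL0; it is moved
	 * into sp_el0 below, just before x16 and x17 themselves
	 * are reloaded.
	 */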

	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]

	msr	sp_el0, x17
	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	ret

	/* -----------------------------------------------------
	 * Exception stack for each cpu: 256 bytes, raised to
	 * 0x300 bytes in DEBUG builds.
	 * -----------------------------------------------------
	 */
#if DEBUG
#define PCPU_EXCEPTION_STACK_SIZE	0x300
#else
#define PCPU_EXCEPTION_STACK_SIZE	0x100
#endif
	/* -----------------------------------------------------
	 * uint64_t get_exception_stack(uint64_t mpidr) : This
	 * function returns the top of a small per-cpu stack
	 * used for reporting unhandled exceptions.
	 * -----------------------------------------------------
	 */
get_exception_stack: ; .type get_exception_stack, %function
	mov	x10, x30 // lr
	bl	platform_get_core_pos
	add	x0, x0, #1
	mov	x1, #PCPU_EXCEPTION_STACK_SIZE
	mul	x0, x0, x1
	ldr	x1, =pcpu_exception_stack
	add	x0, x1, x0
	ret	x10
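	/*
	 * Note that (core position + 1) * stack size is
	 * returned, i.e. the exclusive top of this cpu's slot:
	 * the correct initial value for a full descending
	 * stack.
	 */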

	/* -----------------------------------------------------
	 * Per-cpu exception stacks in normal memory.
	 * -----------------------------------------------------
	 */
	.section	data, "aw", %nobits; .align 6

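	/*
	 * Being %nobits (BSS-like), the zero fill below is
	 * implicit and occupies no space in the binary image.
	 */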
pcpu_exception_stack:
	/* Zero fill */
	.space (PLATFORM_CORE_COUNT * PCPU_EXCEPTION_STACK_SIZE), 0