xref: /optee_os/core/arch/arm/kernel/thread_a32.S (revision 3b4ffdf0eea446f592b08ead6d1554247e6d2d9e)
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016-2020, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>

	.syntax unified
	.arch_extension sec

	.macro cmp_spsr_user_mode reg:req
		/*
		 * We only need to test the lower 4 bits as bit 4 (0x10)
		 * is always set. Z is set when the mode is user mode.
		 */
		tst	\reg, #0x0f
	.endm
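	/*
	 * Typical use, as in the exception return paths below (Z is set
	 * when the tested SPSR is user mode):
	 *
	 *	cmp_spsr_user_mode r0
	 *	movsne	pc, lr		(kernel mode: plain exception return)
	 *	b	eret_to_user_mode
	 */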
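/*
 * The helpers below reach the banked sp/lr registers of other processor
 * modes by temporarily switching mode with "cps" while the original
 * CPSR is kept in r1 and restored before returning.
 */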
FUNC thread_set_abt_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_abt_sp

FUNC thread_set_und_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_UND
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_und_sp

FUNC thread_set_irq_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_fiq_sp

FUNC thread_get_usr_sp , :
	mrs	r1, cpsr
	cpsid	aif
	cps	#CPSR_MODE_SYS
	mov	r0, sp
	msr	cpsr, r1
	bx	lr
END_FUNC thread_get_usr_sp

FUNC thread_get_usr_lr , :
	mrs	r1, cpsr
	cpsid	aif
	cps	#CPSR_MODE_SYS
	mov	r0, lr
	msr	cpsr, r1
	bx	lr
END_FUNC thread_get_usr_lr

FUNC thread_set_usr_lr , :
	mrs	r1, cpsr
	cpsid	aif
	cps	#CPSR_MODE_SYS
	mov	lr, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_usr_lr

/* void thread_resume(struct thread_ctx_regs *regs) */
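/*
 * The offsets used below imply that struct thread_ctx_regs is laid out
 * as r0-r12, usr_sp, usr_lr, svc_spsr, svc_sp, svc_lr, pc, cpsr (an
 * inference from this file; the authoritative layout is the C struct
 * behind the THREAD_CTX_REGS_* constants in generated/asm-defines.h).
 */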
FUNC thread_resume , :
UNWIND(	.cantunwind)
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4

	cps	#CPSR_MODE_SVC
	ldr	r1, [r12], #4
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4
	msr	spsr_fsxc, r1

	ldm	r12, {r1, r2}		/* Load pc and cpsr */

	/*
	 * Switch to a mode other than SVC since we need to assign spsr
	 * in order to return into the old state properly, and the mode
	 * we're returning to may itself be SVC.
	 */
	cps	#CPSR_MODE_ABT
	cmp_spsr_user_mode r2
	mov	lr, r1
	msr	spsr_fsxc, r2
	ldm	r0, {r0-r12}
	movsne	pc, lr
	b	eret_to_user_mode
END_FUNC thread_resume

/*
 * Disables IRQ and FIQ and saves the state of a thread from FIQ mode,
 * which has its own banked r8-r12 registers. Returns the original CPSR.
 */
LOCAL_FUNC thread_save_state_fiq , :
UNWIND(	.cantunwind)
	mov	r9, lr

	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r8, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}

	cps	#CPSR_MODE_SYS
	stm	r0!, {r8-r12}
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	/* back to fiq mode */
	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r8			/* Return original CPSR */
	bx	r9
END_FUNC thread_save_state_fiq

/*
 * Disables IRQ and FIQ and saves the state of the thread, returns the
 * original CPSR.
 */
FUNC thread_save_state , :
UNWIND(	.cantunwind)
	push	{r12, lr}
	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps	#CPSR_MODE_SYS
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
END_FUNC thread_save_state

#ifdef CFG_CORE_SEL2_SPMC
/*
 * unsigned long thread_hvc(unsigned long func_id, unsigned long a1,
 *			    unsigned long a2, unsigned long a3)
 */
FUNC thread_hvc , :
	push	{r4-r7}
UNWIND(	.save	{r4-r7})
	hvc	#0
	pop	{r4-r7}
	bx	lr
END_FUNC thread_hvc
#endif /*CFG_CORE_SEL2_SPMC*/

/*
 * unsigned long thread_smc(unsigned long func_id, unsigned long a1,
 *			    unsigned long a2, unsigned long a3)
 */
FUNC thread_smc , :
	push	{r4-r7}
UNWIND(	.save	{r4-r7})
	smc	#0
	pop	{r4-r7}
	bx	lr
END_FUNC thread_smc
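/*
 * An illustrative call from C, assuming the SMC32 calling convention
 * (func_id in r0, a1-a3 in r1-r3, result back in r0); the function ID
 * below is just an example and not tied to this file:
 *
 *	ret = thread_smc(OPTEE_SMC_CALL_RETURN_FROM_RPC, a1, a2, a3);
 */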

/* void thread_smccc(struct thread_smc_args *arg_res) */
FUNC thread_smccc , :
	push	{r4-r7}
	push	{r0, lr}
	ldm	r0, {r0-r7}
#ifdef CFG_CORE_SEL2_SPMC
	hvc	#0
#else
	smc	#0
#endif
	pop	{r12, lr}
	stm	r12, {r0-r7}
	pop	{r4-r7}
	bx	lr
END_FUNC thread_smccc
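/*
 * As can be inferred from the ldm/stm pairs above, struct
 * thread_smc_args holds eight words (a0-a7) that are loaded into r0-r7
 * before the SMC/HVC and stored back afterwards, so the same struct
 * carries both arguments and results.
 */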

FUNC thread_init_vbar , :
	/* Set vector (VBAR) */
	write_vbar r0
	bx	lr
END_FUNC thread_init_vbar
DECLARE_KEEP_PAGER thread_init_vbar

/*
 * Below are low level routines handling entry and return from user mode.
 *
 * thread_enter_user_mode() saves all the registers that user mode can
 * change so that kernel mode can restore needed registers when resuming
 * execution after the call to thread_enter_user_mode() has returned.
 * thread_enter_user_mode() doesn't return directly since it enters user
 * mode instead, it's thread_unwind_user_mode() that does the
 * returning by restoring the registers saved by thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to caller:
 * the user TA calls _utee_return, the user TA calls _utee_panic, or
 * through an abort.
 *
 * Calls to _utee_return or _utee_panic are handled as:
 * __thread_svc_handler() -> thread_scall_handler() -> scall_do_call() which
 * calls syscall_return() or syscall_panic().
 *
 * These function calls return normally except thread_scall_handler(),
 * which is an exception handling routine, so it reads the return address
 * and SPSR to restore from the stack. syscall_return() and syscall_panic()
 * change the return address and SPSR used by thread_scall_handler() so
 * that, instead of returning into user mode as with other syscalls, it
 * returns into thread_unwind_user_mode() in kernel mode. When
 * thread_scall_handler() returns, the stack pointer is at the point where
 * thread_enter_user_mode() left it, so this is where
 * thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way but by thread_abort_handler()
 * instead: when the pager sees that it's an abort from user mode that
 * can't be handled, it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */
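
/*
 * A rough C-level sketch of the round trip described above. This is
 * illustrative pseudocode only; the real caller lives in C elsewhere
 * and has a richer signature:
 *
 *	uint32_t enter(struct thread_ctx_regs *regs, uint32_t *e0,
 *		       uint32_t *e1)
 *	{
 *		// Enters user mode at regs->pc; control comes back here
 *		// only once thread_unwind_user_mode() runs with the
 *		// stack as __thread_enter_user_mode() left it.
 *		return __thread_enter_user_mode(regs, e0, e1);
 *	}
 */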

/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 *
 * This function depends on being called with exceptions masked.
 */
FUNC __thread_enter_user_mode , :
UNWIND(	.cantunwind)
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function had returned. This is also used in
	 * syscall_panic().
	 *
	 * If the stack usage of this function is changed
	 * thread_unwind_user_mode() has to be updated.
	 */
	push    {r4-r12,lr}

	/*
	 * Save the old user sp and set the new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r4, sp
	ldr	sp, [r0, #THREAD_CTX_REGS_USR_SP]
	cps	#CPSR_MODE_SVC

	/* r5 is a filler, apparently to keep sp 8-byte aligned */
	push	{r1, r2, r4, r5}

	/* Prepare user mode entry via eret_to_user_mode */
	ldr	lr, [r0, #THREAD_CTX_REGS_PC]
	ldr	r4, [r0, #THREAD_CTX_REGS_CPSR]
	msr	spsr_fsxc, r4

	ldm	r0, {r0-r12}

	b	eret_to_user_mode
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *              uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND(	.cantunwind)
	/* Match push {r1, r2, r4, r5} in __thread_enter_user_mode() */
	pop	{r4-r7}
	str	r1, [r4]	/* Store exit_status0 */
	str	r2, [r5]	/* Store exit_status1 */

	/* Restore old user sp */
	cps	#CPSR_MODE_SYS
	mov	sp, r6
	cps	#CPSR_MODE_SVC

	/* Match push {r4-r12,lr} in __thread_enter_user_mode() */
	pop     {r4-r12,pc}
END_FUNC thread_unwind_user_mode

	.macro maybe_restore_mapping
		/*
		 * This macro is a bit hard to read due to all the ifdefs.
		 * We're testing for two different configs, which makes
		 * four different combinations:
		 *
		 * - With LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 * - Without LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 */

		/*
		 * At this point we can't rely on any memory being writable
		 * yet, so we're using TPIDRPRW to store r0 and, if with
		 * LPAE, TPIDRURO to store r1 too.
		 */
		write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		write_tpidruro r1
#endif

#ifdef CFG_WITH_LPAE
		read_ttbr0_64bit r0, r1
		tst	r1, #BIT(TTBR_ASID_SHIFT - 32)
		beq	11f

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/*
		 * Update the mapping to use the full kernel mode mapping.
		 * Since the translation table could reside above 4GB we'll
		 * have to use 64-bit arithmetics.
		 */
		subs	r0, r0, #CORE_MMU_BASE_TABLE_OFFSET
		sbc	r1, r1, #0
#endif
		bic	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
		write_ttbr0_64bit r0, r1
		isb

#else /*!CFG_WITH_LPAE*/
		read_contextidr r0
		tst	r0, #1
		beq	11f

		/* Update the mapping to use the full kernel mode mapping. */
		bic	r0, r0, #1
		write_contextidr r0
		isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		read_ttbcr r0
		bic	r0, r0, #TTBCR_PD1
		write_ttbcr r0
		isb
#endif

#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		ldr	r0, =thread_user_kcode_offset
		ldr	r0, [r0]
		read_vbar r1
		add	r1, r1, r0
		write_vbar r1
		isb

	11:	/*
		 * The PC is adjusted unconditionally to guard against the
		 * case where there was an FIQ just before we did the
		 * "cpsid aif".
		 */
		ldr	r0, =22f
		bx	r0
	22:
#else
	11:
#endif
		read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		read_tpidruro r1
#endif
	.endm
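
/*
 * Note that the "user mapping active" state tested above is kept in the
 * lowest ASID bit: in TTBR0 with LPAE and in CONTEXTIDR without. The bit
 * is set on the way out to user mode in eret_to_user_mode below.
 */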

/* The handler of native interrupt. */
.macro	native_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	/*
	 * FIQ and IRQ have a +4 offset for lr compared to the preferred
	 * return address.
	 */
	sub	lr, lr, #4

	/*
	 * We're always saving {r0-r3}. In IRQ mode we're saving r12 also.
	 * In FIQ mode we're also saving the banked FIQ registers {r8-r12},
	 * because the secure monitor doesn't save those. The treatment of
	 * the banked FIQ registers is somewhat analogous to the lazy save
	 * of VFP registers.
	 */
	.ifc	\mode\(),fiq
	push	{r0-r3, r8-r12, lr}
	.else
	push	{r0-r3, r12, lr}
	.endif

	/*
	 * Use SP_abt to update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_TMP |
	 *         THREAD_CLF_{FIQ|IRQ};
	 */
	cps     #CPSR_MODE_ABT
	ldr     r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl     r1, r1, #THREAD_CLF_SAVED_SHIFT
	.ifc    \mode\(),fiq
	orr     r1, r1, #(THREAD_CLF_TMP | THREAD_CLF_FIQ)
	.else
	orr     r1, r1, #(THREAD_CLF_TMP | THREAD_CLF_IRQ)
	.endif
	str     r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	.ifc    \mode\(),fiq
	cps     #CPSR_MODE_FIQ
	.else
	cps     #CPSR_MODE_IRQ
	.endif

	bl	thread_check_canaries
	bl	interrupt_main_handler

	/*
	 * Use SP_abt to update core local flags.
	 * flags >>= THREAD_CLF_SAVED_SHIFT;
	 */
	cps     #CPSR_MODE_ABT
	ldr     r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr     r1, r1, #THREAD_CLF_SAVED_SHIFT
	str     r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	.ifc    \mode\(),fiq
	cps     #CPSR_MODE_FIQ
	.else
	cps     #CPSR_MODE_IRQ
	.endif

	mrs	r0, spsr
	cmp_spsr_user_mode r0

	.ifc	\mode\(),fiq
	pop	{r0-r3, r8-r12, lr}
	.else
	pop	{r0-r3, r12, lr}
	.endif

	movsne	pc, lr
	b	eret_to_user_mode
.endm
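
/*
 * The two flag updates above treat the core local flags word as a small
 * stack of THREAD_CLF_* states: entry shifts the old state up and ors in
 * the new state, exit shifts it back down. In C terms that is a push of
 * THREAD_CLF_TMP | THREAD_CLF_{FIQ|IRQ} followed by a pop.
 */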

/* The handler of foreign interrupt. */
.macro foreign_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	sub	lr, lr, #4
	push	{r12}

	.ifc	\mode\(),fiq
	/*
	 * If a foreign (non-secure) interrupt is received as a FIQ we
	 * need to check that we're in a saveable state, or mask the
	 * interrupt to be handled later if we're not.
	 *
	 * The window when this is needed is quite narrow: it lasts from
	 * entering the exception vector until the "cpsid" instruction of
	 * the handler has been executed.
	 *
	 * Currently we can save the state properly if the FIQ is received
	 * while in user or svc (kernel) mode.
	 *
	 * If we're returning to abort, undef or irq mode we're returning
	 * with the mapping restored. This is OK since, before the handler
	 * we're returning to eventually returns to user mode, the reduced
	 * mapping will be restored.
	 */
	mrs	r12, spsr
	and	r12, r12, #ARM32_CPSR_MODE_MASK
	cmp	r12, #ARM32_CPSR_MODE_USR
	cmpne	r12, #ARM32_CPSR_MODE_SVC
	beq	1f
	mrs	r12, spsr
	orr	r12, r12, #ARM32_CPSR_F
	msr	spsr_fsxc, r12
	pop	{r12}
	movs	pc, lr
1:
	.endif

	push	{lr}

	.ifc	\mode\(),fiq
	bl	thread_save_state_fiq
	.else
	bl	thread_save_state
	.endif

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which cache entries have
	 * been used. We're relying on the secure monitor/dispatcher to
	 * take care of the BTB.
	 */
	mov	r0, #DCACHE_OP_CLEAN_INV
	bl	dcache_op_louis
	write_iciallu
#endif

	/*
	 * Use SP_abt to update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_TMP;
	 */
	cps     #CPSR_MODE_ABT
	ldr     r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl     r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr     r1, r1, #THREAD_CLF_TMP
	str     r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	.ifc    \mode\(),fiq
	cps     #CPSR_MODE_FIQ
	.else
	cps     #CPSR_MODE_IRQ
	.endif

	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	r1, spsr
	pop	{r2}
	pop	{r12}
	blx	thread_state_suspend

	/*
	 * Switch to SVC mode and copy the current stack pointer since it
	 * already is the tmp stack.
	 */
	mov	r1, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r1

	/* Passing the thread index in r0 */
	b	thread_foreign_intr_exit
.endm
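
/*
 * The register setup before "blx thread_state_suspend" above suggests a
 * prototype like the following (an inference, see the C code for the
 * real declaration):
 *
 *	int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc);
 *
 * with the returned thread index passed on to thread_foreign_intr_exit().
 */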

FUNC thread_excp_vect , :, align=32
UNWIND(	.cantunwind)
	b	.			/* Reset			*/
	b	__thread_und_handler	/* Undefined instruction	*/
	b	__thread_svc_handler	/* System call			*/
	b	__thread_pabort_handler	/* Prefetch abort		*/
	b	__thread_dabort_handler	/* Data abort			*/
	b	.			/* Reserved			*/
	b	__thread_irq_handler	/* IRQ				*/
	b	__thread_fiq_handler	/* FIQ				*/
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8 byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * To avoid unexpected speculation we need to invalidate
		 * the branch predictor before we do the first branch. It
		 * doesn't matter if it's a conditional or an unconditional
		 * branch, speculation can still occur.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * we enter via.  This is done by adding 1 to SP in each
		 * entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		cpsid   aif		/* 0:000 FIQ			*/
	.endm

	.balign	32
	.global thread_excp_vect_wa_a15_spectre_v2
thread_excp_vect_wa_a15_spectre_v2:
	vector_prologue_spectre
	write_tpidrprw r0
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.balign	32
	.global thread_excp_vect_wa_spectre_v2
thread_excp_vect_wa_spectre_v2:
	vector_prologue_spectre
	write_tpidrprw r0
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

	/*
	 * Dispatch on the bit pattern formed in SP by
	 * vector_prologue_spectre: each target below is eight bytes (two
	 * instructions), and reading PC in ARM state yields the current
	 * instruction address + 8, so "add pc, pc, r0, LSL #3" lands on
	 * the pair matching the original vector entry.
	 */
1:	and	r0, sp, #(BIT(0) | BIT(1) | BIT(2))
	bic	sp, sp, #(BIT(0) | BIT(1) | BIT(2))
	add	pc, pc, r0, LSL #3
	nop

	read_tpidrprw r0
	b	__thread_fiq_handler	/* FIQ				*/
	read_tpidrprw r0
	b	__thread_irq_handler	/* IRQ				*/
	read_tpidrprw r0
	b	.			/* Reserved			*/
	read_tpidrprw r0
	b	__thread_dabort_handler	/* Data abort			*/
	read_tpidrprw r0
	b	__thread_pabort_handler	/* Prefetch abort		*/
	read_tpidrprw r0
	b	__thread_svc_handler	/* System call			*/
	read_tpidrprw r0
	b	__thread_und_handler	/* Undefined instruction	*/
	read_tpidrprw r0
	b	.			/* Reset			*/
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

__thread_und_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	/* Back lr up to the undefined instruction: -2 in Thumb, -4 in ARM */
	mrs	r1, spsr
	tst	r1, #CPSR_T
	subne	lr, lr, #2
	subeq	lr, lr, #4
	mov	r0, #ABORT_TYPE_UNDEF
	b	__thread_abort_common

__thread_dabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #8	/* lr is the aborting instruction + 8 */
	mov	r0, #ABORT_TYPE_DATA
	b	__thread_abort_common

__thread_pabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #4	/* lr is the aborting instruction + 4 */
	mov	r0, #ABORT_TYPE_PREFETCH

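/*
 * From the stores below, struct thread_abort_regs can be inferred to be
 * laid out (from low to high address) as: usr_sp, usr_lr, pad, spsr,
 * elr, r0-r11, ip. The struct is filled from its end towards its start.
 */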
__thread_abort_common:
	/*
	 * At this label:
	 * cpsr is in mode undef or abort
	 * sp is still pointing to struct thread_core_local belonging to
	 * this core.
	 * {r0, r1} are saved in struct thread_core_local pointed to by sp
	 * {r2-r11, ip} are untouched.
	 * r0 holds the first argument for abort_handler()
	 */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_ABORT

	/*
	 * Select stack and update flags accordingly
	 *
	 * Normal case:
	 * If the abort stack is unused select that.
	 *
	 * Fatal error handling:
	 * If we're already using the abort stack, as noted by bit
	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
	 * field, we're selecting the temporary stack instead to be able
	 * to make a stack trace of the abort in abort mode.
	 *
	 * r1 is initialized as a temporary stack pointer until we've
	 * switched to system mode.
	 */
	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	orrne	r1, r1, #THREAD_CLF_TMP /* flags |= THREAD_CLF_TMP; */
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]

	/*
	 * Store registers on the stack, filling struct thread_abort_regs
	 * starting from the end of the struct:
	 * {r2-r11, ip}
	 * Then load the previously saved {r0-r1} and store them, filling
	 * the struct up to the pad field.
	 * After this only {usr_sp, usr_lr} are missing in the struct.
	 */
	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
	/* Push the original {r0-r1} on the selected stack */
	stmdb	r1!, {r2-r3}
	mrs	r3, spsr
	/* Push {pad, spsr, elr} on the selected stack */
	stmdb	r1!, {r2, r3, lr}

	cps	#CPSR_MODE_SYS
	str	lr, [r1, #-4]!
	str	sp, [r1, #-4]!
	mov	sp, r1

	bl	abort_handler

	mov	ip, sp
	ldr	sp, [ip], #4
	ldr	lr, [ip], #4

	/*
	 * Even if we entered via CPSR_MODE_UND, we are returning via
	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
	 * here.
	 */
	cps	#CPSR_MODE_ABT
	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
	msr	spsr_fsxc, r1

	/* Update core local flags */
	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	cmp_spsr_user_mode r1
	ldm	ip, {r0-r11, ip}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_abort_common */

__thread_svc_handler:
	cpsid	aif

	maybe_restore_mapping

	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp
	bl	thread_scall_handler
	cpsid	aif	/* In case something was unmasked */
	pop	{r0}
	msr	spsr_fsxc, r0
	cmp_spsr_user_mode r0
	pop	{r0-r7, lr}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_svc_handler */
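/*
 * From the pushes above, the frame handed to thread_scall_handler()
 * (r0 = sp) can be inferred to hold {spsr, r0-r7, lr}; the handler may
 * rewrite the saved lr/spsr to redirect the return, as described
 * further up.
 */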

__thread_fiq_handler:
#if defined(CFG_CORE_IRQ_IS_NATIVE_INTR)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
	/* end thread_fiq_handler */

__thread_irq_handler:
#if defined(CFG_CORE_IRQ_IS_NATIVE_INTR)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
	/* end thread_irq_handler */

	/*
	 * Returns to user mode.
	 * Expects to be jumped to with lr pointing to the user space
	 * address to jump to and spsr holding the desired cpsr. Async
	 * abort, irq and fiq should be masked.
	 */
eret_to_user_mode:
	write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	write_tpidruro r1
#endif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r0, =thread_user_kcode_offset
	ldr	r0, [r0]
	read_vbar r1
	sub	r1, r1, r0
	write_vbar r1
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r1, =1f
	sub	r1, r1, r0
	bx	r1
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r0, r1
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r0, r0, #CORE_MMU_BASE_TABLE_OFFSET
#endif
	/* switch to user ASID */
	orr	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r0, r1
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r0
	orr	r0, r0, #TTBCR_PD1
	write_ttbcr r0
	isb
#endif
	read_contextidr r0
	orr	r0, r0, #BIT(0)
	write_contextidr r0
	isb
#endif /*!CFG_WITH_LPAE*/

	read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	read_tpidruro r1
#endif

	movs	pc, lr

	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active;
	 * this means executing with the reduced mapping, and the code
	 * needs to be located here together with the vector.
	 */
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	push	{r4-r7}

	/* Mask all exceptions */
	mrs	r4, cpsr	/* This register must be preserved */
	cpsid	aif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r2, =thread_user_kcode_offset
	ldr	r2, [r2]
	read_vbar r5		/* This register must be preserved */
	sub	r3, r5, r2
	write_vbar r3
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r3, =1f
	sub	r3, r3, r2
	bx	r3
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r6, r7	/* These registers must be preserved */
	/* switch to user ASID */
	orr	r3, r7, #BIT(TTBR_ASID_SHIFT - 32)
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r2, r6, #CORE_MMU_BASE_TABLE_OFFSET
	write_ttbr0_64bit r2, r3
#else
	write_ttbr0_64bit r6, r3
#endif
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r6	/* This register must be preserved */
	orr	r2, r6, #TTBCR_PD1
	write_ttbcr r2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	read_contextidr r7	/* This register must be preserved */
	orr	r2, r7, #BIT(0)
	write_contextidr r2
	isb
#endif /*!CFG_WITH_LPAE*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate minimum icache line size, result in r2 */
	read_ctr r3
	and     r3, r3, #CTR_IMINLINE_MASK
	mov     r2, #CTR_WORD_SIZE
	lsl     r2, r2, r3

	/* Invalidate [addr, addr + size) one cache line at a time */
	add	r1, r0, r1
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	write_icimvau r0
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b

	/* Invalidate entire branch predictor array inner shareable */
	write_bpiallis

	dsb	ishst
	isb

#ifdef CFG_WITH_LPAE
	write_ttbr0_64bit r6, r7
	isb
#else /*!CFG_WITH_LPAE*/
	write_contextidr r7
	isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_ttbcr r6
	isb
#endif
#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_vbar r5
	isb
	/*
	 * The PC is adjusted unconditionally to guard against the
	 * case where there was an FIQ just before we did the "cpsid aif".
	 */
	ldr	r0, =1f
	bx	r0
1:
#endif

	msr	cpsr_fsxc, r4	/* Restore exceptions */
	pop	{r4-r7}
	bx	lr		/* End of icache_inv_user_range() */

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect