xref: /optee_os/core/arch/arm/kernel/thread_a32.S (revision 0d77037f5943c86560dd7c8f473fbc6a55d60a34)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2016-2017, Linaro Limited
4 * Copyright (c) 2014, STMicroelectronics International N.V.
5 */
6
7#include <arm32_macros.S>
8#include <arm.h>
9#include <asm.S>
10#include <generated/asm-defines.h>
11#include <keep.h>
12#include <kernel/abort.h>
13#include <kernel/cache_helpers.h>
14#include <kernel/thread_defs.h>
15#include <kernel/unwind.h>
16#include <mm/core_mmu.h>
17
18#include "thread_private.h"
19
20	.syntax unified
21	.arch_extension sec
22
23	.macro cmp_spsr_user_mode reg:req
24		/*
25		 * We only need to test the mode bits M[3:0] since M[4] (0x10)
26		 * is always set; user mode (0x10) is the only mode with M[3:0] == 0.
27		 */
28		tst	\reg, #0x0f
29	.endm
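
/*
 * A minimal C sketch (not part of the build) of what cmp_spsr_user_mode
 * tests: in AArch32 the mode field M[4:0] always has M[4] (0x10) set and
 * user mode (0x10) is the only mode with M[3:0] == 0.
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	// Equivalent of "tst \reg, #0x0f" followed by testing the Z flag:
 *	// true only when the saved mode is user (0x10).
 *	static bool spsr_is_user_mode(uint32_t spsr)
 *	{
 *		return (spsr & 0x0f) == 0;
 *	}
 */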
30
31FUNC thread_set_abt_sp , :
32UNWIND(	.fnstart)
33UNWIND(	.cantunwind)
34	mrs	r1, cpsr
35	cps	#CPSR_MODE_ABT
36	mov	sp, r0
37	msr	cpsr, r1
38	bx	lr
39UNWIND(	.fnend)
40END_FUNC thread_set_abt_sp
41
42FUNC thread_set_und_sp , :
43UNWIND(	.fnstart)
44UNWIND(	.cantunwind)
45	mrs	r1, cpsr
46	cps	#CPSR_MODE_UND
47	mov	sp, r0
48	msr	cpsr, r1
49	bx	lr
50UNWIND(	.fnend)
51END_FUNC thread_set_und_sp
52
53FUNC thread_set_irq_sp , :
54UNWIND(	.fnstart)
55UNWIND(	.cantunwind)
56	mrs	r1, cpsr
57	cps	#CPSR_MODE_IRQ
58	mov	sp, r0
59	msr	cpsr, r1
60	bx	lr
61UNWIND(	.fnend)
62END_FUNC thread_set_irq_sp
63
64FUNC thread_set_fiq_sp , :
65UNWIND(	.fnstart)
66UNWIND(	.cantunwind)
67	mrs	r1, cpsr
68	cps	#CPSR_MODE_FIQ
69	mov	sp, r0
70	msr	cpsr, r1
71	bx	lr
72UNWIND(	.fnend)
73END_FUNC thread_set_fiq_sp
74
75/* void thread_resume(struct thread_ctx_regs *regs) */
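/*
 * A sketch (for reference only; the authoritative definition lives in the
 * thread headers and the field names here are approximate) of the struct
 * thread_ctx_regs layout that the offsets below assume:
 *
 *	struct thread_ctx_regs_sketch {
 *		uint32_t r[13];		// r0-r12, restored last with ldm
 *		uint32_t usr_sp;	// offset 13 * 4
 *		uint32_t usr_lr;	// offset 14 * 4
 *		uint32_t svc_spsr;	// offset 15 * 4
 *		uint32_t svc_sp;	// offset 16 * 4
 *		uint32_t svc_lr;	// offset 17 * 4
 *		uint32_t pc;		// offset 18 * 4, moved into lr
 *		uint32_t cpsr;		// offset 19 * 4, written to spsr
 *	};
 */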
76FUNC thread_resume , :
77UNWIND(	.fnstart)
78UNWIND(	.cantunwind)
79	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */
80
81	cps	#CPSR_MODE_SYS
82	ldr	sp, [r12], #4
83	ldr	lr, [r12], #4
84
85	cps	#CPSR_MODE_SVC
86	ldr	r1, [r12], #4
87	ldr	sp, [r12], #4
88	ldr	lr, [r12], #4
89	msr	spsr_fsxc, r1
90
91	ldm	r12, {r1, r2}
92
93	/*
94	 * Switch to a mode other than SVC since we need to set the spsr to
95	 * return properly into the old state, and SVC may be the very mode
96	 * we're returning to.
97	 */
98	cps	#CPSR_MODE_ABT
99	cmp_spsr_user_mode r2
100	mov	lr, r1
101	msr	spsr_fsxc, r2
102	ldm	r0, {r0-r12}
103	movsne	pc, lr
104	b	eret_to_user_mode
105UNWIND(	.fnend)
106END_FUNC thread_resume
107
108/*
109 * Disables async aborts, IRQ and FIQ and saves the thread state while
110 * in FIQ mode, which has banked r8-r12 registers. Returns the original CPSR.
111 */
112LOCAL_FUNC thread_save_state_fiq , :
113UNWIND(	.fnstart)
114UNWIND(	.cantunwind)
115	mov	r9, lr
116
117	/*
118	 * Uses stack for temporary storage, while storing needed
119	 * context in the thread context struct.
120	 */
121
122	mrs	r8, cpsr
123
124	cpsid	aif			/* Disable Async abort, IRQ and FIQ */
125
126	push	{r4-r7}
127	push	{r0-r3}
128
129	mrs	r6, cpsr		/* Save current CPSR */
130
131	bl	thread_get_ctx_regs
132
133	pop	{r1-r4}			/* r0-r3 pushed above */
134	stm	r0!, {r1-r4}
135	pop	{r1-r4}			/* r4-r7 pushed above */
136	stm	r0!, {r1-r4}
137
138	cps     #CPSR_MODE_SYS
139	stm	r0!, {r8-r12}
140	str	sp, [r0], #4
141	str	lr, [r0], #4
142
143	cps     #CPSR_MODE_SVC
144	mrs     r1, spsr
145	str	r1, [r0], #4
146	str	sp, [r0], #4
147	str	lr, [r0], #4
148
149	/* back to fiq mode */
150	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
151	msr	cpsr, r6		/* Restore mode */
152
153	mov	r0, r8			/* Return original CPSR */
154	bx	r9
155UNWIND(	.fnend)
156END_FUNC thread_save_state_fiq
157
158/*
159 * Disables async aborts, IRQ and FIQ and saves the thread state.
160 * Returns the original CPSR.
161 */
162FUNC thread_save_state , :
163UNWIND(	.fnstart)
164UNWIND(	.cantunwind)
165	push	{r12, lr}
166	/*
167	 * Uses stack for temporary storage, while storing needed
168	 * context in the thread context struct.
169	 */
170
171	mrs	r12, cpsr
172
173	cpsid	aif			/* Disable Async abort, IRQ and FIQ */
174
175	push	{r4-r7}
176	push	{r0-r3}
177
178	mov	r5, r12			/* Save CPSR in a preserved register */
179	mrs	r6, cpsr		/* Save current CPSR */
180
181	bl	thread_get_ctx_regs
182
183	pop	{r1-r4}			/* r0-r3 pushed above */
184	stm	r0!, {r1-r4}
185	pop	{r1-r4}			/* r4-r7 pushed above */
186	stm	r0!, {r1-r4}
187	stm	r0!, {r8-r11}
188
189	pop	{r12, lr}
190	stm	r0!, {r12}
191
192	cps	#CPSR_MODE_SYS
193	str	sp, [r0], #4
194	str	lr, [r0], #4
195
196	cps	#CPSR_MODE_SVC
197	mrs	r1, spsr
198	str	r1, [r0], #4
199	str	sp, [r0], #4
200	str	lr, [r0], #4
201
202	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
203	msr	cpsr, r6		/* Restore mode */
204
205	mov	r0, r5			/* Return original CPSR */
206	bx	lr
207UNWIND(	.fnend)
208END_FUNC thread_save_state
209
210/*
211 * unsigned long thread_smc(unsigned long func_id, unsigned long a1,
212 *			    unsigned long a2, unsigned long a3)
213 */
214FUNC thread_smc , :
215UNWIND(	.fnstart)
216	smc	#0
217	bx	lr
218UNWIND(	.fnend)
219END_FUNC thread_smc
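
/*
 * Usage sketch: thread_smc() issues a plain SMC Calling Convention call,
 * the four arguments go out in r0-r3 and whatever the secure monitor
 * leaves in r0 is returned. The function ID below is a made-up
 * placeholder, not a real SMCCC function ID.
 *
 *	unsigned long thread_smc(unsigned long func_id, unsigned long a1,
 *				 unsigned long a2, unsigned long a3);
 *
 *	static unsigned long example_fast_call(void)
 *	{
 *		// 0x82000000 is only a placeholder function ID
 *		return thread_smc(0x82000000, 0, 0, 0);
 *	}
 */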
220
221FUNC thread_init_vbar , :
222UNWIND(	.fnstart)
223	/* Set vector (VBAR) */
224	write_vbar r0
225	bx	lr
226UNWIND(	.fnend)
227END_FUNC thread_init_vbar
228KEEP_PAGER thread_init_vbar
229
230/*
231 * Below are low level routines handling entry and return from user mode.
232 *
233 * thread_enter_user_mode() saves all registers that user mode can
234 * change, so that kernel mode can restore the needed registers when
235 * resuming execution after the call to thread_enter_user_mode() has
236 * returned. thread_enter_user_mode() doesn't return directly since it
237 * enters user mode instead; it's thread_unwind_user_mode() that does
238 * the returning by restoring the registers saved by this function.
239 *
240 * There are three ways for thread_enter_user_mode() to return to its
241 * caller: the user TA calls utee_return, the user TA calls utee_panic,
242 * or an abort occurs.
243 *
244 * Calls to utee_return or utee_panic are handled as:
245 * thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call() which
246 * calls syscall_return() or syscall_panic().
247 *
248 * These function calls return normally, except thread_svc_handler()
249 * which is an exception handling routine and therefore reads the
250 * return address and SPSR to restore from the stack. syscall_return()
251 * and syscall_panic() change the return address and SPSR used by
252 * thread_svc_handler() so that, instead of returning into user mode as
253 * with other syscalls, it returns into thread_unwind_user_mode() in
254 * kernel mode. When thread_svc_handler() returns, the stack pointer is
255 * at the point where thread_enter_user_mode() left it, which is where
256 * thread_unwind_user_mode() expects to operate.
257 *
258 * Aborts are handled in a similar way, but by thread_abort_handler():
259 * when the pager sees an abort from user mode that can't be handled,
260 * it updates the SPSR and return address used by thread_abort_handler()
261 * so that it returns into thread_unwind_user_mode() instead.
262 */
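
/*
 * A rough C-level sketch (illustrative only, not part of the build) of
 * how the pieces above fit together. The function names are the real
 * ones from this code base, but the bodies are pseudocode:
 *
 *	// Kernel side: enter user mode; "return" happens only through one
 *	// of the three paths described above.
 *	ret = __thread_enter_user_mode(a0, a1, a2, a3, user_sp, user_func,
 *				       spsr, &exit_status0, &exit_status1);
 *
 *	// utee_return()/utee_panic(): syscall_return()/syscall_panic()
 *	// rewrite the return address and SPSR saved by thread_svc_handler()
 *	// so the handler returns into thread_unwind_user_mode().
 *
 *	// Unhandled user mode abort: thread_abort_handler() performs the
 *	// same redirection.
 *
 *	// thread_unwind_user_mode(ret, exit_status0, exit_status1) finally
 *	// pops the registers pushed by __thread_enter_user_mode() and
 *	// returns to the caller above as if it had returned normally.
 */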
263
264/*
265 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
266 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
267 *               unsigned long user_func, unsigned long spsr,
268 *               uint32_t *exit_status0, uint32_t *exit_status1)
269 *
270 */
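/*
 * Why the loads below use sp + 10 * 4 and up: per the AAPCS the first
 * four arguments arrive in r0-r3 and the remaining ones on the stack, so
 * on entry user_sp is at [sp], user_func at [sp + 4], spsr at [sp + 8]
 * and the two exit_status pointers follow. After "push {r4-r12, lr}"
 * (10 registers, 40 bytes) the stack looks like the sketch below (the
 * struct name is illustrative):
 *
 *	struct enter_user_mode_frame {
 *		uint32_t r4_to_r12[9];	// [sp + 0 * 4] .. [sp + 8 * 4]
 *		uint32_t lr;		// [sp + 9 * 4]
 *		uint32_t user_sp;	// [sp + 10 * 4], 5th argument
 *		uint32_t user_func;	// [sp + 11 * 4], 6th argument
 *		uint32_t spsr;		// [sp + 12 * 4], 7th argument
 *		uint32_t *exit_status0;	// [sp + 13 * 4], 8th argument
 *		uint32_t *exit_status1;	// [sp + 14 * 4], 9th argument
 *	};
 */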
271FUNC __thread_enter_user_mode , :
272UNWIND(	.fnstart)
273UNWIND(	.cantunwind)
274	/*
275	 * Save all registers to allow syscall_return() to resume execution
276	 * as if this function had returned. This is also used in
277	 * syscall_panic().
278	 *
279	 * If stack usage of this function is changed
280	 * thread_unwind_user_mode() has to be updated.
281	 */
282	push    {r4-r12,lr}
283
284	ldr     r4, [sp, #(10 * 0x4)]   /* user stack pointer */
285	ldr     r5, [sp, #(11 * 0x4)]   /* user function */
286	ldr     r6, [sp, #(12 * 0x4)]   /* spsr */
287
288	/*
289	 * Save old user sp and set new user sp.
290	 */
291	cps	#CPSR_MODE_SYS
292	mov	r7, sp
293	mov     sp, r4
294	cps	#CPSR_MODE_SVC
295	push	{r7,r8}
296
297	/* Prepare user mode entry via eret_to_user_mode */
298	cpsid	aif
299	msr     spsr_fsxc, r6
300	mov	lr, r5
301
302	b	eret_to_user_mode
303UNWIND(	.fnend)
304END_FUNC __thread_enter_user_mode
305
306/*
307 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
308 *              uint32_t exit_status1);
309 * See description in thread.h
310 */
311FUNC thread_unwind_user_mode , :
312UNWIND(	.fnstart)
313UNWIND(	.cantunwind)
314	ldr     ip, [sp, #(15 * 0x4)]   /* &ctx->panicked */
315	str	r1, [ip]
316	ldr     ip, [sp, #(16 * 0x4)]   /* &ctx->panic_code */
317	str	r2, [ip]
318
319	/* Restore old user sp */
320	pop	{r4,r7}
321	cps	#CPSR_MODE_SYS
322	mov	sp, r4
323	cps	#CPSR_MODE_SVC
324
325	pop     {r4-r12,pc}	/* Match the push in __thread_enter_user_mode() */
326UNWIND(	.fnend)
327END_FUNC thread_unwind_user_mode
328
329	.macro maybe_restore_mapping
330		/*
331		 * This macro is a bit hard to read due to all the ifdefs:
332		 * we're testing for two different configs, which makes four
333		 * different combinations.
334		 *
335		 * - With LPAE, and then some extra code if with
336		 *   CFG_CORE_UNMAP_CORE_AT_EL0
337		 * - Without LPAE, and then some extra code if with
338		 *   CFG_CORE_UNMAP_CORE_AT_EL0
339		 */
340
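		/*
		 * A C-level sketch (illustrative only; the read_xxx() and
		 * write_xxx() helpers stand in for the system register
		 * accesses done by the assembler macros) of the restore
		 * logic below. The unconditional PC reload done with
		 * CFG_CORE_UNMAP_CORE_AT_EL0 is not shown.
		 *
		 *	static void maybe_restore_mapping_sketch(void)
		 *	{
		 *	#ifdef CFG_WITH_LPAE
		 *		uint64_t ttbr0 = read_ttbr0_64bit();
		 *
		 *		if (!(ttbr0 & BIT64(TTBR_ASID_SHIFT)))
		 *			return;	// already on the kernel ASID
		 *	#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		 *		ttbr0 -= CORE_MMU_L1_TBL_OFFSET; // full tables
		 *	#endif
		 *		write_ttbr0_64bit(ttbr0 & ~BIT64(TTBR_ASID_SHIFT));
		 *	#else
		 *		uint32_t ctxidr = read_contextidr();
		 *
		 *		if (!(ctxidr & BIT(0)))
		 *			return;	// already on the kernel ASID
		 *		write_contextidr(ctxidr & ~BIT(0));
		 *	#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		 *		write_ttbr1(read_ttbr1() - CORE_MMU_L1_TBL_OFFSET);
		 *	#endif
		 *	#endif
		 *	#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		 *		// Move VBAR back to the full mapping
		 *		write_vbar(read_vbar() + thread_user_kcode_offset);
		 *	#endif
		 *	}
		 */
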
341		/*
342		 * At this point we can't rely on any memory being writable
343		 * yet, so we're using TPIDRPRW to store r0, and with LPAE or
344		 * CFG_CORE_UNMAP_CORE_AT_EL0 also TPIDRURO to store r1.
345		 */
346		write_tpidrprw r0
347#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
348		write_tpidruro r1
349#endif
350
351#ifdef CFG_WITH_LPAE
352		read_ttbr0_64bit r0, r1
353		tst	r1, #BIT(TTBR_ASID_SHIFT - 32)
354		beq	11f
355
356#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
357		/*
358		 * Update the mapping to use the full kernel mode mapping.
359		 * Since the translation table could reside above 4GB we'll
360		 * have to use 64-bit arithmetic.
361		 */
362		subs	r0, r0, #CORE_MMU_L1_TBL_OFFSET
363		sbc	r1, r1, #0
364#endif
365		bic	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
366		write_ttbr0_64bit r0, r1
367		isb
368
369#else /*!CFG_WITH_LPAE*/
370		read_contextidr r0
371		tst	r0, #1
372		beq	11f
373
374		/* Update the mapping to use the full kernel mode mapping. */
375		bic	r0, r0, #1
376		write_contextidr r0
377		isb
378#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
379		read_ttbr1 r0
380		sub	r0, r0, #CORE_MMU_L1_TBL_OFFSET
381		write_ttbr1 r0
382		isb
383#endif
384
385#endif /*!CFG_WITH_LPAE*/
386
387#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
388		ldr	r0, =thread_user_kcode_offset
389		ldr	r0, [r0]
390		read_vbar r1
391		add	r1, r1, r0
392		write_vbar r1
393		isb
394
395	11:	/*
396		 * The PC is adjusted unconditionally to guard against the
397		 * case where an FIQ arrived just before we did the "cpsid aif".
398		 */
399		ldr	r0, =22f
400		bx	r0
401	22:
402#else
403	11:
404#endif
405		read_tpidrprw r0
406#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
407		read_tpidruro r1
408#endif
409	.endm
410
411/* The handler of native interrupt. */
412.macro	native_intr_handler mode:req
413	cpsid	aif
414	maybe_restore_mapping
415
416	/*
417	 * FIQ and IRQ have a +4 offset for lr compared to preferred return
418	 * address
419	 */
420	sub     lr, lr, #4
421
422	/*
423	 * We're always saving {r0-r3}. In IRQ mode we're saving r12 also.
424	 * In FIQ mode we're also saving the banked FIQ registers {r8-r12}
425	 * because the secure monitor doesn't save those. The treatment of
426	 * the banked fiq registers is somewhat analogous to the lazy save
427	 * of VFP registers.
428	 */
429	.ifc	\mode\(),fiq
430	push	{r0-r3, r8-r12, lr}
431	.else
432	push	{r0-r3, r12, lr}
433	.endif
434
435	bl	thread_check_canaries
436	bl	itr_core_handler
437
438	mrs	r0, spsr
439	cmp_spsr_user_mode r0
440
441	.ifc	\mode\(),fiq
442	pop	{r0-r3, r8-r12, lr}
443	.else
444	pop	{r0-r3, r12, lr}
445	.endif
446
447	movsne	pc, lr
448	b	eret_to_user_mode
449.endm
450
451/* The handler of foreign interrupt. */
452.macro foreign_intr_handler mode:req
453	cpsid	aif
454	maybe_restore_mapping
455
456	sub	lr, lr, #4
457	push	{r12}
458
459	.ifc	\mode\(),fiq
460	/*
461	 * If a foreign (non-secure) interrupt is received as an FIQ we need
462	 * to check whether we're in a state that can be saved or whether we
463	 * need to mask the interrupt and handle it later.
464	 *
465	 * The window when this is needed is quite narrow: it spans from
466	 * entering the exception vector until the "cpsid" instruction of the
467	 * handler has been executed.
468	 *
469	 * Currently we can save the state properly if the FIQ is received
470	 * while in user or svc (kernel) mode.
471	 *
472	 * If we're returning to abort, undef or irq mode we return with the
473	 * full mapping restored. This is OK since the reduced mapping will be
474	 * restored again before the handler we're returning to eventually
475	 * returns to user mode.
476	 */
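	/*
	 * Sketch (illustrative C pseudocode; read_spsr()/write_spsr() stand
	 * in for the mrs/msr instructions) of the check below: only user
	 * and svc mode are states we can save, any other mode gets FIQ
	 * masked in its saved SPSR so the interrupt is taken again later,
	 * once that context unmasks FIQ.
	 *
	 *	uint32_t spsr = read_spsr();
	 *
	 *	if ((spsr & ARM32_CPSR_MODE_MASK) != ARM32_CPSR_MODE_USR &&
	 *	    (spsr & ARM32_CPSR_MODE_MASK) != ARM32_CPSR_MODE_SVC) {
	 *		write_spsr(spsr | ARM32_CPSR_F);
	 *		return_from_exception();	// movs pc, lr
	 *	}
	 *	// Fall through: save the thread state and handle the interrupt
	 */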
477	mrs	r12, spsr
478	and	r12, r12, #ARM32_CPSR_MODE_MASK
479	cmp	r12, #ARM32_CPSR_MODE_USR
480	cmpne	r12, #ARM32_CPSR_MODE_SVC
481	beq	1f
482	mrs	r12, spsr
483	orr	r12, r12, #ARM32_CPSR_F
484	msr	spsr_fsxc, r12
485	pop	{r12}
486	movs	pc, lr
4871:
488	.endif
489
490	push	{lr}
491
492	.ifc	\mode\(),fiq
493	bl	thread_save_state_fiq
494	.else
495	bl	thread_save_state
496	.endif
497
498#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
499	/*
500	 * Prevent leaking information about which cache entries have been
501	 * used. We're relying on the secure monitor/dispatcher to take
502	 * care of the BTB.
503	 */
504	mov	r0, #DCACHE_OP_CLEAN_INV
505	bl	dcache_op_louis
506	write_iciallu
507#endif
508
509	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
510	mrs	r1, spsr
511	pop	{r2}
512	pop	{r12}
513	blx	thread_state_suspend
514
515	/*
516	 * Switch to SVC mode and carry over the current stack pointer since
517	 * it already points to the tmp stack.
518	 */
519	mov	r1, sp
520	cps	#CPSR_MODE_SVC
521	mov	sp, r1
522
523	/* Passing thread index in r0 */
524	b	thread_foreign_intr_exit
525.endm
526
527	.section .text.thread_excp_vect
528	.align	5
529FUNC thread_excp_vect , :
530UNWIND(	.fnstart)
531UNWIND(	.cantunwind)
532	b	.			/* Reset			*/
533	b	thread_und_handler	/* Undefined instruction	*/
534	b	thread_svc_handler	/* System call			*/
535	b	thread_pabort_handler	/* Prefetch abort		*/
536	b	thread_dabort_handler	/* Data abort			*/
537	b	.			/* Reserved			*/
538	b	thread_irq_handler	/* IRQ				*/
539	b	thread_fiq_handler	/* FIQ				*/
540#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
541	.macro vector_prologue_spectre
542		/*
543		 * This depends on SP being 8 byte aligned, that is, the
544		 * lowest three bits in SP are zero.
545		 *
546		 * To avoid unexpected speculation we need to invalidate
547		 * the branch predictor before we do the first branch. It
548		 * doesn't matter whether it's a conditional or an unconditional
549		 * branch; speculation can still occur either way.
550		 *
551		 * The idea is to form a specific bit pattern in the lowest
552		 * three bits of SP depending on which entry in the vector
553		 * we enter via.  This is done by adding 1 to SP in each
554		 * entry but the last.
555		 */
556		add	sp, sp, #1	/* 7:111 Reset			*/
557		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
558		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
559		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
560		add	sp, sp, #1	/* 3:011 Data abort		*/
561		add	sp, sp, #1	/* 2:010 Reserved		*/
562		add	sp, sp, #1	/* 1:001 IRQ			*/
563		cpsid   aif		/* 0:000 FIQ			*/
564	.endm
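
	/*
	 * Worked example of how the SP bit pattern set up above is decoded
	 * by the "add pc, pc, r0, LSL #3" sequence further down. Entering
	 * the vector at entry N counted from the bottom (FIQ = 0, IRQ = 1,
	 * ..., Reset = 7) executes N of the "add sp, sp, #1" instructions,
	 * so SP & 7 == N. In ARM state reading PC yields the address of the
	 * current instruction + 8, which at that point is the address of
	 * the first (FIQ) dispatch slot, and each slot is two 4-byte
	 * instructions. A C model of the computed branch (names and the
	 * function itself are illustrative):
	 *
	 *	uint32_t dispatch_target(uint32_t sp, uint32_t fiq_slot_addr)
	 *	{
	 *		uint32_t n = sp & 7;	// and r0, sp, #7
	 *
	 *		// add pc, pc, r0, LSL #3 with pc == fiq_slot_addr
	 *		return fiq_slot_addr + n * 8;
	 *	}
	 *
	 * so n == 0 branches to the FIQ slot, n == 1 to IRQ, and so on up
	 * to n == 7 for the unused Reset slot, matching the reversed order
	 * of the dispatch table below.
	 */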
565
566	.align	5
567	.global thread_excp_vect_workaround_a15
568thread_excp_vect_workaround_a15:
569	vector_prologue_spectre
570	write_tpidrprw r0
571	mrs	r0, spsr
572	cmp_spsr_user_mode r0
573	bne	1f
574	/*
575	 * Invalidate the branch predictor for the current processor.
576	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
577	 * effective.
578	 * Note that the BPIALL instruction is not effective in
579	 * invalidating the branch predictor on Cortex-A15. For that CPU,
580	 * set ACTLR[0] to 1 during early processor initialisation, and
581	 * invalidate the branch predictor by performing an ICIALLU
582	 * instruction. See also:
583	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
584	 */
585	write_iciallu
586	isb
587	b	1f
588
589	.align	5
590	.global thread_excp_vect_workaround
591thread_excp_vect_workaround:
592	vector_prologue_spectre
593	write_tpidrprw r0
594	mrs	r0, spsr
595	cmp_spsr_user_mode r0
596	bne	1f
597	/* Invalidate the branch predictor for the current processor. */
598	write_bpiall
599	isb
600
6011:	and	r0, sp, #(BIT(0) | BIT(1) | BIT(2))
602	bic	sp, sp, #(BIT(0) | BIT(1) | BIT(2))
603	add	pc, pc, r0, LSL #3
604	nop
605
606	read_tpidrprw r0
607	b	thread_fiq_handler	/* FIQ				*/
608	read_tpidrprw r0
609	b	thread_irq_handler	/* IRQ				*/
610	read_tpidrprw r0
611	b	.			/* Reserved			*/
612	read_tpidrprw r0
613	b	thread_dabort_handler	/* Data abort			*/
614	read_tpidrprw r0
615	b	thread_pabort_handler	/* Prefetch abort		*/
616	read_tpidrprw r0
617	b	thread_svc_handler	/* System call			*/
618	read_tpidrprw r0
619	b	thread_und_handler	/* Undefined instruction	*/
620	read_tpidrprw r0
621	b	.			/* Reset			*/
622#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
623
624thread_und_handler:
625	cpsid	aif
626	maybe_restore_mapping
627	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
628	mrs	r1, spsr
629	tst	r1, #CPSR_T
630	subne	lr, lr, #2
631	subeq	lr, lr, #4
632	mov	r0, #ABORT_TYPE_UNDEF
633	b	thread_abort_common
634
635thread_dabort_handler:
636	cpsid	aif
637	maybe_restore_mapping
638	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
639	sub	lr, lr, #8
640	mov	r0, #ABORT_TYPE_DATA
641	b	thread_abort_common
642
643thread_pabort_handler:
644	cpsid	aif
645	maybe_restore_mapping
646	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
647	sub	lr, lr, #4
648	mov	r0, #ABORT_TYPE_PREFETCH
649
650thread_abort_common:
651	/*
652	 * At this label:
653	 * the CPU is executing in undef or abort mode
654	 * sp is still pointing to struct thread_core_local belonging to
655	 * this core.
656	 * {r0, r1} are saved in struct thread_core_local pointed to by sp
657	 * {r2-r11, ip} are untouched.
658	 * r0 holds the first argument for abort_handler()
659	 */
660
661	/*
662	 * Update core local flags.
663	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
664	 */
665	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
666	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
667	orr	r1, r1, #THREAD_CLF_ABORT
668
669	/*
670	 * Select stack and update flags accordingly
671	 *
672	 * Normal case:
673	 * If the abort stack is unused select that.
674	 *
675	 * Fatal error handling:
676	 * If we're already using the abort stack as noted by bit
677	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
678	 * field we're selecting the temporary stack instead to be able to
679	 * make a stack trace of the abort in abort mode.
680	 *
681	 * r1 is initialized as a temporary stack pointer until we've
682	 * switched to system mode.
683	 */
684	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
685	orrne	r1, r1, #THREAD_CLF_TMP /* flags |= THREAD_CLF_TMP; */
686	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
687	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
688	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
689
690	/*
691	 * Store registers on the stack in a layout matching struct
692	 * thread_abort_regs, starting from the end of the struct with
693	 * {r2-r11, ip}.
694	 * Then load the previously saved {r0-r1} and store them, continuing
695	 * down to the pad field.
696	 * After this only {usr_sp, usr_lr} are missing in the struct.
697	 */
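	/*
	 * A sketch of the resulting stack layout (field names are
	 * approximate; the authoritative struct thread_abort_regs is
	 * defined in the thread headers). The fields at the lowest
	 * addresses are written last:
	 *
	 *	struct thread_abort_regs_sketch {
	 *		uint32_t usr_sp;	// stored last, lowest address
	 *		uint32_t usr_lr;
	 *		uint32_t pad;
	 *		uint32_t spsr;
	 *		uint32_t elr;		// adjusted return address
	 *		uint32_t r0;
	 *		uint32_t r1;
	 *		uint32_t r2_to_r11[10];
	 *		uint32_t ip;		// r12, highest address
	 *	};
	 */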
698	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
699	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
700	/* Push the original {r0-r1} on the selected stack */
701	stmdb	r1!, {r2-r3}
702	mrs	r3, spsr
703	/* Push {pad, spsr, elr} on the selected stack */
704	stmdb	r1!, {r2, r3, lr}
705
706	cps	#CPSR_MODE_SYS
707	str	lr, [r1, #-4]!
708	str	sp, [r1, #-4]!
709	mov	sp, r1
710
711	bl	abort_handler
712
713	mov	ip, sp
714	ldr	sp, [ip], #4
715	ldr	lr, [ip], #4
716
717	/*
718	 * Even if we entered via CPSR_MODE_UND, we are returning via
719	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
720	 * here.
721	 */
722	cps	#CPSR_MODE_ABT
723	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
724	msr	spsr_fsxc, r1
725
726	/* Update core local flags */
727	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
728	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
729	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
730
731	cmp_spsr_user_mode r1
732	ldm	ip, {r0-r11, ip}
733	movsne	pc, lr
734	b	eret_to_user_mode
735	/* end thread_abort_common */
736
737thread_svc_handler:
738	cpsid	aif
739
740	maybe_restore_mapping
741
742	push	{r0-r7, lr}
743	mrs	r0, spsr
744	push	{r0}
745	mov	r0, sp
746	bl	tee_svc_handler
747	cpsid	aif	/* In case something was unmasked */
748	pop	{r0}
749	msr	spsr_fsxc, r0
750	cmp_spsr_user_mode r0
751	pop	{r0-r7, lr}
752	movsne	pc, lr
753	b	eret_to_user_mode
754	/* end thread_svc_handler */
755
756thread_fiq_handler:
757#if defined(CFG_ARM_GICV3)
758	foreign_intr_handler	fiq
759#else
760	native_intr_handler	fiq
761#endif
762	/* end thread_fiq_handler */
763
764thread_irq_handler:
765#if defined(CFG_ARM_GICV3)
766	native_intr_handler	irq
767#else
768	foreign_intr_handler	irq
769#endif
770	/* end thread_irq_handler */
771
772	/*
773	 * Returns to user mode.
774	 * Expects to be jumped to with lr pointing to the user space
775	 * address to jump to and spsr holding the desired cpsr. Async
776	 * abort, irq and fiq should be masked.
777	 */
778eret_to_user_mode:
779	write_tpidrprw r0
780#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
781	write_tpidruro r1
782#endif
783
784#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
785	ldr	r0, =thread_user_kcode_offset
786	ldr	r0, [r0]
787	read_vbar r1
788	sub	r1, r1, r0
789	write_vbar r1
790	isb
791
792	/* Jump into the reduced mapping before the full mapping is removed */
793	ldr	r1, =1f
794	sub	r1, r1, r0
795	bx	r1
7961:
797#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
798
799#ifdef CFG_WITH_LPAE
800	read_ttbr0_64bit r0, r1
801#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
802	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
803#endif
804	/* switch to user ASID */
805	orr	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
806	write_ttbr0_64bit r0, r1
807	isb
808#else /*!CFG_WITH_LPAE*/
809#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
810	read_ttbr1 r0
811	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
812	write_ttbr1 r0
813	isb
814#endif
815	read_contextidr r0
816	orr	r0, r0, #BIT(0)
817	write_contextidr r0
818	isb
819#endif /*!CFG_WITH_LPAE*/
820
821	read_tpidrprw r0
822#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
823	read_tpidruro r1
824#endif
825
826	movs	pc, lr
827
828	/*
829	 * void icache_inv_user_range(void *addr, size_t size);
830	 *
831	 * which means executing with the reduced mapping, so the code needs
832	 * to be located here together with the exception vector.
833	 * to be located here together with the vector.
834	 */
835	.global icache_inv_user_range
836	.type icache_inv_user_range , %function
837icache_inv_user_range:
838	push	{r4-r7}
839
840	/* Mask all exceptions */
841	mrs	r4, cpsr	/* This register must be preserved */
842	cpsid	aif
843
844#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
845	ldr	r2, =thread_user_kcode_offset
846	ldr	r2, [r2]
847	read_vbar r5		/* This register must be preserved */
848	sub	r3, r5, r2
849	write_vbar r3
850	isb
851
852	/* Jump into the reduced mapping before the full mapping is removed */
853	ldr	r3, =1f
854	sub	r3, r3, r2
855	bx	r3
8561:
857#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
858
859#ifdef CFG_WITH_LPAE
860	read_ttbr0_64bit r6, r7	/* These registers must be preserved */
861#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
862	add	r2, r6, #CORE_MMU_L1_TBL_OFFSET
863#endif
864	/* switch to user ASID */
865	orr	r3, r7, #BIT(TTBR_ASID_SHIFT - 32)
866	write_ttbr0_64bit r2, r3
867	isb
868#else /*!CFG_WITH_LPAE*/
869#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
870	read_ttbr1 r6		/* This register must be preserved */
871	add	r2, r6, #CORE_MMU_L1_TBL_OFFSET
872	write_ttbr1 r2
873	isb
874#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
875	read_contextidr r7	/* This register must be preserved */
876	orr	r2, r7, #BIT(0)
877	write_contextidr r2
878	isb
879#endif /*!CFG_WITH_LPAE*/
880
881	/*
882	 * Do the actual icache invalidation
883	 */
884
885	/* Calculate minimum icache line size, result in r2 */
886	read_ctr r3
887	and     r3, r3, #CTR_IMINLINE_MASK
888	mov     r2, #CTR_WORD_SIZE
889	lsl     r2, r2, r3
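
	/*
	 * The value computed above is CTR_WORD_SIZE << CTR.IminLine, since
	 * CTR.IminLine (bits [3:0] of CTR) holds log2 of the smallest
	 * I-cache line size in words. Assuming CTR_WORD_SIZE is 4, an
	 * IminLine of 4 gives 4 << 4 == 64 bytes. Equivalent C sketch
	 * (read_ctr() stands in for the CP15 access):
	 *
	 *	size_t line_size = CTR_WORD_SIZE <<
	 *			   (read_ctr() & CTR_IMINLINE_MASK);
	 */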
890
891	add	r1, r0, r1
892	sub	r3, r2, #1
893	bic	r0, r0, r3
8941:
895	write_icimvau r0
896	add	r0, r0, r2
897	cmp	r0, r1
898	blo	1b
899
900	/* Invalidate entire branch predictor array inner shareable */
901	write_bpiallis
902
903	dsb	ishst
904	isb
905
906#ifdef CFG_WITH_LPAE
907	write_ttbr0_64bit r6, r7
908	isb
909#else /*!CFG_WITH_LPAE*/
910	write_contextidr r7
911	isb
912#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
913	write_ttbr1 r6
914	isb
915#endif
916#endif /*!CFG_WITH_LPAE*/
917
918#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
919	write_vbar r5
920	isb
921	/*
922	 * The PC is adjusted unconditionally to guard against the
923	 * case where an FIQ arrived just before we did the "cpsid aif".
924	 */
925	ldr	r0, =1f
926	bx	r0
9271:
928#endif
929
930	msr	cpsr_fsxc, r4	/* Restore exceptions */
931	pop	{r4-r7}
932	bx	lr		/* End of icache_inv_user_range() */
933
934	/*
935	 * Make sure that literals are placed before the
936	 * thread_excp_vect_end label.
937	 */
938	.pool
939UNWIND(	.fnend)
940	.global thread_excp_vect_end
941thread_excp_vect_end:
942END_FUNC thread_excp_vect
943