/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016-2020, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread_defs.h>
#include <mm/core_mmu.h>

#include "thread_private.h"

	.syntax unified
	.arch_extension sec

	.macro cmp_spsr_user_mode reg:req
		/*
		 * We only need to test the lower four bits since bit 4
		 * (0x10) of the mode field is always set.
		 */
		tst	\reg, #0x0f
	.endm
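/*
 * Callers load SPSR into a scratch register, invoke cmp_spsr_user_mode
 * and then branch on the flags: the exception return paths below use
 * "movsne pc, lr" to return directly when not going back to user mode
 * and fall through to "b eret_to_user_mode" otherwise.
 */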

FUNC thread_set_abt_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_abt_sp

FUNC thread_set_und_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_UND
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_und_sp

FUNC thread_set_irq_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_fiq_sp

FUNC thread_get_usr_sp , :
	mrs	r1, cpsr
	cpsid	aif
	cps	#CPSR_MODE_SYS
	mov	r0, sp
	msr	cpsr, r1
	bx	lr
END_FUNC thread_get_usr_sp
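/*
 * The helpers above share one pattern: save the current CPSR, switch to
 * the mode that owns the banked SP with "cps", access that SP and then
 * restore the original CPSR. thread_get_usr_sp() additionally masks
 * exceptions with "cpsid aif" first, presumably to avoid taking an
 * exception while temporarily in SYS mode, which shares its registers
 * with user mode.
 */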

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
UNWIND(	.cantunwind)
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4

	cps	#CPSR_MODE_SVC
	ldr	r1, [r12], #4
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4
	msr	spsr_fsxc, r1

	ldm	r12, {r1, r2}

	/*
	 * Switch to a mode other than SVC since we need to set SPSR before
	 * returning and the state we're returning to may itself be SVC
	 * mode.
	 */
	cps	#CPSR_MODE_ABT
	cmp_spsr_user_mode r2
	mov	lr, r1
	msr	spsr_fsxc, r2
	ldm	r0, {r0-r12}
	movsne	pc, lr
	b	eret_to_user_mode
END_FUNC thread_resume

/*
 * Disables async abort, IRQ and FIQ and saves the state of the thread
 * from FIQ mode, which has banked r8-r12 registers. Returns the original
 * CPSR.
 */
LOCAL_FUNC thread_save_state_fiq , :
UNWIND(	.cantunwind)
	mov	r9, lr

	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r8, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}

	cps	#CPSR_MODE_SYS
	stm	r0!, {r8-r12}
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	/* back to fiq mode */
	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r8			/* Return original CPSR */
	bx	r9
END_FUNC thread_save_state_fiq

/*
 * Disables async abort, IRQ and FIQ and saves the state of the thread.
 * Returns the original CPSR.
 */
FUNC thread_save_state , :
UNWIND(	.cantunwind)
	push	{r12, lr}
	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps	#CPSR_MODE_SYS
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
END_FUNC thread_save_state

/*
 * unsigned long thread_smc(unsigned long func_id, unsigned long a1,
 *			    unsigned long a2, unsigned long a3)
 */
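/*
 * Hedged usage sketch (C), caller side. The function ID below is a
 * placeholder rather than a real SMC function ID; only the prototype
 * above comes from this file.
 *
 *	unsigned long func_id = 0;	// placeholder SMC function ID
 *	unsigned long res;
 *
 *	res = thread_smc(func_id, 0, 0, 0);
 *
 * The four arguments arrive in r0-r3 per the AAPCS, which is also where
 * "smc #0" expects them, and the value the SMC handler leaves in r0 is
 * returned unchanged.
 */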
FUNC thread_smc , :
	smc	#0
	bx	lr
END_FUNC thread_smc

FUNC thread_init_vbar , :
	/* Set vector (VBAR) */
	write_vbar r0
	bx	lr
END_FUNC thread_init_vbar
DECLARE_KEEP_PAGER thread_init_vbar

/*
 * Below are low level routines handling entry and return from user mode.
 *
 * thread_enter_user_mode() saves all the registers that user mode can
 * change so that kernel mode can restore the needed registers when
 * resuming execution after the call to thread_enter_user_mode() has
 * returned.
 * thread_enter_user_mode() doesn't return directly since it enters user
 * mode instead; it's thread_unwind_user_mode() that does the returning,
 * by restoring the registers saved by thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to its
 * caller: the user TA calls _utee_return, the user TA calls _utee_panic,
 * or an abort occurs.
 *
 * Calls to _utee_return or _utee_panic are handled as:
 * __thread_svc_handler() -> thread_svc_handler() -> tee_svc_do_call(),
 * which calls syscall_return() or syscall_panic().
 *
 * These function calls return normally, except thread_svc_handler()
 * which is an exception handling routine and therefore reads the return
 * address and SPSR to restore from the stack. syscall_return() and
 * syscall_panic() change the return address and SPSR used by
 * thread_svc_handler() so that, instead of returning into user mode as
 * with other syscalls, it returns into thread_unwind_user_mode() in
 * kernel mode. When thread_svc_handler() returns, the stack pointer is
 * at the point where thread_enter_user_mode() left it, which is where
 * thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way, but by thread_abort_handler()
 * instead: when the pager sees that it's an abort from user mode that
 * can't be handled, it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */
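/*
 * Hedged C-level sketch of the flow described above. Only
 * __thread_enter_user_mode() and thread_unwind_user_mode() are defined
 * in this file; the variable names below are illustrative assumptions.
 *
 *	uint32_t exit_status0 = 0;
 *	uint32_t exit_status1 = 0;
 *	uint32_t ret;
 *
 *	ret = __thread_enter_user_mode(regs, &exit_status0, &exit_status1);
 *	// Reached only after syscall_return(), syscall_panic() or the
 *	// abort path has redirected execution into
 *	// thread_unwind_user_mode(), which supplies ret and fills in the
 *	// two exit status words.
 */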

/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 *
 * This function depends on being called with exceptions masked.
 */
FUNC __thread_enter_user_mode , :
UNWIND(	.cantunwind)
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function had returned. This is also used in
	 * syscall_panic().
	 *
	 * If the stack usage of this function is changed,
	 * thread_unwind_user_mode() has to be updated.
	 */
	push	{r4-r12, lr}

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r4, sp
	ldr	sp, [r0, #THREAD_CTX_REGS_USR_SP]
	cps	#CPSR_MODE_SVC

	push	{r1, r2, r4, r5}

	/* Prepare user mode entry via eret_to_user_mode */
	ldr	lr, [r0, #THREAD_CTX_REGS_PC]
	ldr	r4, [r0, #THREAD_CTX_REGS_CPSR]
	msr	spsr_fsxc, r4

	ldm	r0, {r0-r12}

	b	eret_to_user_mode
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *              uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND(	.cantunwind)
	/* Match push {r1, r2, r4, r5} in __thread_enter_user_mode() */
	pop	{r4-r7}
	str	r1, [r4]
	str	r2, [r5]

	/* Restore old user sp */
	cps	#CPSR_MODE_SYS
	mov	sp, r6
	cps	#CPSR_MODE_SVC

	/* Match push {r4-r12, lr} in __thread_enter_user_mode() */
	pop	{r4-r12, pc}
END_FUNC thread_unwind_user_mode
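/*
 * Stack contract between __thread_enter_user_mode() and
 * thread_unwind_user_mode() (descriptive note): when
 * thread_unwind_user_mode() runs, SP_svc points at the {r1, r2, r4, r5}
 * frame, i.e. {exit_status0 ptr, exit_status1 ptr, old user sp, old r5},
 * followed by the {r4-r12, lr} frame pushed on entry. This is why the
 * comment in __thread_enter_user_mode() warns that changing its stack
 * usage requires updating this function.
 */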

	.macro maybe_restore_mapping
		/*
		 * This macro is a bit hard to read due to all the ifdefs:
		 * we're testing for two different configs, which makes
		 * four different combinations.
		 *
		 * - With LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 * - Without LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 */

		/*
		 * At this point we can't rely on any memory being writable
		 * yet, so we're using TPIDRPRW to store r0 and, with LPAE
		 * or CFG_CORE_UNMAP_CORE_AT_EL0, TPIDRURO to store r1 too.
		 */
		write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		write_tpidruro r1
#endif

#ifdef CFG_WITH_LPAE
		read_ttbr0_64bit r0, r1
		tst	r1, #BIT(TTBR_ASID_SHIFT - 32)
		beq	11f

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/*
		 * Update the mapping to use the full kernel mode mapping.
		 * Since the translation table could reside above 4GB we'll
		 * have to use 64-bit arithmetic.
		 */
		subs	r0, r0, #CORE_MMU_L1_TBL_OFFSET
		sbc	r1, r1, #0
#endif
		bic	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
		write_ttbr0_64bit r0, r1
		isb

#else /*!CFG_WITH_LPAE*/
		read_contextidr r0
		tst	r0, #1
		beq	11f

		/* Update the mapping to use the full kernel mode mapping. */
		bic	r0, r0, #1
		write_contextidr r0
		isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		read_ttbcr r0
		bic	r0, r0, #TTBCR_PD1
		write_ttbcr r0
		isb
#endif

#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		ldr	r0, =thread_user_kcode_offset
		ldr	r0, [r0]
		read_vbar r1
		add	r1, r1, r0
		write_vbar r1
		isb

	11:	/*
		 * The PC is adjusted unconditionally to guard against the
		 * case there was an FIQ just before we did the "cpsid aif".
		 */
		ldr	r0, =22f
		bx	r0
	22:
#else
	11:
#endif
		read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		read_tpidruro r1
#endif
	.endm
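/*
 * Summary of the convention used by maybe_restore_mapping above and
 * eret_to_user_mode below (descriptive note): the reduced (user) mapping
 * is marked by the ASID bit (BIT(TTBR_ASID_SHIFT - 32)) in the upper
 * word of TTBR0 with LPAE, or by bit 0 of CONTEXTIDR without LPAE. With
 * CFG_CORE_UNMAP_CORE_AT_EL0 the reduced mapping additionally uses the
 * translation table at CORE_MMU_L1_TBL_OFFSET from the full one (LPAE),
 * or sets TTBCR.PD1 (non-LPAE), and VBAR is moved by
 * thread_user_kcode_offset so the vector stays mapped.
 */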

/* Handler for native interrupts. */
.macro	native_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	/*
	 * FIQ and IRQ have a +4 offset for lr compared to the preferred
	 * return address.
	 */
	sub	lr, lr, #4

	/*
	 * We always save {r0-r3}. In IRQ mode we also save r12. In FIQ
	 * mode we save the banked FIQ registers {r8-r12} because the
	 * secure monitor doesn't save those. The treatment of the banked
	 * FIQ registers is somewhat analogous to the lazy save of VFP
	 * registers.
	 */
	.ifc	\mode\(),fiq
	push	{r0-r3, r8-r12, lr}
	.else
	push	{r0-r3, r12, lr}
	.endif

	bl	thread_check_canaries
	bl	itr_core_handler

	mrs	r0, spsr
	cmp_spsr_user_mode r0

	.ifc	\mode\(),fiq
	pop	{r0-r3, r8-r12, lr}
	.else
	pop	{r0-r3, r12, lr}
	.endif

	movsne	pc, lr
	b	eret_to_user_mode
.endm

/* Handler for foreign interrupts. */
.macro foreign_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	sub	lr, lr, #4
	push	{r12}

	.ifc	\mode\(),fiq
	/*
	 * If a foreign (non-secure) interrupt is received as a FIQ we
	 * need to check whether we're in a state where it can be saved,
	 * or whether we have to mask the interrupt and handle it later.
	 *
	 * The window when this is needed is quite narrow, it's between
	 * entering the exception vector and until the "cpsid" instruction
	 * of the handler has been executed.
	 *
	 * Currently we can save the state properly if the FIQ is received
	 * while in user or svc (kernel) mode.
	 *
	 * If we're returning to abort, undef or IRQ mode we return with
	 * the full mapping restored. This is OK since the reduced mapping
	 * will be restored again before the handler we return to
	 * eventually returns to user mode.
	 */
	mrs	r12, spsr
	and	r12, r12, #ARM32_CPSR_MODE_MASK
	cmp	r12, #ARM32_CPSR_MODE_USR
	cmpne	r12, #ARM32_CPSR_MODE_SVC
	beq	1f
	mrs	r12, spsr
	orr	r12, r12, #ARM32_CPSR_F
	msr	spsr_fsxc, r12
	pop	{r12}
	movs	pc, lr
1:
	.endif

	push	{lr}

	.ifc	\mode\(),fiq
	bl	thread_save_state_fiq
	.else
	bl	thread_save_state
	.endif

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which cache entries have been
	 * used. We're relying on the secure monitor/dispatcher to take
	 * care of the BTB.
	 */
	mov	r0, #DCACHE_OP_CLEAN_INV
	bl	dcache_op_louis
	write_iciallu
#endif

	/*
	 * Use SP_abt to update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_TMP;
	 */
	cps	#CPSR_MODE_ABT
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_TMP
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	.ifc	\mode\(),fiq
	cps	#CPSR_MODE_FIQ
	.else
	cps	#CPSR_MODE_IRQ
	.endif

	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	r1, spsr
	pop	{r2}
	pop	{r12}
	blx	thread_state_suspend

	/*
	 * Switch to SVC mode and copy current stack pointer as it already
	 * is the tmp stack.
	 */
	mov	r1, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r1

	/* Passing thread index in r0 */
	b	thread_foreign_intr_exit
.endm

FUNC thread_excp_vect , :, align=32
UNWIND(	.cantunwind)
	b	.			/* Reset			*/
	b	__thread_und_handler	/* Undefined instruction	*/
	b	__thread_svc_handler	/* System call			*/
	b	__thread_pabort_handler	/* Prefetch abort		*/
	b	__thread_dabort_handler	/* Data abort			*/
	b	.			/* Reserved			*/
	b	__thread_irq_handler	/* IRQ				*/
	b	__thread_fiq_handler	/* FIQ				*/
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8 byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * To avoid unexpected speculation we need to invalidate
		 * the branch predictor before we do the first branch. It
		 * doesn't matter whether it's a conditional or an
		 * unconditional branch, speculation can still occur.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * we enter via.  This is done by adding 1 to SP in each
		 * entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		cpsid	aif		/* 0:000 FIQ			*/
	.endm

	.balign	32
	.global thread_excp_vect_workaround_a15
thread_excp_vect_workaround_a15:
	vector_prologue_spectre
	write_tpidrprw r0
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.balign	32
	.global thread_excp_vect_workaround
thread_excp_vect_workaround:
	vector_prologue_spectre
	write_tpidrprw r0
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:	and	r0, sp, #(BIT(0) | BIT(1) | BIT(2))
	bic	sp, sp, #(BIT(0) | BIT(1) | BIT(2))
	add	pc, pc, r0, LSL #3
	nop
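	/*
	 * Dispatch note: r0 now holds the number of "add sp, sp, #1"
	 * instructions executed in the prologue, i.e. 7 for Reset down to
	 * 0 for FIQ. Reading PC in "add pc, pc, r0, LSL #3" yields the
	 * address of the first pair below, so each increment of r0 skips
	 * one 8-byte (read_tpidrprw + branch) pair: r0 == 0 dispatches
	 * FIQ, r0 == 1 IRQ, and so on up to r0 == 7 for Reset.
	 */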

	read_tpidrprw r0
	b	__thread_fiq_handler	/* FIQ				*/
	read_tpidrprw r0
	b	__thread_irq_handler	/* IRQ				*/
	read_tpidrprw r0
	b	.			/* Reserved			*/
	read_tpidrprw r0
	b	__thread_dabort_handler	/* Data abort			*/
	read_tpidrprw r0
	b	__thread_pabort_handler	/* Prefetch abort		*/
	read_tpidrprw r0
	b	__thread_svc_handler	/* System call			*/
	read_tpidrprw r0
	b	__thread_und_handler	/* Undefined instruction	*/
	read_tpidrprw r0
	b	.			/* Reset			*/
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

__thread_und_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	mrs	r1, spsr
	tst	r1, #CPSR_T
	subne	lr, lr, #2
	subeq	lr, lr, #4
	mov	r0, #ABORT_TYPE_UNDEF
	b	__thread_abort_common

__thread_dabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #8
	mov	r0, #ABORT_TYPE_DATA
	b	__thread_abort_common

__thread_pabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #4
	mov	r0, #ABORT_TYPE_PREFETCH

__thread_abort_common:
	/*
	 * At this label:
	 * cpsr is in undef or abort mode
	 * sp is still pointing to the struct thread_core_local belonging
	 * to this core.
	 * {r0, r1} are saved in the struct thread_core_local pointed to
	 * by sp.
	 * {r2-r11, ip} are untouched.
	 * r0 holds the first argument for abort_handler()
	 */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_ABORT

	/*
	 * Select stack and update flags accordingly
	 *
	 * Normal case:
	 * If the abort stack is unused select that.
	 *
	 * Fatal error handling:
	 * If we're already using the abort stack, as noted by bit
	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
	 * field, we select the temporary stack instead to be able to
	 * make a stack trace of the abort in abort mode.
	 *
	 * r1 is used as a temporary stack pointer until we've switched
	 * to system mode.
	 */
	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	orrne	r1, r1, #THREAD_CLF_TMP /* flags |= THREAD_CLF_TMP; */
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]

	/*
	 * Store registers on the stack to fit struct thread_abort_regs,
	 * starting from the end of the struct:
	 * first {r2-r11, ip}, then the previously saved {r0, r1} loaded
	 * from thread_core_local, then {pad, spsr, elr}.
	 * After this only {usr_sp, usr_lr} are missing in the struct.
	 */
	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
	/* Push the original {r0-r1} on the selected stack */
	stmdb	r1!, {r2-r3}
	mrs	r3, spsr
	/* Push {pad, spsr, elr} on the selected stack */
	stmdb	r1!, {r2, r3, lr}

	cps	#CPSR_MODE_SYS
	str	lr, [r1, #-4]!
	str	sp, [r1, #-4]!
	mov	sp, r1
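	/*
	 * Descriptive note: the frame built above, now addressed by both
	 * r1 and SP_sys, is laid out from low to high address as
	 * {usr_sp, usr_lr, pad, spsr, elr, r0-r11, ip}, i.e. the
	 * struct thread_abort_regs the comments above refer to. It is
	 * passed to abort_handler() in r1, while r0 holds the abort type.
	 */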

	bl	abort_handler

	mov	ip, sp
	ldr	sp, [ip], #4
	ldr	lr, [ip], #4

	/*
	 * Even if we entered via CPSR_MODE_UND, we are returning via
	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
	 * here.
	 */
	cps	#CPSR_MODE_ABT
	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
	msr	spsr_fsxc, r1

	/* Update core local flags */
	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	cmp_spsr_user_mode r1
	ldm	ip, {r0-r11, ip}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_abort_common */

__thread_svc_handler:
	cpsid	aif

	maybe_restore_mapping

	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp
	bl	thread_svc_handler
	cpsid	aif	/* In case something was unmasked */
	pop	{r0}
	msr	spsr_fsxc, r0
	cmp_spsr_user_mode r0
	pop	{r0-r7, lr}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_svc_handler */

__thread_fiq_handler:
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
	/* end thread_fiq_handler */

__thread_irq_handler:
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
	/* end thread_irq_handler */

	/*
	 * Returns to user mode.
	 * Expects to be jumped to with lr pointing to the user space
	 * address to jump to and spsr holding the desired cpsr. Async
	 * abort, irq and fiq should be masked.
	 */
eret_to_user_mode:
	write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	write_tpidruro r1
#endif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r0, =thread_user_kcode_offset
	ldr	r0, [r0]
	read_vbar r1
	sub	r1, r1, r0
	write_vbar r1
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r1, =1f
	sub	r1, r1, r0
	bx	r1
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r0, r1
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
#endif
	/* switch to user ASID */
	orr	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r0, r1
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r0
	orr	r0, r0, #TTBCR_PD1
	write_ttbcr r0
	isb
#endif
	read_contextidr r0
	orr	r0, r0, #BIT(0)
	write_contextidr r0
	isb
#endif /*!CFG_WITH_LPAE*/

	read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	read_tpidruro r1
#endif

	movs	pc, lr

	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active;
	 * this means executing with the reduced mapping, so the code
	 * needs to be located here together with the vector.
	 */
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	push	{r4-r7}

	/* Mask all exceptions */
	mrs	r4, cpsr	/* This register must be preserved */
	cpsid	aif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r2, =thread_user_kcode_offset
	ldr	r2, [r2]
	read_vbar r5		/* This register must be preserved */
	sub	r3, r5, r2
	write_vbar r3
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r3, =1f
	sub	r3, r3, r2
	bx	r3
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r6, r7	/* These registers must be preserved */
	/* switch to user ASID */
	orr	r3, r7, #BIT(TTBR_ASID_SHIFT - 32)
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r2, r6, #CORE_MMU_L1_TBL_OFFSET
	write_ttbr0_64bit r2, r3
#else
	write_ttbr0_64bit r6, r3
#endif
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r6	/* This register must be preserved */
	orr	r2, r6, #TTBCR_PD1
	write_ttbcr r2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	read_contextidr r7	/* This register must be preserved */
	orr	r2, r7, #BIT(0)
	write_contextidr r2
	isb
#endif /*!CFG_WITH_LPAE*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate minimum icache line size, result in r2 */
	read_ctr r3
	and	r3, r3, #CTR_IMINLINE_MASK
	mov	r2, #CTR_WORD_SIZE
	lsl	r2, r2, r3
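	/*
	 * Worked example: CTR.IminLine is log2 of the number of words in
	 * the smallest icache line, so with IminLine == 4 the line size
	 * becomes CTR_WORD_SIZE << 4 = 4 << 4 = 64 bytes.
	 */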

	add	r1, r0, r1
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	write_icimvau r0
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b

	/* Invalidate entire branch predictor array inner shareable */
	write_bpiallis

	dsb	ishst
	isb

#ifdef CFG_WITH_LPAE
	write_ttbr0_64bit r6, r7
	isb
#else /*!CFG_WITH_LPAE*/
	write_contextidr r7
	isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_ttbcr r6
	isb
#endif
#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_vbar r5
	isb
	/*
	 * The PC is adjusted unconditionally to guard against the
	 * case there was an FIQ just before we did the "cpsid aif".
	 */
	ldr	r0, =1f
	bx	r0
1:
#endif

	msr	cpsr_fsxc, r4	/* Restore exceptions */
	pop	{r4-r7}
	bx	lr		/* End of icache_inv_user_range() */

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect
