/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016-2020, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread_defs.h>
#include <mm/core_mmu.h>

#include "thread_private.h"

	.syntax unified
	.arch_extension sec

	.macro cmp_spsr_user_mode reg:req
		/*
		 * We only need to test the lower 4 bits as bit 4 (0x10)
		 * is always set.
		 */
		tst	\reg, #0x0f
	.endm
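
/*
 * cmp_spsr_user_mode sets the Z flag when the mode bits indicate user
 * mode. Callers pair it with "movsne pc, lr" to return directly when
 * going back to a kernel mode and fall through to eret_to_user_mode when
 * going back to user mode.
 */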

FUNC thread_set_abt_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_abt_sp

FUNC thread_set_und_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_UND
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_und_sp

FUNC thread_set_irq_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_fiq_sp

/* void thread_resume(struct thread_ctx_regs *regs) */
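/*
 * The loads below assume the layout of struct thread_ctx_regs: r0-r12
 * first, followed by the user mode sp and lr, the svc mode spsr, sp and
 * lr, and finally pc and cpsr. r12 is therefore first advanced past the
 * 13 general purpose registers, which are restored last.
 */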
FUNC thread_resume , :
UNWIND(	.cantunwind)
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4

	cps	#CPSR_MODE_SVC
	ldr	r1, [r12], #4
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4
	msr	spsr_fsxc, r1

	ldm	r12, {r1, r2}

	/*
	 * Switch to a mode other than SVC since we need to set spsr in
	 * order to return into the old state properly, and the mode we
	 * return to may itself be SVC.
	 */
	cps	#CPSR_MODE_ABT
	cmp_spsr_user_mode r2
	mov	lr, r1
	msr	spsr_fsxc, r2
	ldm	r0, {r0-r12}
	movsne	pc, lr
	b	eret_to_user_mode
END_FUNC thread_resume

/*
 * Disables async abort, IRQ and FIQ and saves the state of the thread
 * when called from FIQ mode, which has its own banked r8-r12 registers.
 * Returns the original CPSR.
 */
LOCAL_FUNC thread_save_state_fiq , :
UNWIND(	.cantunwind)
	mov	r9, lr

	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r8, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}

	cps	#CPSR_MODE_SYS
	stm	r0!, {r8-r12}
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	/* back to fiq mode */
	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r8			/* Return original CPSR */
	bx	r9
END_FUNC thread_save_state_fiq

/*
 * Disables async abort, IRQ and FIQ, saves the state of the thread and
 * returns the original CPSR.
 */
FUNC thread_save_state , :
UNWIND(	.cantunwind)
	push	{r12, lr}
	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps	#CPSR_MODE_SYS
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
END_FUNC thread_save_state

/*
 * unsigned long thread_smc(unsigned long func_id, unsigned long a1,
 *			    unsigned long a2, unsigned long a3)
 */
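/*
 * With the AAPCS func_id and a1-a3 already arrive in r0-r3, which is
 * where the SMC calling convention expects them, and the result of the
 * SMC comes back in r0, so a bare "smc #0" is all that is needed here.
 */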
FUNC thread_smc , :
	smc	#0
	bx	lr
END_FUNC thread_smc

FUNC thread_init_vbar , :
	/* Set vector (VBAR) */
	write_vbar r0
	bx	lr
END_FUNC thread_init_vbar
DECLARE_KEEP_PAGER thread_init_vbar

/*
 * Below are low level routines handling entry to and return from user mode.
 *
 * thread_enter_user_mode() saves all the registers that user mode can
 * change so kernel mode can restore the needed registers when resuming
 * execution after the call to thread_enter_user_mode() has returned.
 * thread_enter_user_mode() doesn't return directly since it enters user
 * mode instead, it's thread_unwind_user_mode() that does the returning
 * by restoring the registers saved by thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to its
 * caller: the user TA calls _utee_return, the user TA calls _utee_panic,
 * or an abort occurs.
 *
 * Calls to _utee_return or _utee_panic are handled as:
 * __thread_svc_handler() -> thread_svc_handler() -> tee_svc_do_call() which
 * calls syscall_return() or syscall_panic().
 *
 * These function calls return normally, except thread_svc_handler() which
 * is an exception handling routine and so reads the return address and
 * SPSR to restore from the stack. syscall_return() and syscall_panic()
 * change the return address and SPSR used by thread_svc_handler() so that,
 * instead of returning into user mode as with other syscalls, it returns
 * into thread_unwind_user_mode() in kernel mode. When thread_svc_handler()
 * returns, the stack pointer is at the point where thread_enter_user_mode()
 * left it, so this is where thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way, but by thread_abort_handler()
 * instead. When the pager sees that it's an abort from user mode that
 * can't be handled, it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode().
 */
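
/*
 * Rough call/return flow for the normal _utee_return case, a sketch based
 * on the description above:
 *
 *   thread_enter_user_mode()               kernel mode
 *     __thread_enter_user_mode()           saves r4-r12, lr; enters user mode
 *       ... user TA executes ...           user mode
 *       _utee_return()                     svc, __thread_svc_handler()
 *         thread_svc_handler()
 *           tee_svc_do_call()
 *             syscall_return()             redirects the exception return
 *     thread_unwind_user_mode()            restores r4-r12 and returns to
 *                                          the caller of
 *                                          thread_enter_user_mode()
 */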

/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 *
 * This function depends on being called with exceptions masked.
 */
FUNC __thread_enter_user_mode , :
UNWIND(	.cantunwind)
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function had returned. This is also used in
	 * syscall_panic().
	 *
	 * If the stack usage of this function is changed,
	 * thread_unwind_user_mode() has to be updated.
	 */
	push	{r4-r12,lr}

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r4, sp
	ldr	sp, [r0, #THREAD_CTX_REGS_USR_SP]
	cps	#CPSR_MODE_SVC

	push	{r1, r2, r4, r5}

	/* Prepare user mode entry via eret_to_user_mode */
	ldr	lr, [r0, #THREAD_CTX_REGS_PC]
	ldr	r4, [r0, #THREAD_CTX_REGS_CPSR]
	msr	spsr_fsxc, r4

	ldm	r0, {r0-r12}

	b	eret_to_user_mode
END_FUNC __thread_enter_user_mode
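
/*
 * Stack layout left by __thread_enter_user_mode() above and consumed by
 * thread_unwind_user_mode() below, lowest address first:
 *   exit_status0 pointer (pushed from r1)
 *   exit_status1 pointer (pushed from r2)
 *   old user sp (pushed from r4)
 *   saved r5
 *   saved r4-r12 and lr of the caller
 */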

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *              uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND(	.cantunwind)
	/* Match push {r1, r2, r4, r5} in __thread_enter_user_mode() */
	pop	{r4-r7}
	str	r1, [r4]
	str	r2, [r5]

	/* Restore old user sp */
	cps	#CPSR_MODE_SYS
	mov	sp, r6
	cps	#CPSR_MODE_SVC

	/* Match push {r4-r12,lr} in __thread_enter_user_mode() */
	pop	{r4-r12,pc}
END_FUNC thread_unwind_user_mode

	.macro maybe_restore_mapping
		/*
		 * This macro is a bit hard to read due to all the ifdefs:
		 * we're testing for two different configs, which makes four
		 * different combinations.
		 *
		 * - With LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 * - Without LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 */

		/*
		 * At this point we can't rely on any memory being writable
		 * yet, so we're using TPIDRPRW to store r0 and, when LPAE
		 * or CFG_CORE_UNMAP_CORE_AT_EL0 is enabled, TPIDRURO to
		 * store r1 too.
		 */
		write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		write_tpidruro r1
#endif

#ifdef CFG_WITH_LPAE
		read_ttbr0_64bit r0, r1
		tst	r1, #BIT(TTBR_ASID_SHIFT - 32)
		beq	11f

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/*
		 * Update the mapping to use the full kernel mode mapping.
		 * Since the translation table could reside above 4GB we'll
		 * have to use 64-bit arithmetic.
		 */
		subs	r0, r0, #CORE_MMU_L1_TBL_OFFSET
		sbc	r1, r1, #0
#endif
		bic	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
		write_ttbr0_64bit r0, r1
		isb

#else /*!CFG_WITH_LPAE*/
		read_contextidr r0
		tst	r0, #1
		beq	11f

		/* Update the mapping to use the full kernel mode mapping. */
		bic	r0, r0, #1
		write_contextidr r0
		isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		read_ttbcr r0
		bic	r0, r0, #TTBCR_PD1
		write_ttbcr r0
		isb
#endif

#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		ldr	r0, =thread_user_kcode_offset
		ldr	r0, [r0]
		read_vbar r1
		add	r1, r1, r0
		write_vbar r1
		isb

	11:	/*
		 * The PC is adjusted unconditionally to guard against the
		 * case where there was an FIQ just before we did the
		 * "cpsid aif".
		 */
		ldr	r0, =22f
		bx	r0
	22:
#else
	11:
#endif
		read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		read_tpidruro r1
#endif
	.endm
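
/*
 * The inverse of maybe_restore_mapping is done by eret_to_user_mode
 * further down, which switches back to the user mapping and ASID before
 * returning to user mode.
 */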

/* The handler of native interrupts. */
.macro	native_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	/*
	 * FIQ and IRQ have a +4 offset for lr compared to the preferred
	 * return address.
	 */
	sub	lr, lr, #4

	/*
	 * We're always saving {r0-r3}. In IRQ mode we're also saving r12.
	 * In FIQ mode we're saving the banked FIQ registers {r8-r12}
	 * because the secure monitor doesn't save those. The treatment of
	 * the banked FIQ registers is somewhat analogous to the lazy save
	 * of VFP registers.
	 */
	.ifc	\mode\(),fiq
	push	{r0-r3, r8-r12, lr}
	.else
	push	{r0-r3, r12, lr}
	.endif

	bl	thread_check_canaries
	bl	itr_core_handler

	mrs	r0, spsr
	cmp_spsr_user_mode r0

	.ifc	\mode\(),fiq
	pop	{r0-r3, r8-r12, lr}
	.else
	pop	{r0-r3, r12, lr}
	.endif

	movsne	pc, lr
	b	eret_to_user_mode
.endm
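
/*
 * Note that a native interrupt is handled entirely on the stack of the
 * exception mode it arrives in and leaves the thread state untouched,
 * while foreign_intr_handler below saves the thread state, suspends the
 * thread with thread_state_suspend() and exits to the normal world.
 */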

/* The handler of foreign interrupts. */
.macro foreign_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	sub	lr, lr, #4
	push	{r12}

	.ifc	\mode\(),fiq
	/*
	 * If a foreign (non-secure) interrupt is received as a FIQ we need
	 * to check that we're in a saveable state, or if we need to mask
	 * the interrupt to have it handled later.
	 *
	 * The window when this is needed is quite narrow, it's between
	 * entering the exception vector and until the "cpsid" instruction
	 * of the handler has been executed.
	 *
	 * Currently we can save the state properly if the FIQ is received
	 * while in user or svc (kernel) mode.
	 *
	 * If we're returning to abort, undef or irq mode we're returning
	 * with the mapping restored. This is OK since, before the handler
	 * we're returning to eventually returns to user mode, the reduced
	 * mapping will be restored.
	 */
	mrs	r12, spsr
	and	r12, r12, #ARM32_CPSR_MODE_MASK
	cmp	r12, #ARM32_CPSR_MODE_USR
	cmpne	r12, #ARM32_CPSR_MODE_SVC
	beq	1f
	mrs	r12, spsr
	orr	r12, r12, #ARM32_CPSR_F
	msr	spsr_fsxc, r12
	pop	{r12}
	movs	pc, lr
1:
	.endif

	push	{lr}

	.ifc	\mode\(),fiq
	bl	thread_save_state_fiq
	.else
	bl	thread_save_state
	.endif

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which cache entries have been
	 * used. We're relying on the secure monitor/dispatcher to take
	 * care of the BTB.
	 */
	mov	r0, #DCACHE_OP_CLEAN_INV
	bl	dcache_op_louis
	write_iciallu
#endif

	/*
	 * Use SP_abt to update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_TMP;
	 */
	cps	#CPSR_MODE_ABT
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_TMP
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	.ifc	\mode\(),fiq
	cps	#CPSR_MODE_FIQ
	.else
	cps	#CPSR_MODE_IRQ
	.endif

	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	r1, spsr
	pop	{r2}
	pop	{r12}
	blx	thread_state_suspend

	/*
	 * Switch to SVC mode and copy the current stack pointer as it is
	 * already the tmp stack.
	 */
	mov	r1, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r1

	/* Passing thread index in r0 */
	b	thread_foreign_intr_exit
.endm
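
/*
 * A thread suspended by foreign_intr_handler above is typically resumed
 * later through thread_resume() near the top of this file, once the
 * normal world has handled the foreign interrupt and re-enters OP-TEE to
 * resume the thread.
 */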

FUNC thread_excp_vect , :, align=32
UNWIND(	.cantunwind)
	b	.			/* Reset			*/
	b	__thread_und_handler	/* Undefined instruction	*/
	b	__thread_svc_handler	/* System call			*/
	b	__thread_pabort_handler	/* Prefetch abort		*/
	b	__thread_dabort_handler	/* Data abort			*/
	b	.			/* Reserved			*/
	b	__thread_irq_handler	/* IRQ				*/
	b	__thread_fiq_handler	/* FIQ				*/
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8 byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * To avoid unexpected speculation we need to invalidate
		 * the branch predictor before we do the first branch. It
		 * doesn't matter if it's a conditional or an unconditional
		 * branch, speculation can still occur.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * we enter via. This is done by adding 1 to SP in each
		 * entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		cpsid	aif		/* 0:000 FIQ			*/
	.endm
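
	/*
	 * Worked example (assuming the standard vector offsets): an IRQ
	 * enters at vector offset 0x18 and executes only the last
	 * "add sp, sp, #1" before the "cpsid aif", so the low bits of SP
	 * become 001. The dispatch code below ("add pc, pc, r0, LSL #3")
	 * then uses that value to branch to the matching 8-byte entry,
	 * read_tpidrprw plus a branch, in the table that follows it.
	 */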

	.balign	32
	.global thread_excp_vect_workaround_a15
thread_excp_vect_workaround_a15:
	vector_prologue_spectre
	write_tpidrprw r0
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.balign	32
	.global thread_excp_vect_workaround
thread_excp_vect_workaround:
	vector_prologue_spectre
	write_tpidrprw r0
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:	and	r0, sp, #(BIT(0) | BIT(1) | BIT(2))
	bic	sp, sp, #(BIT(0) | BIT(1) | BIT(2))
	add	pc, pc, r0, LSL #3
	nop

	read_tpidrprw r0
	b	__thread_fiq_handler	/* FIQ				*/
	read_tpidrprw r0
	b	__thread_irq_handler	/* IRQ				*/
	read_tpidrprw r0
	b	.			/* Reserved			*/
	read_tpidrprw r0
	b	__thread_dabort_handler	/* Data abort			*/
	read_tpidrprw r0
	b	__thread_pabort_handler	/* Prefetch abort		*/
	read_tpidrprw r0
	b	__thread_svc_handler	/* System call			*/
	read_tpidrprw r0
	b	__thread_und_handler	/* Undefined instruction	*/
	read_tpidrprw r0
	b	.			/* Reset			*/
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

__thread_und_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	mrs	r1, spsr
	tst	r1, #CPSR_T
	subne	lr, lr, #2
	subeq	lr, lr, #4
	mov	r0, #ABORT_TYPE_UNDEF
	b	__thread_abort_common

__thread_dabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #8
	mov	r0, #ABORT_TYPE_DATA
	b	__thread_abort_common

__thread_pabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #4
	mov	r0, #ABORT_TYPE_PREFETCH

__thread_abort_common:
	/*
	 * At this label:
	 * cpsr is in undef or abort mode
	 * sp is still pointing to the struct thread_core_local belonging
	 * to this core.
	 * {r0, r1} are saved in the struct thread_core_local pointed to
	 * by sp
	 * {r2-r11, ip} are untouched.
	 * r0 holds the first argument for abort_handler()
	 */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_ABORT

	/*
	 * Select stack and update flags accordingly
	 *
	 * Normal case:
	 * If the abort stack is unused select that.
	 *
	 * Fatal error handling:
	 * If we're already using the abort stack as noted by bit
	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
	 * field we're selecting the temporary stack instead to be able to
	 * make a stack trace of the abort in abort mode.
	 *
	 * r1 is initialized as a temporary stack pointer until we've
	 * switched to system mode.
	 */
	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	orrne	r1, r1, #THREAD_CLF_TMP /* flags |= THREAD_CLF_TMP; */
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]

	/*
	 * Store registers on the selected stack to fit struct
	 * thread_abort_regs, starting from the end of the struct:
	 * first {r2-r11, ip}, then the previously saved {r0-r1}, and then
	 * the remaining fields down to and including pad.
	 * After this only {usr_sp, usr_lr} are missing in the struct.
	 */
	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
	/* Push the original {r0-r1} on the selected stack */
	stmdb	r1!, {r2-r3}
	mrs	r3, spsr
	/* Push {pad, spsr, elr} on the selected stack */
	stmdb	r1!, {r2, r3, lr}

	cps	#CPSR_MODE_SYS
	str	lr, [r1, #-4]!
	str	sp, [r1, #-4]!
	mov	sp, r1

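	/*
	 * At this point r1 == sp and both point at the frame just built,
	 * which from the lowest address up holds: usr_sp, usr_lr, pad,
	 * spsr, elr (the return address) and r0-r11 plus ip. This frame
	 * is the struct thread_abort_regs whose address is passed to
	 * abort_handler() below together with the abort type in r0.
	 */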
	bl	abort_handler

	mov	ip, sp
	ldr	sp, [ip], #4
	ldr	lr, [ip], #4

	/*
	 * Even if we entered via CPSR_MODE_UND, we are returning via
	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
	 * here.
	 */
	cps	#CPSR_MODE_ABT
	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
	msr	spsr_fsxc, r1

	/* Update core local flags */
	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	cmp_spsr_user_mode r1
	ldm	ip, {r0-r11, ip}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_abort_common */

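/*
 * The svc handler builds a frame on the stack holding spsr followed by
 * r0-r7 and lr, passes a pointer to it to thread_svc_handler() in r0 and
 * afterwards restores the possibly updated spsr and return address from
 * the frame, returning either directly or via eret_to_user_mode.
 */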
__thread_svc_handler:
	cpsid	aif

	maybe_restore_mapping

	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp
	bl	thread_svc_handler
	cpsid	aif	/* In case something was unmasked */
	pop	{r0}
	msr	spsr_fsxc, r0
	cmp_spsr_user_mode r0
	pop	{r0-r7, lr}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_svc_handler */

__thread_fiq_handler:
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
	/* end thread_fiq_handler */

__thread_irq_handler:
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
	/* end thread_irq_handler */

	/*
	 * Returns to user mode.
	 * Expects to be jumped to with lr pointing to the user space
	 * address to jump to and spsr holding the desired cpsr. Async
	 * abort, irq and fiq should be masked.
	 */
eret_to_user_mode:
	write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	write_tpidruro r1
#endif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r0, =thread_user_kcode_offset
	ldr	r0, [r0]
	read_vbar r1
	sub	r1, r1, r0
	write_vbar r1
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r1, =1f
	sub	r1, r1, r0
	bx	r1
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r0, r1
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
#endif
	/* switch to user ASID */
	orr	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r0, r1
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r0
	orr	r0, r0, #TTBCR_PD1
	write_ttbcr r0
	isb
#endif
	read_contextidr r0
	orr	r0, r0, #BIT(0)
	write_contextidr r0
	isb
#endif /*!CFG_WITH_LPAE*/

	read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	read_tpidruro r1
#endif

	movs	pc, lr

	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active,
	 * which means executing with the reduced mapping, so the code
	 * needs to be located here together with the vector.
	 */
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	push	{r4-r7}

	/* Mask all exceptions */
	mrs	r4, cpsr	/* This register must be preserved */
	cpsid	aif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r2, =thread_user_kcode_offset
	ldr	r2, [r2]
	read_vbar r5		/* This register must be preserved */
	sub	r3, r5, r2
	write_vbar r3
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r3, =1f
	sub	r3, r3, r2
	bx	r3
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r6, r7	/* These registers must be preserved */
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r2, r6, #CORE_MMU_L1_TBL_OFFSET
#endif
	/* switch to user ASID */
	orr	r3, r7, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r2, r3
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r6	/* This register must be preserved */
	orr	r2, r6, #TTBCR_PD1
	write_ttbcr r2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	read_contextidr r7	/* This register must be preserved */
	orr	r2, r7, #BIT(0)
	write_contextidr r2
	isb
#endif /*!CFG_WITH_LPAE*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate minimum icache line size, result in r2 */
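	/*
	 * CTR.IminLine, masked out below, is the log2 of the number of
	 * 4-byte words in the smallest instruction cache line, so e.g.
	 * IminLine = 4 gives 4 << 4 = 64-byte lines.
	 */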
	read_ctr r3
	and	r3, r3, #CTR_IMINLINE_MASK
	mov	r2, #CTR_WORD_SIZE
	lsl	r2, r2, r3

	add	r1, r0, r1
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	write_icimvau r0
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b

	/* Invalidate entire branch predictor array inner shareable */
	write_bpiallis

	dsb	ishst
	isb

#ifdef CFG_WITH_LPAE
	write_ttbr0_64bit r6, r7
	isb
#else /*!CFG_WITH_LPAE*/
	write_contextidr r7
	isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_ttbcr r6
	isb
#endif
#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_vbar r5
	isb
	/*
	 * The PC is adjusted unconditionally to guard against the case
	 * where there was an FIQ just before we did the "cpsid aif".
	 */
	ldr	r0, =1f
	bx	r0
1:
#endif

	msr	cpsr_fsxc, r4	/* Restore exceptions */
	pop	{r4-r7}
	bx	lr		/* End of icache_inv_user_range() */

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect