xref: /optee_os/core/arch/arm/kernel/thread_a32.S (revision a1d5c81f8834a9d2c6f4372cce2e59e70e709121)
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread_defs.h>
#include <mm/core_mmu.h>

#include "thread_private.h"
	.syntax unified
	.arch_extension sec

	.macro cmp_spsr_user_mode reg:req
		/*
		 * We only need to test the lower 4 bits as bit 4 (0x10)
		 * is always set, so Z is set if and only if the mode in
		 * \reg is user mode (0x10).
		 */
		tst	\reg, #0x0f
	.endm
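/*
 * Each of the functions below switches to the named processor mode just
 * long enough to load that mode's banked stack pointer from r0, then
 * restores the caller's mode (saved in r1) before returning.
 */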
FUNC thread_set_abt_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_abt_sp

FUNC thread_set_und_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_UND
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_und_sp

FUNC thread_set_irq_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_fiq_sp
/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
UNWIND(	.cantunwind)
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4

	cps	#CPSR_MODE_SVC
	ldr	r1, [r12], #4
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4
	msr	spsr_fsxc, r1

	ldm	r12, {r1, r2}

	/*
	 * Switch to a mode other than SVC since we need to set spsr in
	 * order to return into the old state properly, and it may be SVC
	 * mode we're returning to.
	 */
	cps	#CPSR_MODE_ABT
	cmp_spsr_user_mode r2
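	/*
	 * Z is set if we're returning to user mode. For kernel mode
	 * returns the "movsne pc, lr" below performs the exception
	 * return directly (spsr is copied to cpsr); user mode returns
	 * fall through to eret_to_user_mode, which first switches back
	 * to the user mode memory mapping.
	 */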
	mov	lr, r1
	msr	spsr_fsxc, r2
	ldm	r0, {r0-r12}
	movsne	pc, lr
	b	eret_to_user_mode
END_FUNC thread_resume
/*
 * Disables IRQ and FIQ and saves the state of the thread from FIQ mode,
 * which has banked r8-r12 registers. Returns the original CPSR.
 */
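/*
 * Note: thread_save_state_fiq() and thread_save_state() below store the
 * registers in the order of the fields in struct thread_ctx_regs:
 * r0-r12, user mode sp/lr, then SVC mode spsr/sp/lr. The pc and cpsr
 * fields are not written here.
 */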
LOCAL_FUNC thread_save_state_fiq , :
UNWIND(	.cantunwind)
	mov	r9, lr

	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r8, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}

	cps	#CPSR_MODE_SYS
	stm	r0!, {r8-r12}
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	/* back to fiq mode */
	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r8			/* Return original CPSR */
	bx	r9
END_FUNC thread_save_state_fiq
/*
 * Disables IRQ and FIQ and saves the state of the thread, returns the
 * original CPSR.
 */
FUNC thread_save_state , :
UNWIND(	.cantunwind)
	push	{r12, lr}
	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps	#CPSR_MODE_SYS
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
END_FUNC thread_save_state

/*
 * unsigned long thread_smc(unsigned long func_id, unsigned long a1,
 *			    unsigned long a2, unsigned long a3)
 */
FUNC thread_smc , :
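	/*
	 * The arguments arrive in r0-r3 and are passed unmodified to the
	 * secure monitor; per the SMC calling convention the result comes
	 * back in r0, which is also the C return value.
	 */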
	smc	#0
	bx	lr
END_FUNC thread_smc

FUNC thread_init_vbar , :
	/* Set vector (VBAR) */
	write_vbar r0
	bx	lr
END_FUNC thread_init_vbar
DECLARE_KEEP_PAGER thread_init_vbar
/*
 * Below are low level routines handling entry and return from user mode.
 *
 * thread_enter_user_mode() saves all the registers that user mode can
 * change so kernel mode can restore the needed registers when resuming
 * execution after the call to thread_enter_user_mode() has returned.
 * thread_enter_user_mode() doesn't return directly since it enters user
 * mode instead; it's thread_unwind_user_mode() that does the returning,
 * by restoring the registers saved by thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to its
 * caller: the user TA calls _utee_return, the user TA calls _utee_panic,
 * or an abort occurs.
 *
 * Calls to _utee_return or _utee_panic are handled as:
 * __thread_svc_handler() -> thread_svc_handler() -> tee_svc_do_call(),
 * which calls syscall_return() or syscall_panic().
 *
 * These function calls return normally, except thread_svc_handler()
 * which is an exception handling routine and therefore reads the return
 * address and SPSR to restore from the stack. syscall_return() and
 * syscall_panic() change the return address and SPSR used by
 * thread_svc_handler() so that, instead of returning into user mode as
 * with other syscalls, it returns into thread_unwind_user_mode() in
 * kernel mode. When thread_svc_handler() returns, the stack pointer is
 * at the point where thread_enter_user_mode() left it, so this is where
 * thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way, but by thread_abort_handler()
 * instead: when the pager sees that it's an abort from user mode that
 * can't be handled, it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */

/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 *
 * This function depends on being called with exceptions masked.
 */
FUNC __thread_enter_user_mode , :
UNWIND(	.cantunwind)
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function had returned. This is also used in
	 * syscall_panic().
	 *
	 * If the stack usage of this function is changed
	 * thread_unwind_user_mode() has to be updated.
	 */
	push	{r4-r12,lr}

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r4, sp
	ldr	sp, [r0, #THREAD_CTX_REGS_USR_SP]
	cps	#CPSR_MODE_SVC

	push	{r1, r2, r4, r5}
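	/*
	 * r1 (&exit_status0), r2 (&exit_status1) and r4 (the old user sp)
	 * are picked up again by thread_unwind_user_mode(); r5 appears to
	 * be pushed only to keep the stack 8-byte aligned.
	 */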

	/* Prepare user mode entry via eret_to_user_mode */
	ldr	lr, [r0, #THREAD_CTX_REGS_PC]
	ldr	r4, [r0, #THREAD_CTX_REGS_CPSR]
	msr	spsr_fsxc, r4

	ldm	r0, {r0-r12}

	b	eret_to_user_mode
END_FUNC __thread_enter_user_mode
/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *              uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND(	.cantunwind)
	/* Match push {r1, r2, r4, r5} in __thread_enter_user_mode() */
	pop	{r4-r7}
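	/*
	 * r4 = &exit_status0, r5 = &exit_status1, r6 = old user sp,
	 * r7 = old r5 (alignment filler)
	 */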
	str	r1, [r4]
	str	r2, [r5]

	/* Restore old user sp */
	cps	#CPSR_MODE_SYS
	mov	sp, r6
	cps	#CPSR_MODE_SVC

	/* Match push {r4-r12,lr} in __thread_enter_user_mode() */
	pop	{r4-r12,pc}
END_FUNC thread_unwind_user_mode
	.macro maybe_restore_mapping
		/*
		 * This macro is a bit hard to read due to all the ifdefs:
		 * we're testing for two different configs, which makes four
		 * different combinations.
		 *
		 * - With LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 * - Without LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 */
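		/*
		 * The convention used below is that the low bit of the
		 * ASID (with LPAE) or of CONTEXTIDR (without LPAE) is set
		 * while the user mode (reduced) mapping is active. If the
		 * bit is clear we entered from kernel mode with the full
		 * mapping already in place and can skip the restore.
		 */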

		/*
		 * At this point we can't rely on any memory being writable
		 * yet, so we're using TPIDRPRW to store r0, and, when LPAE
		 * or CFG_CORE_UNMAP_CORE_AT_EL0 is enabled, TPIDRURO to
		 * store r1 too.
		 */
		write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		write_tpidruro r1
#endif

#ifdef CFG_WITH_LPAE
		read_ttbr0_64bit r0, r1
		tst	r1, #BIT(TTBR_ASID_SHIFT - 32)
		beq	11f

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/*
		 * Update the mapping to use the full kernel mode mapping.
		 * Since the translation table could reside above 4GB we'll
		 * have to use 64-bit arithmetic.
		 */
		subs	r0, r0, #CORE_MMU_L1_TBL_OFFSET
		sbc	r1, r1, #0
#endif
		bic	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
		write_ttbr0_64bit r0, r1
		isb

#else /*!CFG_WITH_LPAE*/
		read_contextidr r0
		tst	r0, #1
		beq	11f

		/* Update the mapping to use the full kernel mode mapping. */
		bic	r0, r0, #1
		write_contextidr r0
		isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		read_ttbcr r0
		bic	r0, r0, #TTBCR_PD1
		write_ttbcr r0
		isb
#endif

#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		ldr	r0, =thread_user_kcode_offset
		ldr	r0, [r0]
		read_vbar r1
		add	r1, r1, r0
		write_vbar r1
		isb

	11:	/*
		 * The PC is adjusted unconditionally to guard against the
		 * case where there was an FIQ just before we did the
		 * "cpsid aif".
		 */
		ldr	r0, =22f
		bx	r0
	22:
#else
	11:
#endif
		read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		read_tpidruro r1
#endif
	.endm

/* The handler of native interrupts. */
.macro	native_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	/*
	 * FIQ and IRQ have a +4 offset for lr compared to the preferred
	 * return address.
	 */
	sub	lr, lr, #4

	/*
	 * We're always saving {r0-r3}. In IRQ mode we're also saving r12.
	 * In FIQ mode we're also saving the banked FIQ registers {r8-r12}
	 * because the secure monitor doesn't save those. The treatment of
	 * the banked FIQ registers is somewhat analogous to the lazy save
	 * of VFP registers.
	 */
	.ifc	\mode\(),fiq
	push	{r0-r3, r8-r12, lr}
	.else
	push	{r0-r3, r12, lr}
	.endif

	bl	thread_check_canaries
	bl	itr_core_handler

	mrs	r0, spsr
	cmp_spsr_user_mode r0

	.ifc	\mode\(),fiq
	pop	{r0-r3, r8-r12, lr}
	.else
	pop	{r0-r3, r12, lr}
	.endif

	movsne	pc, lr
	b	eret_to_user_mode
.endm

/* The handler of foreign interrupts. */
.macro foreign_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	sub	lr, lr, #4
	push	{r12}

	.ifc	\mode\(),fiq
	/*
	 * If a foreign (non-secure) interrupt is received as a FIQ we need
	 * to check that we're in a saveable state or if we need to mask
	 * the interrupt to be handled later.
	 *
	 * The window when this is needed is quite narrow: it's between
	 * entering the exception vector and until the "cpsid" instruction
	 * of the handler has been executed.
	 *
	 * Currently we can save the state properly if the FIQ is received
	 * while in user or svc (kernel) mode.
	 *
	 * If we're returning to abort, undef or irq mode we're returning
	 * with the mapping restored. This is OK since, before the handler
	 * we're returning to eventually returns to user mode, the reduced
	 * mapping will be restored.
	 */
	mrs	r12, spsr
	and	r12, r12, #ARM32_CPSR_MODE_MASK
	cmp	r12, #ARM32_CPSR_MODE_USR
	cmpne	r12, #ARM32_CPSR_MODE_SVC
	beq	1f
	mrs	r12, spsr
	orr	r12, r12, #ARM32_CPSR_F
	msr	spsr_fsxc, r12
	pop	{r12}
	movs	pc, lr
1:
	.endif

	push	{lr}

	.ifc	\mode\(),fiq
	bl	thread_save_state_fiq
	.else
	bl	thread_save_state
	.endif

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which cache entries have
	 * been used. We're relying on the secure monitor/dispatcher to
	 * take care of the BTB.
	 */
	mov	r0, #DCACHE_OP_CLEAN_INV
	bl	dcache_op_louis
	write_iciallu
#endif

	/*
	 * Use SP_abt to update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_TMP;
	 */
	cps	#CPSR_MODE_ABT
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_TMP
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	.ifc	\mode\(),fiq
	cps	#CPSR_MODE_FIQ
	.else
	cps	#CPSR_MODE_IRQ
	.endif

	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	r1, spsr
	pop	{r2}
	pop	{r12}
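	/*
	 * thread_state_suspend(flags, cpsr, pc): r1 holds the interrupted
	 * cpsr (from spsr) and r2 the interrupted pc (the lr pushed
	 * above). It returns the suspended thread's index in r0.
	 */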
	blx	thread_state_suspend

	/*
	 * Switch to SVC mode and copy the current stack pointer, as it
	 * already is the tmp stack.
	 */
	mov	r1, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r1

	/* Passing thread index in r0 */
	b	thread_foreign_intr_exit
.endm

	.align	5
FUNC thread_excp_vect , :
UNWIND(	.cantunwind)
	b	.			/* Reset			*/
	b	__thread_und_handler	/* Undefined instruction	*/
	b	__thread_svc_handler	/* System call			*/
	b	__thread_pabort_handler	/* Prefetch abort		*/
	b	__thread_dabort_handler	/* Data abort			*/
	b	.			/* Reserved			*/
	b	__thread_irq_handler	/* IRQ				*/
	b	__thread_fiq_handler	/* FIQ				*/
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8-byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * To avoid unexpected speculation we need to invalidate
		 * the branch predictor before we do the first branch. It
		 * doesn't matter if it's a conditional or an unconditional
		 * branch, speculation can still occur.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * we enter via. This is done by adding 1 to SP in each
		 * entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		cpsid	aif		/* 0:000 FIQ			*/
	.endm

	.align	5
	.global thread_excp_vect_workaround_a15
thread_excp_vect_workaround_a15:
	vector_prologue_spectre
	write_tpidrprw r0
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.align	5
	.global thread_excp_vect_workaround
thread_excp_vect_workaround:
	vector_prologue_spectre
	write_tpidrprw r0
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:	and	r0, sp, #(BIT(0) | BIT(1) | BIT(2))
	bic	sp, sp, #(BIT(0) | BIT(1) | BIT(2))
	add	pc, pc, r0, LSL #3
	nop
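	/*
	 * Each slot below is 8 bytes (two instructions). The "add pc"
	 * above reads PC as the address of the first slot, so slot N is
	 * reached when SP[2:0] == N: FIQ for 0 up to Reset for 7,
	 * mirroring the prologue above.
	 */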
	read_tpidrprw r0
	b	__thread_fiq_handler	/* FIQ				*/
	read_tpidrprw r0
	b	__thread_irq_handler	/* IRQ				*/
	read_tpidrprw r0
	b	.			/* Reserved			*/
	read_tpidrprw r0
	b	__thread_dabort_handler	/* Data abort			*/
	read_tpidrprw r0
	b	__thread_pabort_handler	/* Prefetch abort		*/
	read_tpidrprw r0
	b	__thread_svc_handler	/* System call			*/
	read_tpidrprw r0
	b	__thread_und_handler	/* Undefined instruction	*/
	read_tpidrprw r0
	b	.			/* Reset			*/
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
__thread_und_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
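	/*
	 * Adjust lr to point at the undefined instruction: it's ahead by
	 * 2 in Thumb state and by 4 in ARM state.
	 */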
	mrs	r1, spsr
	tst	r1, #CPSR_T
	subne	lr, lr, #2
	subeq	lr, lr, #4
	mov	r0, #ABORT_TYPE_UNDEF
	b	__thread_abort_common

__thread_dabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
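	/* For data aborts lr holds the address of the aborting instruction + 8 */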
	sub	lr, lr, #8
	mov	r0, #ABORT_TYPE_DATA
	b	__thread_abort_common

__thread_pabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #4
	mov	r0, #ABORT_TYPE_PREFETCH

__thread_abort_common:
	/*
	 * At this label:
	 * cpsr is in mode undef or abort
	 * sp is still pointing to struct thread_core_local belonging to
	 * this core.
	 * {r0, r1} are saved in struct thread_core_local pointed to by sp
	 * {r2-r11, ip} are untouched.
	 * r0 holds the first argument for abort_handler()
	 */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_ABORT

	/*
	 * Select stack and update flags accordingly
	 *
	 * Normal case:
	 * If the abort stack is unused select that.
	 *
	 * Fatal error handling:
	 * If we're already using the abort stack, as noted by bit
	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
	 * field, we select the temporary stack instead to be able to
	 * make a stack trace of the abort in abort mode.
	 *
	 * r1 is initialized as a temporary stack pointer until we've
	 * switched to system mode.
	 */
	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	orrne	r1, r1, #THREAD_CLF_TMP /* flags |= THREAD_CLF_TMP; */
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]

	/*
	 * Store registers on the stack fitting struct thread_abort_regs,
	 * starting from the end of the struct:
	 * {r2-r11, ip}
	 * Load the content of the previously saved {r0-r1} and store
	 * it up to the pad field.
	 * After this only {usr_sp, usr_lr} are missing in the struct.
	 */
	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
	/* Push the original {r0-r1} on the selected stack */
	stmdb	r1!, {r2-r3}
	mrs	r3, spsr
	/* Push {pad, spsr, elr} on the selected stack */
	stmdb	r1!, {r2, r3, lr}

	cps	#CPSR_MODE_SYS
	str	lr, [r1, #-4]!
	str	sp, [r1, #-4]!
	mov	sp, r1
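	/*
	 * sp (and r1) now point at the filled in struct thread_abort_regs.
	 * r0 (the abort type) and r1 are the two arguments to
	 * abort_handler().
	 */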
	bl	abort_handler

	mov	ip, sp
	ldr	sp, [ip], #4
	ldr	lr, [ip], #4

	/*
	 * Even if we entered via CPSR_MODE_UND, we are returning via
	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
	 * here.
	 */
	cps	#CPSR_MODE_ABT
	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
	msr	spsr_fsxc, r1

	/* Update core local flags */
	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	cmp_spsr_user_mode r1
	ldm	ip, {r0-r11, ip}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_abort_common */
__thread_svc_handler:
	cpsid	aif

	maybe_restore_mapping

	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp
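	/*
	 * sp now points at the saved {spsr, r0-r7, lr}; it is passed in
	 * r0 to thread_svc_handler(), laid out as its struct
	 * thread_svc_regs argument.
	 */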
	bl	thread_svc_handler
	cpsid	aif	/* In case something was unmasked */
	pop	{r0}
	msr	spsr_fsxc, r0
	cmp_spsr_user_mode r0
	pop	{r0-r7, lr}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_svc_handler */
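/*
 * With GICv3 (CFG_ARM_GICV3) non-secure interrupts are delivered as FIQ
 * and secure interrupts as IRQ while running in the secure state, so the
 * native and foreign handlers below swap places compared to a GICv2
 * configuration.
 */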
__thread_fiq_handler:
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
	/* end thread_fiq_handler */

__thread_irq_handler:
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
	/* end thread_irq_handler */

	/*
	 * Returns to user mode.
	 * Expects to be jumped to with lr pointing to the user space
	 * address to jump to and spsr holding the desired cpsr. Async
	 * abort, irq and fiq should be masked.
	 */
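	/*
	 * This is the inverse of maybe_restore_mapping above: the user
	 * mode ASID (or CONTEXTIDR bit) is selected again and, with
	 * CFG_CORE_UNMAP_CORE_AT_EL0, VBAR is moved back into the reduced
	 * mapping before the exception return.
	 */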
eret_to_user_mode:
	write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	write_tpidruro r1
#endif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r0, =thread_user_kcode_offset
	ldr	r0, [r0]
	read_vbar r1
	sub	r1, r1, r0
	write_vbar r1
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r1, =1f
	sub	r1, r1, r0
	bx	r1
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r0, r1
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
#endif
	/* switch to user ASID */
	orr	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r0, r1
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r0
	orr	r0, r0, #TTBCR_PD1
	write_ttbcr r0
	isb
#endif
	read_contextidr r0
	orr	r0, r0, #BIT(0)
	write_contextidr r0
	isb
#endif /*!CFG_WITH_LPAE*/

	read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	read_tpidruro r1
#endif

	movs	pc, lr
	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active;
	 * this means executing with the reduced mapping, and the code
	 * needs to be located here together with the vector.
	 */
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	push	{r4-r7}

	/* Mask all exceptions */
	mrs	r4, cpsr	/* This register must be preserved */
	cpsid	aif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r2, =thread_user_kcode_offset
	ldr	r2, [r2]
	read_vbar r5		/* This register must be preserved */
	sub	r3, r5, r2
	write_vbar r3
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r3, =1f
	sub	r3, r3, r2
	bx	r3
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r6, r7	/* These registers must be preserved */
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r2, r6, #CORE_MMU_L1_TBL_OFFSET
#endif
	/* switch to user ASID */
	orr	r3, r7, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r2, r3
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r6	/* This register must be preserved */
	orr	r2, r6, #TTBCR_PD1
	write_ttbcr r2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	read_contextidr r7	/* This register must be preserved */
	orr	r2, r7, #BIT(0)
	write_contextidr r2
	isb
#endif /*!CFG_WITH_LPAE*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate minimum icache line size, result in r2 */
	read_ctr r3
	and	r3, r3, #CTR_IMINLINE_MASK
	mov	r2, #CTR_WORD_SIZE
	lsl	r2, r2, r3
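	/* I.e. line size in bytes = CTR_WORD_SIZE << CTR.IminLine */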

	add	r1, r0, r1
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	write_icimvau r0
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b

	/* Invalidate the entire branch predictor array, Inner Shareable */
	write_bpiallis

	dsb	ishst
	isb

#ifdef CFG_WITH_LPAE
	write_ttbr0_64bit r6, r7
	isb
#else /*!CFG_WITH_LPAE*/
	write_contextidr r7
	isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_ttbcr r6
	isb
#endif
#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_vbar r5
	isb
	/*
	 * The PC is adjusted unconditionally to guard against the
	 * case where there was an FIQ just before we did the "cpsid aif".
	 */
	ldr	r0, =1f
	bx	r0
1:
#endif

	msr	cpsr_fsxc, r4	/* Restore exceptions */
	pop	{r4-r7}
	bx	lr		/* End of icache_inv_user_range() */

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect