xref: /optee_os/core/arch/arm/kernel/thread_a32.S (revision 827be46c173f31c57006af70ca3a15a5b1a7fba3)
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread_defs.h>
#include <mm/core_mmu.h>

#include "thread_private.h"

	.syntax unified
	.arch_extension sec

	.macro cmp_spsr_user_mode reg:req
		/*
		 * We're only testing the lower 4 bits as the 0x10 bit
		 * is always set.
		 */
		tst	\reg, #0x0f
	.endm

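	/*
	 * Roughly what this macro tests, expressed as a C sketch (the
	 * helper name below is illustrative only):
	 *
	 *	static inline bool spsr_is_user_mode(uint32_t spsr)
	 *	{
	 *		// User mode is 0x10, the only mode with M[3:0] == 0
	 *		return (spsr & 0x0f) == 0;
	 *	}
	 */
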
FUNC thread_set_abt_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_abt_sp

FUNC thread_set_und_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_UND
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_und_sp

FUNC thread_set_irq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_fiq_sp

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4

	cps	#CPSR_MODE_SVC
	ldr	r1, [r12], #4
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4
	msr	spsr_fsxc, r1

	ldm	r12, {r1, r2}

	/*
	 * Switch to some mode other than SVC since we need to set the
	 * SPSR in order to return to the old state properly, and it may
	 * be SVC mode we're returning to.
	 */
	cps	#CPSR_MODE_ABT
	cmp_spsr_user_mode r2
	mov	lr, r1
	msr	spsr_fsxc, r2
	ldm	r0, {r0-r12}
	movsne	pc, lr
	b	eret_to_user_mode
UNWIND(	.fnend)
END_FUNC thread_resume

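/*
 * The loads above assume that struct thread_ctx_regs is laid out as
 * 32-bit words in roughly this order (a sketch derived from the code,
 * see the thread headers for the authoritative definition):
 *
 *	struct thread_ctx_regs_sketch {
 *		uint32_t r[13];		// r0-r12, restored last
 *		uint32_t usr_sp;	// loaded while in SYS mode
 *		uint32_t usr_lr;	// loaded while in SYS mode
 *		uint32_t svc_spsr;	// written to SPSR while in SVC mode
 *		uint32_t svc_sp;	// loaded while in SVC mode
 *		uint32_t svc_lr;	// loaded while in SVC mode
 *		uint32_t pc;		// resume address
 *		uint32_t cpsr;		// mode and flags to resume with
 *	};
 */
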
/*
 * Disables IRQ and FIQ and saves the state of the thread from FIQ mode,
 * which has banked r8-r12 registers; returns the original CPSR.
 */
LOCAL_FUNC thread_save_state_fiq , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mov	r9, lr

	/*
	 * Uses the stack for temporary storage, while storing the needed
	 * context in the thread context struct.
	 */

	mrs	r8, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}

	cps     #CPSR_MODE_SYS
	stm	r0!, {r8-r12}
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps     #CPSR_MODE_SVC
	mrs     r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	/* back to fiq mode */
	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r8			/* Return original CPSR */
	bx	r9
UNWIND(	.fnend)
END_FUNC thread_save_state_fiq

/*
 * Disables IRQ and FIQ and saves the state of the thread; returns the
 * original CPSR.
 */
FUNC thread_save_state , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r12, lr}
	/*
	 * Uses the stack for temporary storage, while storing the needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps     #CPSR_MODE_SYS
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps     #CPSR_MODE_SVC
	mrs     r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_save_state

/*
 * unsigned long thread_smc(unsigned long func_id, unsigned long a1,
 *			    unsigned long a2, unsigned long a3)
 */
FUNC thread_smc , :
UNWIND(	.fnstart)
	smc	#0
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_smc

FUNC thread_init_vbar , :
UNWIND(	.fnstart)
	/* Set vector (VBAR) */
	write_vbar r0
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_init_vbar
DECLARE_KEEP_PAGER thread_init_vbar

/*
 * Below are low level routines handling entry and return from user mode.
 *
 * thread_enter_user_mode() saves all registers that user mode can change
 * so that kernel mode can restore the needed registers when resuming
 * execution after the call to thread_enter_user_mode() has returned.
 * thread_enter_user_mode() doesn't return directly since it enters user
 * mode instead; it's thread_unwind_user_mode() that does the returning
 * by restoring the registers saved by thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to its
 * caller: the user TA calls _utee_return, the user TA calls _utee_panic,
 * or an abort occurs.
 *
 * Calls to _utee_return or _utee_panic are handled as:
 * __thread_svc_handler() -> thread_svc_handler() -> tee_svc_do_call(),
 * which calls syscall_return() or syscall_panic().
 *
 * These function calls return normally, except thread_svc_handler(),
 * which is an exception handling routine and therefore reads the return
 * address and SPSR to restore from the stack. syscall_return() and
 * syscall_panic() change the return address and SPSR used by
 * thread_svc_handler() so that, instead of returning into user mode as
 * with other syscalls, it returns into thread_unwind_user_mode() in
 * kernel mode. When thread_svc_handler() returns, the stack pointer is
 * at the point where thread_enter_user_mode() left it, so this is where
 * thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way, but by thread_abort_handler()
 * instead: when the pager sees that it's an abort from user mode that
 * can't be handled, it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */

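/*
 * The three return paths described above, sketched for illustration only
 * (names are those used in this file and in the comment above, the exact
 * C-level call chains live in other files):
 *
 *	__thread_enter_user_mode(regs, ...)
 *	  -> user TA executes
 *	     a) _utee_return: __thread_svc_handler -> thread_svc_handler
 *	                      -> tee_svc_do_call -> syscall_return
 *	     b) _utee_panic:  same path, but ending in syscall_panic
 *	     c) fatal abort:  __thread_dabort_handler etc. -> abort_handler
 *	  -> thread_unwind_user_mode() -> return to the caller of
 *	     thread_enter_user_mode()
 */
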
/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 *
 * This function depends on being called with exceptions masked.
 */
FUNC __thread_enter_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function had returned. This is also used in
	 * syscall_panic().
	 *
	 * If the stack usage of this function is changed,
	 * thread_unwind_user_mode() has to be updated.
	 */
	push    {r4-r12,lr}

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r4, sp
	ldr	sp, [r0, #THREAD_CTX_REGS_USR_SP]
	cps	#CPSR_MODE_SVC

	push	{r1, r2, r4, r5}

	/* Prepare user mode entry via eret_to_user_mode */
	ldr	lr, [r0, #THREAD_CTX_REGS_PC]
	ldr	r4, [r0, #THREAD_CTX_REGS_CPSR]
	msr     spsr_fsxc, r4

	ldm	r0, {r0-r12}

	b	eret_to_user_mode
UNWIND(	.fnend)
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *              uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* Match push {r1, r2, r4, r5} in __thread_enter_user_mode() */
	pop	{r4-r7}
	str	r1, [r4]
	str	r2, [r5]

	/* Restore old user sp */
	cps	#CPSR_MODE_SYS
	mov	sp, r6
	cps	#CPSR_MODE_SVC

	/* Match push {r4-r12,lr} in __thread_enter_user_mode() */
	pop     {r4-r12,pc}
UNWIND(	.fnend)
END_FUNC thread_unwind_user_mode

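/*
 * SVC stack layout at the point where thread_unwind_user_mode() starts
 * executing, as set up by __thread_enter_user_mode() (a sketch derived
 * from the pushes above, offsets relative to the SVC sp):
 *
 *	sp + 0x00: saved r1, pointer for exit_status0 (popped into r4)
 *	sp + 0x04: saved r2, pointer for exit_status1 (popped into r5)
 *	sp + 0x08: saved r4, the old user mode sp (popped into r6)
 *	sp + 0x0c: saved r5, scratch (popped into r7 and ignored)
 *	sp + 0x10: the caller's r4-r12 and lr, restored by
 *	           "pop {r4-r12,pc}"
 */
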
	.macro maybe_restore_mapping
		/*
		 * This macro is a bit hard to read due to all the ifdefs,
		 * we're testing for two different configs which makes four
		 * different combinations.
		 *
		 * - With LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 * - Without LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 */

		/*
		 * At this point we can't rely on any memory being writable
		 * yet, so we're using TPIDRPRW to store r0 and, if built
		 * with LPAE or CFG_CORE_UNMAP_CORE_AT_EL0, TPIDRURO to
		 * store r1 too.
		 */
		write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		write_tpidruro r1
#endif

#ifdef CFG_WITH_LPAE
		read_ttbr0_64bit r0, r1
		tst	r1, #BIT(TTBR_ASID_SHIFT - 32)
		beq	11f

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/*
		 * Update the mapping to use the full kernel mode mapping.
		 * Since the translation table could reside above 4GB we'll
		 * have to use 64-bit arithmetic.
		 */
		subs	r0, r0, #CORE_MMU_L1_TBL_OFFSET
		sbc	r1, r1, #0
#endif
		bic	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
		write_ttbr0_64bit r0, r1
		isb

#else /*!CFG_WITH_LPAE*/
		read_contextidr r0
		tst	r0, #1
		beq	11f

		/* Update the mapping to use the full kernel mode mapping. */
		bic	r0, r0, #1
		write_contextidr r0
		isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		read_ttbcr r0
		bic	r0, r0, #TTBCR_PD1
		write_ttbcr r0
		isb
#endif

#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		ldr	r0, =thread_user_kcode_offset
		ldr	r0, [r0]
		read_vbar r1
		add	r1, r1, r0
		write_vbar r1
		isb

	11:	/*
		 * The PC is adjusted unconditionally to guard against the
		 * case where there was an FIQ just before we did the
		 * "cpsid aif".
		 */
		ldr	r0, =22f
		bx	r0
	22:
#else
	11:
#endif
		read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		read_tpidruro r1
#endif
	.endm

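/*
 * Condensed restatement of what maybe_restore_mapping does in each
 * configuration (derived from the code above):
 *
 * - LPAE: the kernel mapping is selected by clearing the ASID bit in
 *   TTBR0; with CFG_CORE_UNMAP_CORE_AT_EL0 the table address in TTBR0
 *   is also moved back by CORE_MMU_L1_TBL_OFFSET to the full table.
 * - Non-LPAE: the kernel mapping is selected by clearing bit 0 in
 *   CONTEXTIDR; with CFG_CORE_UNMAP_CORE_AT_EL0 TTBCR.PD1 is also
 *   cleared to re-enable translation table walks via TTBR1.
 * - CFG_CORE_UNMAP_CORE_AT_EL0 (both cases): VBAR is moved back by
 *   thread_user_kcode_offset to the full-mapping vector and the PC is
 *   adjusted to continue executing there.
 */
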
/* The handler of native interrupt. */
.macro	native_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	/*
	 * FIQ and IRQ have a +4 offset for lr compared to the preferred
	 * return address.
	 */
	sub     lr, lr, #4

	/*
	 * We're always saving {r0-r3}. In IRQ mode we're saving r12 also.
	 * In FIQ mode we're also saving the banked fiq registers {r8-r12}
	 * because the secure monitor doesn't save those. The treatment of
	 * the banked fiq registers is somewhat analogous to the lazy save
	 * of VFP registers.
	 */
	.ifc	\mode\(),fiq
	push	{r0-r3, r8-r12, lr}
	.else
	push	{r0-r3, r12, lr}
	.endif

	bl	thread_check_canaries
	bl	itr_core_handler

	mrs	r0, spsr
	cmp_spsr_user_mode r0

	.ifc	\mode\(),fiq
	pop	{r0-r3, r8-r12, lr}
	.else
	pop	{r0-r3, r12, lr}
	.endif

	movsne	pc, lr
	b	eret_to_user_mode
.endm

/* The handler of foreign interrupt. */
.macro foreign_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	sub	lr, lr, #4
	push	{r12}

	.ifc	\mode\(),fiq
	/*
	 * If a foreign (non-secure) interrupt is received as a FIQ we
	 * need to check whether we're in a state that can be saved, or
	 * whether we need to mask the interrupt and handle it later.
	 *
	 * The window when this is needed is quite narrow; it lasts from
	 * entering the exception vector until the "cpsid" instruction of
	 * the handler has been executed.
	 *
	 * Currently we can save the state properly if the FIQ is received
	 * while in user or svc (kernel) mode.
	 *
	 * If we're returning to abort, undef or irq mode we're returning
	 * with the mapping restored. This is OK since, before the handler
	 * we're returning to eventually returns to user mode, the reduced
	 * mapping will be restored.
	 */
	mrs	r12, spsr
	and	r12, r12, #ARM32_CPSR_MODE_MASK
	cmp	r12, #ARM32_CPSR_MODE_USR
	cmpne	r12, #ARM32_CPSR_MODE_SVC
	beq	1f
	mrs	r12, spsr
	orr	r12, r12, #ARM32_CPSR_F
	msr	spsr_fsxc, r12
	pop	{r12}
	movs	pc, lr
1:
	.endif

	push	{lr}

	.ifc	\mode\(),fiq
	bl	thread_save_state_fiq
	.else
	bl	thread_save_state
	.endif

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which cache entries have been
	 * used. We're relying on the secure monitor/dispatcher to take
	 * care of the BTB.
	 */
	mov	r0, #DCACHE_OP_CLEAN_INV
	bl	dcache_op_louis
	write_iciallu
#endif

	/*
	 * Use SP_abt to update the core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_TMP;
	 */
	cps     #CPSR_MODE_ABT
	ldr     r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl     r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr     r1, r1, #THREAD_CLF_TMP
	str     r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	.ifc    \mode\(),fiq
	cps     #CPSR_MODE_FIQ
	.else
	cps     #CPSR_MODE_IRQ
	.endif

	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	r1, spsr
	pop	{r2}
	pop	{r12}
	blx	thread_state_suspend

	/*
	 * Switch to SVC mode and copy the current stack pointer, as it
	 * already is the tmp stack.
	 */
	mov	r1, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r1

	/* Passing thread index in r0 */
	b	thread_foreign_intr_exit
.endm

        .align	5
FUNC thread_excp_vect , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.			/* Reset			*/
	b	__thread_und_handler	/* Undefined instruction	*/
	b	__thread_svc_handler	/* System call			*/
	b	__thread_pabort_handler	/* Prefetch abort		*/
	b	__thread_dabort_handler	/* Data abort			*/
	b	.			/* Reserved			*/
	b	__thread_irq_handler	/* IRQ				*/
	b	__thread_fiq_handler	/* FIQ				*/
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8-byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * To avoid unexpected speculation we need to invalidate
		 * the branch predictor before we do the first branch. It
		 * doesn't matter if it's a conditional or an unconditional
		 * branch, speculation can still occur.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * we enter via.  This is done by adding 1 to SP in each
		 * entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		cpsid   aif		/* 0:000 FIQ			*/
	.endm

        .align	5
	.global thread_excp_vect_workaround_a15
thread_excp_vect_workaround_a15:
	vector_prologue_spectre
	write_tpidrprw r0
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

        .align	5
	.global thread_excp_vect_workaround
thread_excp_vect_workaround:
	vector_prologue_spectre
	write_tpidrprw r0
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:	and	r0, sp, #(BIT(0) | BIT(1) | BIT(2))
	bic	sp, sp, #(BIT(0) | BIT(1) | BIT(2))
	add	pc, pc, r0, LSL #3
	nop

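	/*
	 * How the dispatch above works, as a worked example. r0 holds the
	 * vector index encoded into SP[2:0] by vector_prologue_spectre,
	 * 0 for FIQ up to 7 for Reset. Each entry in the table below is
	 * 8 bytes (read_tpidrprw expands to a single instruction plus the
	 * branch), and reading PC in A32 yields the address of the
	 * current instruction + 8, so:
	 *
	 *	new pc = address of the "add" + 8 + r0 * 8
	 *
	 * which is the first table entry (FIQ) for r0 == 0, the second
	 * (IRQ) for r0 == 1, and so on up to Reset for r0 == 7.
	 */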
	read_tpidrprw r0
	b	__thread_fiq_handler	/* FIQ				*/
	read_tpidrprw r0
	b	__thread_irq_handler	/* IRQ				*/
	read_tpidrprw r0
	b	.			/* Reserved			*/
	read_tpidrprw r0
	b	__thread_dabort_handler	/* Data abort			*/
	read_tpidrprw r0
	b	__thread_pabort_handler	/* Prefetch abort		*/
	read_tpidrprw r0
	b	__thread_svc_handler	/* System call			*/
	read_tpidrprw r0
	b	__thread_und_handler	/* Undefined instruction	*/
	read_tpidrprw r0
	b	.			/* Reset			*/
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

__thread_und_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	mrs	r1, spsr
	tst	r1, #CPSR_T
	subne	lr, lr, #2
	subeq	lr, lr, #4
	mov	r0, #ABORT_TYPE_UNDEF
	b	__thread_abort_common

__thread_dabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #8
	mov	r0, #ABORT_TYPE_DATA
	b	__thread_abort_common

__thread_pabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #4
	mov	r0, #ABORT_TYPE_PREFETCH

__thread_abort_common:
	/*
	 * At this label:
	 * cpsr is in mode undef or abort
	 * sp is still pointing to the struct thread_core_local belonging
	 * to this core.
	 * The original {r0, r1} are saved in the struct thread_core_local
	 * pointed to by sp.
	 * {r2-r11, ip} are untouched.
	 * r0 holds the first argument for abort_handler()
	 */

	/*
	 * Update the core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_ABORT

	/*
	 * Select a stack and update the flags accordingly.
	 *
	 * Normal case:
	 * If the abort stack is unused, select that.
	 *
	 * Fatal error handling:
	 * If we're already using the abort stack, as noted by bit
	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
	 * field, we select the temporary stack instead to be able to
	 * make a stack trace of the abort in abort mode.
	 *
	 * r1 is initialized as a temporary stack pointer until we've
	 * switched to system mode.
	 */
	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	orrne	r1, r1, #THREAD_CLF_TMP /* flags |= THREAD_CLF_TMP; */
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]

	/*
	 * Store registers on the stack, matching struct thread_abort_regs,
	 * starting from the end of the struct with {r2-r11, ip}.
	 * Then load the previously saved {r0-r1} and store them together
	 * with {pad, spsr, elr}.
	 * After this only {usr_sp, usr_lr} are missing in the struct.
	 */
	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
	/* Push the original {r0-r1} on the selected stack */
	stmdb	r1!, {r2-r3}
	mrs	r3, spsr
	/* Push {pad, spsr, elr} on the selected stack */
	stmdb	r1!, {r2, r3, lr}

	cps	#CPSR_MODE_SYS
	str	lr, [r1, #-4]!
	str	sp, [r1, #-4]!
	mov	sp, r1

	bl	abort_handler

	mov	ip, sp
	ldr	sp, [ip], #4
	ldr	lr, [ip], #4

	/*
	 * Even if we entered via CPSR_MODE_UND, we are returning via
	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
	 * here.
	 */
	cps	#CPSR_MODE_ABT
	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
	msr	spsr_fsxc, r1

	/* Update the core local flags */
	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	cmp_spsr_user_mode r1
	ldm	ip, {r0-r11, ip}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_abort_common */

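/*
 * The stores in __thread_abort_common above build the frame passed to
 * abort_handler() bottom up. The layout they assume corresponds roughly
 * to this sketch (32-bit words, lowest address first; see struct
 * thread_abort_regs in the thread headers for the authoritative
 * definition):
 *
 *	struct thread_abort_regs_sketch {
 *		uint32_t usr_sp;	// stored last, in SYS mode
 *		uint32_t usr_lr;	// stored last, in SYS mode
 *		uint32_t pad;
 *		uint32_t spsr;		// SPSR at the time of the abort
 *		uint32_t elr;		// adjusted return address (lr)
 *		uint32_t r0;		// original r0, r1 taken from
 *		uint32_t r1;		// thread_core_local
 *		uint32_t r2;		// r2-r11 and ip pushed first
 *		// ... r3-r11 ...
 *		uint32_t ip;
 *	};
 */
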
__thread_svc_handler:
	cpsid	aif

	maybe_restore_mapping

	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp
	bl	thread_svc_handler
	cpsid	aif	/* In case something was unmasked */
	pop	{r0}
	msr	spsr_fsxc, r0
	cmp_spsr_user_mode r0
	pop	{r0-r7, lr}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_svc_handler */

__thread_fiq_handler:
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
	/* end thread_fiq_handler */

__thread_irq_handler:
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
	/* end thread_irq_handler */

	/*
	 * Returns to user mode.
	 * Expects to be branched to with lr holding the user space
	 * address to jump to and spsr holding the desired CPSR. Async
	 * abort, IRQ and FIQ should be masked.
	 */
eret_to_user_mode:
	write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	write_tpidruro r1
#endif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r0, =thread_user_kcode_offset
	ldr	r0, [r0]
	read_vbar r1
	sub	r1, r1, r0
	write_vbar r1
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r1, =1f
	sub	r1, r1, r0
	bx	r1
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r0, r1
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
#endif
	/* switch to user ASID */
	orr	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r0, r1
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r0
	orr	r0, r0, #TTBCR_PD1
	write_ttbcr r0
	isb
#endif
	read_contextidr r0
	orr	r0, r0, #BIT(0)
	write_contextidr r0
	isb
#endif /*!CFG_WITH_LPAE*/

	read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	read_tpidruro r1
#endif

	movs	pc, lr

	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active;
	 * this means executing with the reduced mapping, and the code
	 * needs to be located here together with the vector.
	 */
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	push	{r4-r7}

	/* Mask all exceptions */
	mrs	r4, cpsr	/* This register must be preserved */
	cpsid	aif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r2, =thread_user_kcode_offset
	ldr	r2, [r2]
	read_vbar r5		/* This register must be preserved */
	sub	r3, r5, r2
	write_vbar r3
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r3, =1f
	sub	r3, r3, r2
	bx	r3
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r6, r7	/* These registers must be preserved */
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r2, r6, #CORE_MMU_L1_TBL_OFFSET
#endif
	/* switch to user ASID */
	orr	r3, r7, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r2, r3
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r6	/* This register must be preserved */
	orr	r2, r6, #TTBCR_PD1
	write_ttbcr r2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	read_contextidr r7	/* This register must be preserved */
	orr	r2, r7, #BIT(0)
	write_contextidr r2
	isb
#endif /*!CFG_WITH_LPAE*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate minimum icache line size, result in r2 */
	read_ctr r3
	and     r3, r3, #CTR_IMINLINE_MASK
	mov     r2, #CTR_WORD_SIZE
	lsl     r2, r2, r3
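	/*
	 * Worked example of the calculation above: CTR.IminLine holds
	 * log2 of the number of words in the smallest I-cache line, so
	 * the line size in bytes is CTR_WORD_SIZE << IminLine, that is
	 * 4 << IminLine. With IminLine == 4 this gives 64-byte lines,
	 * which is then used as the stride for the invalidation loop
	 * below.
	 */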

	add	r1, r0, r1
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	write_icimvau r0
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b

	/* Invalidate entire branch predictor array inner shareable */
	write_bpiallis

	dsb	ishst
	isb

#ifdef CFG_WITH_LPAE
	write_ttbr0_64bit r6, r7
	isb
#else /*!CFG_WITH_LPAE*/
	write_contextidr r7
	isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_ttbcr r6
	isb
#endif
#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_vbar r5
	isb
	/*
	 * The PC is adjusted unconditionally to guard against the
	 * case where there was an FIQ just before we did the "cpsid aif".
	 */
	ldr	r0, =1f
	bx	r0
1:
#endif

	msr	cpsr_fsxc, r4	/* Restore exceptions */
	pop	{r4-r7}
	bx	lr		/* End of icache_inv_user_range() */

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
UNWIND(	.fnend)
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect
