xref: /optee_os/core/arch/arm/kernel/thread_a32.S (revision 98ada65e9e6db4ac1b8c5bd3faa7a398ee410f7e)
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016-2020, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>

	.syntax unified
	.arch_extension sec

	.macro cmp_spsr_user_mode reg:req
		/*
		 * We only need to test the lower 4 bits as bit 4 (0x10)
		 * is always set.
		 */
		tst	\reg, #0x0f
	.endm
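
/*
 * For illustration, a rough C equivalent of the test above: user mode
 * is encoded as 0b10000 and every privileged A32 mode sets at least one
 * of the four low mode bits, so
 *
 *	spsr_is_user_mode = !(spsr & 0x0f);
 *
 * which is what the Z flag reflects after the "tst".
 */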

FUNC thread_set_abt_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_abt_sp

FUNC thread_set_und_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_UND
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_und_sp

FUNC thread_set_irq_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_fiq_sp

FUNC thread_get_usr_sp , :
	mrs	r1, cpsr
	cpsid	aif
	cps	#CPSR_MODE_SYS
	mov	r0, sp
	msr	cpsr, r1
	bx	lr
END_FUNC thread_get_usr_sp

FUNC thread_get_usr_lr , :
	mrs	r1, cpsr
	cpsid	aif
	cps	#CPSR_MODE_SYS
	mov	r0, lr
	msr	cpsr, r1
	bx	lr
END_FUNC thread_get_usr_lr

FUNC thread_set_usr_lr , :
	mrs	r1, cpsr
	cpsid	aif
	cps	#CPSR_MODE_SYS
	mov	lr, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_usr_lr

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
UNWIND(	.cantunwind)
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4

	cps	#CPSR_MODE_SVC
	ldr	r1, [r12], #4
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4
	msr	spsr_fsxc, r1

	ldm	r12, {r1, r2}

	/*
	 * Switch to a mode other than SVC since we need to set SPSR in
	 * order to return to the previous state properly, and that state
	 * may itself be SVC mode.
	 */
	cps	#CPSR_MODE_ABT
	cmp_spsr_user_mode r2
	mov	lr, r1
	msr	spsr_fsxc, r2
	ldm	r0, {r0-r12}
	movsne	pc, lr
	b	eret_to_user_mode
END_FUNC thread_resume
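
/*
 * For reference, the loads in thread_resume() above assume a struct
 * thread_ctx_regs laid out roughly as below. The authoritative
 * definition lives in the thread headers and generated asm-defines;
 * this sketch only mirrors the access order used in this file:
 *
 *	struct thread_ctx_regs {
 *		uint32_t r[13];		// r0-r12
 *		uint32_t usr_sp;
 *		uint32_t usr_lr;
 *		uint32_t svc_spsr;
 *		uint32_t svc_sp;
 *		uint32_t svc_lr;
 *		uint32_t pc;
 *		uint32_t cpsr;
 *	};
 */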

/*
 * Disables IRQ and FIQ and saves the state of the thread from FIQ mode,
 * which has banked r8-r12 registers. Returns the original CPSR.
 */
LOCAL_FUNC thread_save_state_fiq , :
UNWIND(	.cantunwind)
	mov	r9, lr

	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r8, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}

	cps     #CPSR_MODE_SYS
	stm	r0!, {r8-r12}
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps     #CPSR_MODE_SVC
	mrs     r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	/* Back to FIQ mode */
	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r8			/* Return original CPSR */
	bx	r9
END_FUNC thread_save_state_fiq

/*
 * Disables IRQ and FIQ and saves the state of the thread. Returns the
 * original CPSR.
 */
FUNC thread_save_state , :
UNWIND(	.cantunwind)
	push	{r12, lr}
	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps     #CPSR_MODE_SYS
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps     #CPSR_MODE_SVC
	mrs     r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
END_FUNC thread_save_state

/*
 * unsigned long thread_smc(unsigned long func_id, unsigned long a1,
 *			    unsigned long a2, unsigned long a3)
 */
FUNC thread_smc , :
	push	{r4-r7}
UNWIND(	.save	{r4-r7})
	smc	#0
	pop	{r4-r7}
	bx	lr
END_FUNC thread_smc
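
/*
 * thread_smc() above follows the SMC32 calling convention: the function
 * ID and arguments go in r0-r3, SMC #0 is issued and the result comes
 * back in r0. An illustrative call (FUNCID_EXAMPLE is a made-up ID, not
 * one defined by this file):
 *
 *	unsigned long res = thread_smc(FUNCID_EXAMPLE, 0, 0, 0);
 */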

/* void thread_smccc(struct thread_smc_args *arg_res) */
FUNC thread_smccc , :
	push	{r4-r7}
	push	{r0, lr}
	ldm	r0, {r0-r7}
#ifdef CFG_CORE_SEL2_SPMC
	hvc	#0
#else
	smc	#0
#endif
	pop	{r12, lr}
	stm	r12, {r0-r7}
	pop	{r4-r7}
	bx	lr
END_FUNC thread_smccc

FUNC thread_init_vbar , :
	/* Set vector (VBAR) */
	write_vbar r0
	bx	lr
END_FUNC thread_init_vbar
DECLARE_KEEP_PAGER thread_init_vbar

/*
 * Below are low level routines handling entry to and return from user mode.
 *
 * thread_enter_user_mode() saves all the registers that user mode can
 * change so kernel mode can restore the needed registers when resuming
 * execution after the call to thread_enter_user_mode() has returned.
 * thread_enter_user_mode() doesn't return directly since it enters user
 * mode instead; it's thread_unwind_user_mode() that does the returning
 * by restoring the registers saved by thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to its
 * caller: the user TA calls _utee_return, the user TA calls _utee_panic,
 * or an abort occurs.
 *
 * Calls to _utee_return or _utee_panic are handled as:
 * __thread_svc_handler() -> thread_svc_handler() -> tee_svc_do_call(),
 * which calls syscall_return() or syscall_panic().
 *
 * These function calls return normally, except thread_svc_handler()
 * which is an exception handling routine and therefore reads the return
 * address and SPSR to restore from the stack. syscall_return() and
 * syscall_panic() change the return address and SPSR used by
 * thread_svc_handler() so that, instead of returning into user mode as
 * with other syscalls, it returns into thread_unwind_user_mode() in
 * kernel mode. When thread_svc_handler() returns, the stack pointer is
 * at the point where thread_enter_user_mode() left it, so this is where
 * thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way, but by thread_abort_handler()
 * instead: when the pager sees that it's an abort from user mode that
 * can't be handled, it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */
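
/*
 * As a rough C-level sketch of the round trip described above
 * (illustrative only, using the prototypes quoted in the comments in
 * this file):
 *
 *	uint32_t exit_status0, exit_status1;
 *	uint32_t ret;
 *
 *	// Enters user mode via eret_to_user_mode and only "returns"
 *	// once thread_unwind_user_mode() runs with the stack pointer
 *	// restored to what __thread_enter_user_mode() left behind.
 *	ret = __thread_enter_user_mode(regs, &exit_status0, &exit_status1);
 *
 *	// Here ret and the exit statuses tell whether the TA returned,
 *	// panicked or was aborted.
 */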

/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 *
 * This function depends on being called with exceptions masked.
 */
FUNC __thread_enter_user_mode , :
UNWIND(	.cantunwind)
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function had returned. This is also used in
	 * syscall_panic().
	 *
	 * If the stack usage of this function is changed,
	 * thread_unwind_user_mode() has to be updated.
	 */
	push    {r4-r12,lr}

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r4, sp
	ldr	sp, [r0, #THREAD_CTX_REGS_USR_SP]
	cps	#CPSR_MODE_SVC

	push	{r1, r2, r4, r5}

	/* Prepare user mode entry via eret_to_user_mode */
	ldr	lr, [r0, #THREAD_CTX_REGS_PC]
	ldr	r4, [r0, #THREAD_CTX_REGS_CPSR]
	msr     spsr_fsxc, r4

	ldm	r0, {r0-r12}

	b	eret_to_user_mode
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *              uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND(	.cantunwind)
	/* Match push {r1, r2, r4, r5} in thread_enter_user_mode() */
	pop	{r4-r7}
	str	r1, [r4]
	str	r2, [r5]

	/* Restore old user sp */
	cps	#CPSR_MODE_SYS
	mov	sp, r6
	cps	#CPSR_MODE_SVC

	/* Match push {r4-r12,lr} in thread_enter_user_mode() */
	pop     {r4-r12,pc}
END_FUNC thread_unwind_user_mode
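
/*
 * For illustration, the stack contract between __thread_enter_user_mode()
 * and thread_unwind_user_mode(), as left by the pushes above (offsets
 * from the SVC stack pointer at the point user mode is entered):
 *
 *	[sp + 0]	r1, pointer for exit_status0
 *	[sp + 4]	r2, pointer for exit_status1
 *	[sp + 8]	r4, the saved user mode sp
 *	[sp + 12]	r5
 *	[sp + 16]	r4-r12, lr from the first push
 *
 * thread_unwind_user_mode() pops the first four words into r4-r7, stores
 * the exit statuses through the saved pointers, restores the user sp from
 * r6 and finally pops {r4-r12, pc}, returning to the caller of
 * __thread_enter_user_mode() with the return value still in r0.
 */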

	.macro maybe_restore_mapping
		/*
		 * This macro is a bit hard to read due to all the ifdefs:
		 * we're testing for two different configs, which makes
		 * four different combinations.
		 *
		 * - With LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 * - Without LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 */

		/*
		 * At this point we can't rely on any memory being writable
		 * yet, so we're using TPIDRPRW to store r0 and, if with
		 * LPAE, TPIDRURO to store r1 too.
		 */
		write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		write_tpidruro r1
#endif

#ifdef CFG_WITH_LPAE
		read_ttbr0_64bit r0, r1
		tst	r1, #BIT(TTBR_ASID_SHIFT - 32)
		beq	11f

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/*
		 * Update the mapping to use the full kernel mode mapping.
		 * Since the translation table could reside above 4GB we'll
		 * have to use 64-bit arithmetic.
		 */
		subs	r0, r0, #CORE_MMU_BASE_TABLE_OFFSET
		sbc	r1, r1, #0
#endif
		bic	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
		write_ttbr0_64bit r0, r1
		isb

#else /*!CFG_WITH_LPAE*/
		read_contextidr r0
		tst	r0, #1
		beq	11f

		/* Update the mapping to use the full kernel mode mapping. */
		bic	r0, r0, #1
		write_contextidr r0
		isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		read_ttbcr r0
		bic	r0, r0, #TTBCR_PD1
		write_ttbcr r0
		isb
#endif

#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		ldr	r0, =thread_user_kcode_offset
		ldr	r0, [r0]
		read_vbar r1
		add	r1, r1, r0
		write_vbar r1
		isb

	11:	/*
		 * The PC is adjusted unconditionally to guard against the
		 * case where there was an FIQ just before we did the
		 * "cpsid aif".
		 */
		ldr	r0, =22f
		bx	r0
	22:
#else
	11:
#endif
		read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		read_tpidruro r1
#endif
	.endm
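
/*
 * In rough pseudo-C, maybe_restore_mapping does the following (the
 * scratch use of TPIDRPRW/TPIDRURO is left out and user_asid_bit stands
 * for BIT(TTBR_ASID_SHIFT) in the 64-bit TTBR0 value; illustration only):
 *
 *	if (IS_ENABLED(CFG_WITH_LPAE)) {
 *		uint64_t ttbr0 = read_ttbr0_64bit();
 *
 *		came_from_user_map = ttbr0 & user_asid_bit;
 *		if (came_from_user_map) {
 *			if (IS_ENABLED(CFG_CORE_UNMAP_CORE_AT_EL0))
 *				ttbr0 -= CORE_MMU_BASE_TABLE_OFFSET;
 *			write_ttbr0_64bit(ttbr0 & ~user_asid_bit);
 *		}
 *	} else {
 *		came_from_user_map = read_contextidr() & 1;
 *		if (came_from_user_map) {
 *			write_contextidr(read_contextidr() & ~1);
 *			if (IS_ENABLED(CFG_CORE_UNMAP_CORE_AT_EL0))
 *				write_ttbcr(read_ttbcr() & ~TTBCR_PD1);
 *		}
 *	}
 *	if (IS_ENABLED(CFG_CORE_UNMAP_CORE_AT_EL0) && came_from_user_map)
 *		write_vbar(read_vbar() + thread_user_kcode_offset);
 */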

/* The handler of native interrupt. */
.macro	native_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	/*
	 * FIQ and IRQ have a +4 offset for lr compared to the preferred
	 * return address.
	 */
	sub     lr, lr, #4

	/*
	 * We're always saving {r0-r3}. In IRQ mode we're also saving r12.
	 * In FIQ mode we're saving the banked FIQ registers {r8-r12}
	 * because the secure monitor doesn't save those. The treatment of
	 * the banked FIQ registers is somewhat analogous to the lazy save
	 * of VFP registers.
	 */
	.ifc	\mode\(),fiq
	push	{r0-r3, r8-r12, lr}
	.else
	push	{r0-r3, r12, lr}
	.endif

	bl	thread_check_canaries
	bl	itr_core_handler

	mrs	r0, spsr
	cmp_spsr_user_mode r0

	.ifc	\mode\(),fiq
	pop	{r0-r3, r8-r12, lr}
	.else
	pop	{r0-r3, r12, lr}
	.endif

	movsne	pc, lr
	b	eret_to_user_mode
.endm

/* The handler of foreign interrupt. */
.macro foreign_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	sub	lr, lr, #4
	push	{r12}

	.ifc	\mode\(),fiq
	/*
	 * If a foreign (non-secure) interrupt is received as an FIQ we
	 * need to check that we're in a saveable state, or whether we
	 * need to mask the interrupt to have it handled later.
	 *
	 * The window when this is needed is quite narrow: it's between
	 * entering the exception vector and the point where the "cpsid"
	 * instruction of the handler has been executed.
	 *
	 * Currently we can save the state properly if the FIQ is received
	 * while in user or svc (kernel) mode.
	 *
	 * If we're returning to abort, undef or irq mode we're returning
	 * with the mapping restored. This is OK since the reduced mapping
	 * will be restored again before the handler we're returning to
	 * eventually returns to user mode.
	 */
	mrs	r12, spsr
	and	r12, r12, #ARM32_CPSR_MODE_MASK
	cmp	r12, #ARM32_CPSR_MODE_USR
	cmpne	r12, #ARM32_CPSR_MODE_SVC
	beq	1f
	mrs	r12, spsr
	orr	r12, r12, #ARM32_CPSR_F
	msr	spsr_fsxc, r12
	pop	{r12}
	movs	pc, lr
1:
	.endif

	push	{lr}

	.ifc	\mode\(),fiq
	bl	thread_save_state_fiq
	.else
	bl	thread_save_state
	.endif

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which cache entries have
	 * been used. We're relying on the secure monitor/dispatcher to
	 * take care of the BTB.
	 */
	mov	r0, #DCACHE_OP_CLEAN_INV
	bl	dcache_op_louis
	write_iciallu
#endif

	/*
	 * Use SP_abt to update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_TMP;
	 */
	cps     #CPSR_MODE_ABT
	ldr     r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl     r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr     r1, r1, #THREAD_CLF_TMP
	str     r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	.ifc    \mode\(),fiq
	cps     #CPSR_MODE_FIQ
	.else
	cps     #CPSR_MODE_IRQ
	.endif

	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	r1, spsr
	pop	{r2}
	pop	{r12}
	blx	thread_state_suspend

	/*
	 * Switch to SVC mode and copy the current stack pointer since it
	 * already is the tmp stack.
	 */
	mov	r1, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r1

	/* Passing thread index in r0 */
	b	thread_foreign_intr_exit
.endm
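
/*
 * The tail of foreign_intr_handler above corresponds roughly to the
 * following pseudo-C (illustration only; thread_state_suspend() and
 * thread_foreign_intr_exit() are the C functions actually called):
 *
 *	flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_TMP;
 *	thread_index = thread_state_suspend(
 *			THREAD_FLAGS_EXIT_ON_FOREIGN_INTR,
 *			interrupted_cpsr, interrupted_pc);
 *	thread_foreign_intr_exit(thread_index);	// does not return here
 */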

FUNC thread_excp_vect , :, align=32
UNWIND(	.cantunwind)
	b	.			/* Reset			*/
	b	__thread_und_handler	/* Undefined instruction	*/
	b	__thread_svc_handler	/* System call			*/
	b	__thread_pabort_handler	/* Prefetch abort		*/
	b	__thread_dabort_handler	/* Data abort			*/
	b	.			/* Reserved			*/
	b	__thread_irq_handler	/* IRQ				*/
	b	__thread_fiq_handler	/* FIQ				*/
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8 byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * To avoid unexpected speculation we need to invalidate
		 * the branch predictor before we do the first branch. It
		 * doesn't matter if it's a conditional or an unconditional
		 * branch, speculation can still occur.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * we enter via.  This is done by adding 1 to SP in each
		 * entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		cpsid   aif		/* 0:000 FIQ			*/
	.endm
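
	/*
	 * Worked example of the dispatch further down (illustration):
	 * entry via the IRQ vector branches to the last "add" above, so
	 * only one add executes and SP ends up with 0b001 in its low
	 * bits. At the common code below, r0 = SP & 7 = 1 and SP is
	 * realigned. In "add pc, pc, r0, LSL #3" the PC reads as the
	 * address of that instruction plus 8, i.e. the first
	 * read_tpidrprw/branch pair after the nop, so adding r0 * 8
	 * selects the r0'th pair: 0 for FIQ, 1 for IRQ, up to 7 for
	 * Reset, matching the numbering in the comments above.
	 */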

        .balign	32
	.global thread_excp_vect_wa_a15_spectre_v2
thread_excp_vect_wa_a15_spectre_v2:
	vector_prologue_spectre
	write_tpidrprw r0
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

        .balign	32
	.global thread_excp_vect_wa_spectre_v2
thread_excp_vect_wa_spectre_v2:
	vector_prologue_spectre
	write_tpidrprw r0
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:	and	r0, sp, #(BIT(0) | BIT(1) | BIT(2))
	bic	sp, sp, #(BIT(0) | BIT(1) | BIT(2))
	add	pc, pc, r0, LSL #3
	nop

	read_tpidrprw r0
	b	__thread_fiq_handler	/* FIQ				*/
	read_tpidrprw r0
	b	__thread_irq_handler	/* IRQ				*/
	read_tpidrprw r0
	b	.			/* Reserved			*/
	read_tpidrprw r0
	b	__thread_dabort_handler	/* Data abort			*/
	read_tpidrprw r0
	b	__thread_pabort_handler	/* Prefetch abort		*/
	read_tpidrprw r0
	b	__thread_svc_handler	/* System call			*/
	read_tpidrprw r0
	b	__thread_und_handler	/* Undefined instruction	*/
	read_tpidrprw r0
	b	.			/* Reset			*/
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

__thread_und_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	mrs	r1, spsr
	tst	r1, #CPSR_T
	subne	lr, lr, #2
	subeq	lr, lr, #4
	mov	r0, #ABORT_TYPE_UNDEF
	b	__thread_abort_common

__thread_dabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #8
	mov	r0, #ABORT_TYPE_DATA
	b	__thread_abort_common

__thread_pabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #4
	mov	r0, #ABORT_TYPE_PREFETCH

__thread_abort_common:
	/*
	 * At this label:
	 * cpsr is in undef or abort mode
	 * sp is still pointing to the struct thread_core_local belonging
	 * to this core.
	 * The original {r0, r1} are saved in the struct thread_core_local
	 * pointed to by sp.
	 * {r2-r11, ip} are untouched.
	 * r0 holds the first argument for abort_handler(), the abort type.
	 */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_ABORT

	/*
	 * Select the stack and update the flags accordingly.
	 *
	 * Normal case:
	 * If the abort stack is unused, select that.
	 *
	 * Fatal error handling:
	 * If we're already using the abort stack, as noted by bit
	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
	 * field, we select the temporary stack instead to be able to
	 * make a stack trace of the abort in abort mode.
	 *
	 * r1 is initialized as a temporary stack pointer until we've
	 * switched to system mode.
	 */
	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	orrne	r1, r1, #THREAD_CLF_TMP /* flags |= THREAD_CLF_TMP; */
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]

	/*
	 * Store registers on the selected stack to fill in struct
	 * thread_abort_regs, starting from the end of the struct:
	 * first {r2-r11, ip}, then the previously saved {r0-r1} and
	 * then {pad, spsr, elr}.
	 * After this only {usr_sp, usr_lr} are missing in the struct.
	 */
	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
	/* Push the original {r0-r1} on the selected stack */
	stmdb	r1!, {r2-r3}
	mrs	r3, spsr
	/* Push {pad, spsr, elr} on the selected stack */
	stmdb	r1!, {r2, r3, lr}

	cps	#CPSR_MODE_SYS
	str	lr, [r1, #-4]!
	str	sp, [r1, #-4]!
	mov	sp, r1

	bl	abort_handler

	mov	ip, sp
	ldr	sp, [ip], #4
	ldr	lr, [ip], #4

	/*
	 * Even if we entered via CPSR_MODE_UND, we are returning via
	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
	 * here.
	 */
	cps	#CPSR_MODE_ABT
	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
	msr	spsr_fsxc, r1

	/* Update core local flags */
	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	cmp_spsr_user_mode r1
	ldm	ip, {r0-r11, ip}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_abort_common */

__thread_svc_handler:
	cpsid	aif

	maybe_restore_mapping

	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp
	bl	thread_svc_handler
	cpsid	aif	/* In case something was unmasked */
	pop	{r0}
	msr	spsr_fsxc, r0
	cmp_spsr_user_mode r0
	pop	{r0-r7, lr}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_svc_handler */

__thread_fiq_handler:
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
	/* end thread_fiq_handler */

__thread_irq_handler:
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
	/* end thread_irq_handler */

	/*
	 * Returns to user mode.
	 * Expects to be jumped to with lr pointing to the user space
	 * address to jump to and spsr holding the desired cpsr. Async
	 * abort, irq and fiq should be masked.
	 */
eret_to_user_mode:
	write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	write_tpidruro r1
#endif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r0, =thread_user_kcode_offset
	ldr	r0, [r0]
	read_vbar r1
	sub	r1, r1, r0
	write_vbar r1
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r1, =1f
	sub	r1, r1, r0
	bx	r1
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r0, r1
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r0, r0, #CORE_MMU_BASE_TABLE_OFFSET
#endif
	/* switch to user ASID */
	orr	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r0, r1
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r0
	orr	r0, r0, #TTBCR_PD1
	write_ttbcr r0
	isb
#endif
	read_contextidr r0
	orr	r0, r0, #BIT(0)
	write_contextidr r0
	isb
#endif /*!CFG_WITH_LPAE*/

	read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	read_tpidruro r1
#endif

	movs	pc, lr

	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active;
	 * this means executing with the reduced mapping, and the code
	 * needs to be located here together with the vector.
	 */
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	push	{r4-r7}

	/* Mask all exceptions */
	mrs	r4, cpsr	/* This register must be preserved */
	cpsid	aif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r2, =thread_user_kcode_offset
	ldr	r2, [r2]
	read_vbar r5		/* This register must be preserved */
	sub	r3, r5, r2
	write_vbar r3
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r3, =1f
	sub	r3, r3, r2
	bx	r3
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r6, r7	/* These registers must be preserved */
	/* switch to user ASID */
	orr	r3, r7, #BIT(TTBR_ASID_SHIFT - 32)
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r2, r6, #CORE_MMU_BASE_TABLE_OFFSET
	write_ttbr0_64bit r2, r3
#else
	write_ttbr0_64bit r6, r3
#endif
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r6	/* This register must be preserved */
	orr	r2, r6, #TTBCR_PD1
	write_ttbcr r2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	read_contextidr r7	/* This register must be preserved */
	orr	r2, r7, #BIT(0)
	write_contextidr r2
	isb
#endif /*!CFG_WITH_LPAE*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate minimum icache line size, result in r2 */
	read_ctr r3
	and     r3, r3, #CTR_IMINLINE_MASK
	mov     r2, #CTR_WORD_SIZE
	lsl     r2, r2, r3

	add	r1, r0, r1
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	write_icimvau r0
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
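
	/*
	 * The line size calculation and loop above correspond roughly to
	 * this C (illustration only; CTR.IminLine is the log2 of the
	 * number of words in the smallest instruction cache line):
	 *
	 *	vaddr_t va = (vaddr_t)addr;
	 *	vaddr_t end = va + size;
	 *	size_t line = CTR_WORD_SIZE << (read_ctr() & CTR_IMINLINE_MASK);
	 *
	 *	for (va &= ~(line - 1); va < end; va += line)
	 *		write_icimvau(va);	// invalidate icache line by MVA to PoU
	 */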

	/* Invalidate entire branch predictor array inner shareable */
	write_bpiallis

	dsb	ishst
	isb

#ifdef CFG_WITH_LPAE
	write_ttbr0_64bit r6, r7
	isb
#else /*!CFG_WITH_LPAE*/
	write_contextidr r7
	isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_ttbcr r6
	isb
#endif
#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_vbar r5
	isb
	/*
	 * The PC is adjusted unconditionally to guard against the
	 * case where there was an FIQ just before we did the "cpsid aif".
	 */
	ldr	r0, =1f
	bx	r0
1:
#endif

	msr	cpsr_fsxc, r4	/* Restore exceptions */
	pop	{r4-r7}
	bx	lr		/* End of icache_inv_user_range() */

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect