xref: /optee_os/core/arch/arm/kernel/thread_a32.S (revision 62590f087806970861736f2d980cf5de3f0f46ea)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2016-2020, Linaro Limited
4 * Copyright (c) 2014, STMicroelectronics International N.V.
5 */
6
7#include <arm32_macros.S>
8#include <arm.h>
9#include <asm.S>
10#include <generated/asm-defines.h>
11#include <keep.h>
12#include <kernel/abort.h>
13#include <kernel/cache_helpers.h>
14#include <kernel/thread_defs.h>
15#include <mm/core_mmu.h>
16
17#include "thread_private.h"
18
19	.syntax unified
20	.arch_extension sec
21
22	.macro cmp_spsr_user_mode reg:req
23		/*
24		 * We only test the lower 4 bits since bit 4 (0x10) is
25		 * always set.
26		 */
27		tst	\reg, #0x0f
28	.endm
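/*
 * Illustration (not assembled): a user mode SPSR has mode field
 * M[4:0] = 0b10000 (0x10), so the "tst ..., #0x0f" above sets the Z flag
 * and a following EQ condition matches. Any privileged mode, for
 * instance SVC (0x13) or ABT (0x17), has a non-zero low nibble and
 * gives NE.
 */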
29
30FUNC thread_set_abt_sp , :
31UNWIND(	.cantunwind)
32	mrs	r1, cpsr
33	cps	#CPSR_MODE_ABT
34	mov	sp, r0
35	msr	cpsr, r1
36	bx	lr
37END_FUNC thread_set_abt_sp
38
39FUNC thread_set_und_sp , :
40UNWIND(	.cantunwind)
41	mrs	r1, cpsr
42	cps	#CPSR_MODE_UND
43	mov	sp, r0
44	msr	cpsr, r1
45	bx	lr
46END_FUNC thread_set_und_sp
47
48FUNC thread_set_irq_sp , :
49UNWIND(	.cantunwind)
50	mrs	r1, cpsr
51	cps	#CPSR_MODE_IRQ
52	mov	sp, r0
53	msr	cpsr, r1
54	bx	lr
55END_FUNC thread_set_irq_sp
56
57FUNC thread_set_fiq_sp , :
58UNWIND(	.cantunwind)
59	mrs	r1, cpsr
60	cps	#CPSR_MODE_FIQ
61	mov	sp, r0
62	msr	cpsr, r1
63	bx	lr
64END_FUNC thread_set_fiq_sp
65
66FUNC thread_get_usr_sp , :
67	mrs	r1, cpsr
68	cpsid	aif
69	cps	#CPSR_MODE_SYS
70	mov	r0, sp
71	msr	cpsr, r1
72	bx	lr
73END_FUNC thread_get_usr_sp
74
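/*
 * A minimal sketch, in C and for illustration only, of the layout that
 * thread_resume() and __thread_enter_user_mode() below assume for
 * struct thread_ctx_regs. The authoritative definition lives in the
 * kernel headers and the offsets used below come from
 * generated/asm-defines.h; the field names here are illustrative:
 *
 *	struct thread_ctx_regs {
 *		uint32_t r[13];		r0-r12
 *		uint32_t usr_sp;	banked SYS/USR sp
 *		uint32_t usr_lr;	banked SYS/USR lr
 *		uint32_t svc_spsr;
 *		uint32_t svc_sp;
 *		uint32_t svc_lr;
 *		uint32_t pc;
 *		uint32_t cpsr;
 *	};
 */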
75/* void thread_resume(struct thread_ctx_regs *regs) */
76FUNC thread_resume , :
77UNWIND(	.cantunwind)
78	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */
79
80	cps	#CPSR_MODE_SYS
81	ldr	sp, [r12], #4
82	ldr	lr, [r12], #4
83
84	cps	#CPSR_MODE_SVC
85	ldr	r1, [r12], #4
86	ldr	sp, [r12], #4
87	ldr	lr, [r12], #4
88	msr	spsr_fsxc, r1
89
90	ldm	r12, {r1, r2}
91
92	/*
93	 * Switching to some other mode than SVC as we need to set spsr in
94	 * order to return into the old state properly and it may be SVC
95	 * mode we're returning to.
96	 */
97	cps	#CPSR_MODE_ABT
98	cmp_spsr_user_mode r2
99	mov	lr, r1
100	msr	spsr_fsxc, r2
101	ldm	r0, {r0-r12}
102	movsne	pc, lr
103	b	eret_to_user_mode
104END_FUNC thread_resume
105
106/*
107 * Disables async abort, IRQ and FIQ and saves the state of the thread
108 * from FIQ mode (which has banked r8-r12 registers); returns the original CPSR.
109 */
110LOCAL_FUNC thread_save_state_fiq , :
111UNWIND(	.cantunwind)
112	mov	r9, lr
113
114	/*
115	 * Uses stack for temporary storage, while storing needed
116	 * context in the thread context struct.
117	 */
118
119	mrs	r8, cpsr
120
121	cpsid	aif			/* Disable Async abort, IRQ and FIQ */
122
123	push	{r4-r7}
124	push	{r0-r3}
125
126	mrs	r6, cpsr		/* Save current CPSR */
127
128	bl	thread_get_ctx_regs
129
130	pop	{r1-r4}			/* r0-r3 pushed above */
131	stm	r0!, {r1-r4}
132	pop	{r1-r4}			/* r4-r7 pushed above */
133	stm	r0!, {r1-r4}
134
135	cps     #CPSR_MODE_SYS
136	stm	r0!, {r8-r12}
137	str	sp, [r0], #4
138	str	lr, [r0], #4
139
140	cps     #CPSR_MODE_SVC
141	mrs     r1, spsr
142	str	r1, [r0], #4
143	str	sp, [r0], #4
144	str	lr, [r0], #4
145
146	/* back to fiq mode */
147	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
148	msr	cpsr, r6		/* Restore mode */
149
150	mov	r0, r8			/* Return original CPSR */
151	bx	r9
152END_FUNC thread_save_state_fiq
153
154/*
155 * Disables async abort, IRQ and FIQ and saves the state of the thread,
156 * returns the original CPSR.
157 */
158FUNC thread_save_state , :
159UNWIND(	.cantunwind)
160	push	{r12, lr}
161	/*
162	 * Uses stack for temporary storage, while storing needed
163	 * context in the thread context struct.
164	 */
165
166	mrs	r12, cpsr
167
168	cpsid	aif			/* Disable Async abort, IRQ and FIQ */
169
170	push	{r4-r7}
171	push	{r0-r3}
172
173	mov	r5, r12			/* Save CPSR in a preserved register */
174	mrs	r6, cpsr		/* Save current CPSR */
175
176	bl	thread_get_ctx_regs
177
178	pop	{r1-r4}			/* r0-r3 pushed above */
179	stm	r0!, {r1-r4}
180	pop	{r1-r4}			/* r4-r7 pushed above */
181	stm	r0!, {r1-r4}
182	stm	r0!, {r8-r11}
183
184	pop	{r12, lr}
185	stm	r0!, {r12}
186
187        cps     #CPSR_MODE_SYS
188	str	sp, [r0], #4
189	str	lr, [r0], #4
190
191        cps     #CPSR_MODE_SVC
192        mrs     r1, spsr
193	str	r1, [r0], #4
194	str	sp, [r0], #4
195	str	lr, [r0], #4
196
197	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
198	msr	cpsr, r6		/* Restore mode */
199
200	mov	r0, r5			/* Return original CPSR */
201	bx	lr
202END_FUNC thread_save_state
203
204/*
205 * unsigned long thread_smc(unsigned long func_id, unsigned long a1,
206 *			    unsigned long a2, unsigned long a3)
207 */
208FUNC thread_smc , :
209	smc	#0
210	bx	lr
211END_FUNC thread_smc
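/*
 * Usage sketch (illustrative, hypothetical argument values): following
 * the SMC32 calling convention the function ID goes in r0 and up to
 * three arguments in r1-r3; the value the secure monitor leaves in r0
 * becomes the return value.
 *
 *	unsigned long res = thread_smc(func_id, a1, a2, a3);
 */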
212
213/* void thread_smccc(struct thread_smc_args *arg_res) */
214FUNC thread_smccc , :
215	push	{r4-r7}
216	push	{r0, lr}
217	ldm	r0, {r0-r7}
218#ifdef CFG_CORE_SEL2_SPMC
219	hvc	#0
220#else
221	smc	#0
222#endif
223	pop	{r12, lr}
224	stm	r12, {r0-r7}
225	pop	{r4-r7}
226	bx	lr
227END_FUNC thread_smccc
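/*
 * Sketch of the argument block used above, derived from the ldm/stm
 * pairs: eight 32-bit words are loaded into r0-r7 before the SMC/HVC
 * and written back afterwards, so the same structure carries both
 * arguments and results. Field names are illustrative; see the thread
 * headers for the real definition.
 *
 *	struct thread_smc_args {
 *		uint32_t a0;	function ID in, first result out
 *		uint32_t a1;
 *		uint32_t a2;
 *		uint32_t a3;
 *		uint32_t a4;
 *		uint32_t a5;
 *		uint32_t a6;
 *		uint32_t a7;
 *	};
 */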
228
229FUNC thread_init_vbar , :
230	/* Set vector (VBAR) */
231	write_vbar r0
232	bx	lr
233END_FUNC thread_init_vbar
234DECLARE_KEEP_PAGER thread_init_vbar
235
236/*
237 * Below are low level routines handling entry and return from user mode.
238 *
239 * thread_enter_user_mode() saves all registers that user mode can change
240 * so that kernel mode can restore the needed registers when resuming
241 * execution after the call to thread_enter_user_mode() has returned.
242 * thread_enter_user_mode() doesn't return directly since it enters user
243 * mode instead; it's thread_unwind_user_mode() that does the
244 * returning by restoring the registers saved by thread_enter_user_mode().
245 *
246 * There are three ways for thread_enter_user_mode() to return to the caller:
247 * the user TA calls _utee_return, it calls _utee_panic, or an abort occurs.
248 *
249 * Calls to _utee_return or _utee_panic are handled as:
250 * __thread_svc_handler() -> thread_svc_handler() -> tee_svc_do_call() which
251 * calls syscall_return() or syscall_panic().
252 *
253 * These function calls return normally, except thread_svc_handler()
254 * which is an exception handling routine and therefore reads the return
255 * address and SPSR to restore from the stack. syscall_return() and
256 * syscall_panic() change the return address and SPSR used by
257 * thread_svc_handler() so that, instead of returning into user mode as
258 * with other syscalls, it returns into thread_unwind_user_mode() in
259 * kernel mode. When thread_svc_handler() returns, the stack pointer is
260 * at the point where thread_enter_user_mode() left it, so this is where
261 * thread_unwind_user_mode() can operate.
262 *
263 * Aborts are handled in a similar way, but by thread_abort_handler()
264 * instead: when the pager sees that it's an abort from user mode that
265 * can't be handled, it updates the SPSR and return address used by
266 * thread_abort_handler() to return into thread_unwind_user_mode()
267 * instead.
268 */
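/*
 * The flow above, sketched as C-like pseudo code (illustrative only):
 *
 *	uint32_t __thread_enter_user_mode(regs, &exit_status0, &exit_status1)
 *		push callee-saved registers and the kernel return state
 *		load user sp, pc and cpsr from *regs
 *		eret_to_user_mode()	(does not return here)
 *
 *	Later, from syscall_return()/syscall_panic() or an unhandled abort:
 *
 *	thread_unwind_user_mode(ret, exit_status0, exit_status1)
 *		store exit_status0/1 through the saved pointers
 *		pop the state pushed by __thread_enter_user_mode()
 *		return ret to the caller of __thread_enter_user_mode()
 */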
269
270/*
271 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
272 *				     uint32_t *exit_status0,
273 *				     uint32_t *exit_status1);
274 *
275 * This function depends on being called with exceptions masked.
276 */
277FUNC __thread_enter_user_mode , :
278UNWIND(	.cantunwind)
279	/*
280	 * Save all registers to allow syscall_return() to resume execution
281	 * as if this function had returned. This is also used in
282	 * syscall_panic().
283	 *
284	 * If the stack usage of this function is changed,
285	 * thread_unwind_user_mode() has to be updated.
286	 */
287	push    {r4-r12,lr}
288
289	/*
290	 * Save old user sp and set new user sp.
291	 */
292	cps	#CPSR_MODE_SYS
293	mov	r4, sp
294	ldr	sp, [r0, #THREAD_CTX_REGS_USR_SP]
295	cps	#CPSR_MODE_SVC
296
297	push	{r1, r2, r4, r5}
298
299	/* Prepare user mode entry via eret_to_user_mode */
300	ldr	lr, [r0, #THREAD_CTX_REGS_PC]
301	ldr	r4, [r0, #THREAD_CTX_REGS_CPSR]
302	msr     spsr_fsxc, r4
303
304	ldm	r0, {r0-r12}
305
306	b	eret_to_user_mode
307END_FUNC __thread_enter_user_mode
308
309/*
310 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
311 *              uint32_t exit_status1);
312 * See description in thread.h
313 */
314FUNC thread_unwind_user_mode , :
315UNWIND(	.cantunwind)
316	/* Match push {r1, r2, r4, r5} in __thread_enter_user_mode() */
317	pop	{r4-r7}
318	str	r1, [r4]
319	str	r2, [r5]
320
321	/* Restore old user sp */
322	cps	#CPSR_MODE_SYS
323	mov	sp, r6
324	cps	#CPSR_MODE_SVC
325
326	/* Match push {r4-r12,lr} in __thread_enter_user_mode() */
327	pop     {r4-r12,pc}
328END_FUNC thread_unwind_user_mode
329
330	.macro maybe_restore_mapping
331		/*
332		 * This macro is a bit hard to read due to all the ifdefs:
333		 * we're testing for two different configs, which makes four
334		 * different combinations.
335		 *
336		 * - With LPAE, and then some extra code if with
337		 *   CFG_CORE_UNMAP_CORE_AT_EL0
338		 * - Without LPAE, and then some extra code if with
339		 *   CFG_CORE_UNMAP_CORE_AT_EL0
340		 */
341
342		/*
343		 * At this point we can't rely on any memory being writable
344		 * yet, so we're using TPIDRPRW to store r0 and, with LPAE or
345		 * CFG_CORE_UNMAP_CORE_AT_EL0, TPIDRURO to store r1 too.
346		 */
347		write_tpidrprw r0
348#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
349		write_tpidruro r1
350#endif
351
352#ifdef CFG_WITH_LPAE
353		read_ttbr0_64bit r0, r1
354		tst	r1, #BIT(TTBR_ASID_SHIFT - 32)
355		beq	11f
356
357#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
358		/*
359		 * Update the mapping to use the full kernel mode mapping.
360		 * Since the translation table could reside above 4GB we'll
361		 * have to use 64-bit arithmetic.
362		 */
363		subs	r0, r0, #CORE_MMU_L1_TBL_OFFSET
364		sbc	r1, r1, #0
365#endif
366		bic	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
367		write_ttbr0_64bit r0, r1
368		isb
369
370#else /*!CFG_WITH_LPAE*/
371		read_contextidr r0
372		tst	r0, #1
373		beq	11f
374
375		/* Update the mapping to use the full kernel mode mapping. */
376		bic	r0, r0, #1
377		write_contextidr r0
378		isb
379#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
380		read_ttbcr r0
381		bic	r0, r0, #TTBCR_PD1
382		write_ttbcr r0
383		isb
384#endif
385
386#endif /*!CFG_WITH_LPAE*/
387
388#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
389		ldr	r0, =thread_user_kcode_offset
390		ldr	r0, [r0]
391		read_vbar r1
392		add	r1, r1, r0
393		write_vbar r1
394		isb
395
396	11:	/*
397		 * The PC is adjusted unconditionally to guard against the
398		 * case where there was an FIQ just before we did the "cpsid aif".
399		 */
400		ldr	r0, =22f
401		bx	r0
402	22:
403#else
404	11:
405#endif
406		read_tpidrprw r0
407#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
408		read_tpidruro r1
409#endif
410	.endm
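/*
 * The LPAE path of maybe_restore_mapping, sketched as pseudo code for
 * illustration (the non-LPAE path does the equivalent via CONTEXTIDR
 * and, with CFG_CORE_UNMAP_CORE_AT_EL0, TTBCR.PD1):
 *
 *	ttbr0 = read_ttbr0();			64-bit value
 *	if (ttbr0 & user_asid_bit) {		entered on the user mapping
 *		if CFG_CORE_UNMAP_CORE_AT_EL0:
 *			ttbr0 -= CORE_MMU_L1_TBL_OFFSET;   full kernel tables
 *		ttbr0 &= ~user_asid_bit;	switch to the kernel ASID
 *		write_ttbr0(ttbr0); isb();
 *		if CFG_CORE_UNMAP_CORE_AT_EL0:
 *			vbar += thread_user_kcode_offset;  full-mapping vector
 *	}
 */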
411
412/* The handler of native interrupts. */
413.macro	native_intr_handler mode:req
414	cpsid	aif
415	maybe_restore_mapping
416
417	/*
418	 * FIQ and IRQ have a +4 offset for lr compared to the preferred
419	 * return address.
420	 */
421	sub     lr, lr, #4
422
423	/*
424	 * We're always saving {r0-r3}. In IRQ mode we're saving r12 also.
425	 * In FIQ mode we're also saving the banked FIQ registers {r8-r12}
426	 * because the secure monitor doesn't save those. The treatment of
427	 * the banked fiq registers is somewhat analogous to the lazy save
428	 * of VFP registers.
429	 */
430	.ifc	\mode\(),fiq
431	push	{r0-r3, r8-r12, lr}
432	.else
433	push	{r0-r3, r12, lr}
434	.endif
435
436	bl	thread_check_canaries
437	bl	itr_core_handler
438
439	mrs	r0, spsr
440	cmp_spsr_user_mode r0
441
442	.ifc	\mode\(),fiq
443	pop	{r0-r3, r8-r12, lr}
444	.else
445	pop	{r0-r3, r12, lr}
446	.endif
447
448	movsne	pc, lr
449	b	eret_to_user_mode
450.endm
451
452/* The handler of foreign interrupts. */
453.macro foreign_intr_handler mode:req
454	cpsid	aif
455	maybe_restore_mapping
456
457	sub	lr, lr, #4
458	push	{r12}
459
460	.ifc	\mode\(),fiq
461	/*
462	 * If a foreign (non-secure) interrupt is received as an FIQ we need
463	 * to check whether we're in a saveable state or whether we need to
464	 * mask the interrupt so it can be handled later.
465	 *
466	 * The window when this is needed is quite narrow: it's from
467	 * entering the exception vector until the "cpsid" instruction
468	 * of the handler has been executed.
469	 *
470	 * Currently we can save the state properly if the FIQ is received
471	 * while in user or svc (kernel) mode.
472	 *
473	 * If we're returning to abort, undef or irq mode we're returning
474	 * with the kernel mapping restored. This is OK since the reduced
475	 * mapping will be restored again before the handler we're returning
476	 * to eventually returns to user mode.
477	 */
478	mrs	r12, spsr
479	and	r12, r12, #ARM32_CPSR_MODE_MASK
480	cmp	r12, #ARM32_CPSR_MODE_USR
481	cmpne	r12, #ARM32_CPSR_MODE_SVC
482	beq	1f
483	mrs	r12, spsr
484	orr	r12, r12, #ARM32_CPSR_F
485	msr	spsr_fsxc, r12
486	pop	{r12}
487	movs	pc, lr
4881:
489	.endif
490
491	push	{lr}
492
493	.ifc	\mode\(),fiq
494	bl	thread_save_state_fiq
495	.else
496	bl	thread_save_state
497	.endif
498
499#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
500	/*
501	 * Prevent leaking information about which cache entries have been
502	 * used. We're relying on the secure monitor/dispatcher to take
503	 * care of the BTB.
504	 */
505	mov	r0, #DCACHE_OP_CLEAN_INV
506	bl	dcache_op_louis
507	write_iciallu
508#endif
509
510	/*
511	 * Use SP_abt to update core local flags.
512	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_TMP;
513	 */
514	cps     #CPSR_MODE_ABT
515	ldr     r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
516	lsl     r1, r1, #THREAD_CLF_SAVED_SHIFT
517	orr     r1, r1, #THREAD_CLF_TMP
518	str     r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
519	.ifc    \mode\(),fiq
520	cps     #CPSR_MODE_FIQ
521	.else
522	cps     #CPSR_MODE_IRQ
523	.endif
524
525	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
526	mrs	r1, spsr
527	pop	{r2}
528	pop	{r12}
529	blx	thread_state_suspend
530
531	/*
532	 * Switch to SVC mode and copy the current stack pointer, since it
533	 * already points to the tmp stack.
534	 */
535	mov	r1, sp
536	cps	#CPSR_MODE_SVC
537	mov	sp, r1
538
539	/* Passing thread index in r0 */
540	b	thread_foreign_intr_exit
541.endm
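/*
 * The call to thread_state_suspend() in the macro above follows the
 * normal AAPCS register convention; as a C-level sketch (see the
 * thread headers for the authoritative declaration):
 *
 *	int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc);
 *
 * r0 = THREAD_FLAGS_EXIT_ON_FOREIGN_INTR, r1 = the interrupted SPSR,
 * r2 = the adjusted return address, and the returned thread index is
 * then passed in r0 to thread_foreign_intr_exit().
 */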
542
543FUNC thread_excp_vect , :, align=32
544UNWIND(	.cantunwind)
545	b	.			/* Reset			*/
546	b	__thread_und_handler	/* Undefined instruction	*/
547	b	__thread_svc_handler	/* System call			*/
548	b	__thread_pabort_handler	/* Prefetch abort		*/
549	b	__thread_dabort_handler	/* Data abort			*/
550	b	.			/* Reserved			*/
551	b	__thread_irq_handler	/* IRQ				*/
552	b	__thread_fiq_handler	/* FIQ				*/
553#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
554	.macro vector_prologue_spectre
555		/*
556		 * This depends on SP being 8 byte aligned, that is, the
557		 * lowest three bits in SP are zero.
558		 *
559		 * To avoid unexpected speculation we need to invalidate
560		 * the branch predictor before we do the first branch. It
561		 * doesn't matter whether it's a conditional or an unconditional
562		 * branch; speculation can still occur.
563		 *
564		 * The idea is to form a specific bit pattern in the lowest
565		 * three bits of SP depending on which entry in the vector
566		 * we enter via.  This is done by adding 1 to SP in each
567		 * entry but the last.
568		 */
569		add	sp, sp, #1	/* 7:111 Reset			*/
570		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
571		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
572		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
573		add	sp, sp, #1	/* 3:011 Data abort		*/
574		add	sp, sp, #1	/* 2:010 Reserved		*/
575		add	sp, sp, #1	/* 1:001 IRQ			*/
576		cpsid   aif		/* 0:000 FIQ			*/
577	.endm
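	/*
	 * Worked example: an IRQ enters at the next-to-last vector slot,
	 * executes one "add sp, sp, #1" and then the "cpsid aif", so the
	 * low bits of SP hold 1. The dispatch code further down masks
	 * those bits out of SP and "add pc, pc, r0, LSL #3" then skips
	 * one 8-byte (two instruction) slot in the reversed table,
	 * landing on the read_tpidrprw/b pair for __thread_irq_handler.
	 * An FIQ adds nothing and takes the first slot.
	 */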
578
579        .balign	32
580	.global thread_excp_vect_workaround_a15
581thread_excp_vect_workaround_a15:
582	vector_prologue_spectre
583	write_tpidrprw r0
584	mrs	r0, spsr
585	cmp_spsr_user_mode r0
586	bne	1f
587	/*
588	 * Invalidate the branch predictor for the current processor.
589	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
590	 * effective.
591	 * Note that the BPIALL instruction is not effective in
592	 * invalidating the branch predictor on Cortex-A15. For that CPU,
593	 * set ACTLR[0] to 1 during early processor initialisation, and
594	 * invalidate the branch predictor by performing an ICIALLU
595	 * instruction. See also:
596	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
597	 */
598	write_iciallu
599	isb
600	b	1f
601
602        .balign	32
603	.global thread_excp_vect_workaround
604thread_excp_vect_workaround:
605	vector_prologue_spectre
606	write_tpidrprw r0
607	mrs	r0, spsr
608	cmp_spsr_user_mode r0
609	bne	1f
610	/* Invalidate the branch predictor for the current processor. */
611	write_bpiall
612	isb
613
6141:	and	r0, sp, #(BIT(0) | BIT(1) | BIT(2))
615	bic	sp, sp, #(BIT(0) | BIT(1) | BIT(2))
616	add	pc, pc, r0, LSL #3
617	nop
618
619	read_tpidrprw r0
620	b	__thread_fiq_handler	/* FIQ				*/
621	read_tpidrprw r0
622	b	__thread_irq_handler	/* IRQ				*/
623	read_tpidrprw r0
624	b	.			/* Reserved			*/
625	read_tpidrprw r0
626	b	__thread_dabort_handler	/* Data abort			*/
627	read_tpidrprw r0
628	b	__thread_pabort_handler	/* Prefetch abort		*/
629	read_tpidrprw r0
630	b	__thread_svc_handler	/* System call			*/
631	read_tpidrprw r0
632	b	__thread_und_handler	/* Undefined instruction	*/
633	read_tpidrprw r0
634	b	.			/* Reset			*/
635#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
636
637__thread_und_handler:
638	cpsid	aif
639	maybe_restore_mapping
640	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
641	mrs	r1, spsr
642	tst	r1, #CPSR_T
643	subne	lr, lr, #2
644	subeq	lr, lr, #4
645	mov	r0, #ABORT_TYPE_UNDEF
646	b	__thread_abort_common
647
648__thread_dabort_handler:
649	cpsid	aif
650	maybe_restore_mapping
651	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
652	sub	lr, lr, #8
653	mov	r0, #ABORT_TYPE_DATA
654	b	__thread_abort_common
655
656__thread_pabort_handler:
657	cpsid	aif
658	maybe_restore_mapping
659	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
660	sub	lr, lr, #4
661	mov	r0, #ABORT_TYPE_PREFETCH
662
663__thread_abort_common:
664	/*
665	 * At this label:
666	 * the CPU is in undef or abort mode
667	 * sp is still pointing to struct thread_core_local belonging to
668	 * this core.
669	 * {r0, r1} are saved in struct thread_core_local pointed to by sp
670	 * {r2-r11, ip} are untouched.
671	 * r0 holds the first argument for abort_handler()
672	 */
673
674	/*
675	 * Update core local flags.
676	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
677	 */
678	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
679	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
680	orr	r1, r1, #THREAD_CLF_ABORT
681
682	/*
683	 * Select stack and update flags accordingly
684	 *
685	 * Normal case:
686	 * If the abort stack is unused select that.
687	 *
688	 * Fatal error handling:
689	 * If we're already using the abort stack, as noted by bit
690	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
691	 * field, we select the temporary stack instead to be able to
692	 * make a stack trace of the abort in abort mode.
693	 *
694	 * r1 is initialized as a temporary stack pointer until we've
695	 * switched to system mode.
696	 */
697	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
698	orrne	r1, r1, #THREAD_CLF_TMP /* flags |= THREAD_CLF_TMP; */
699	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
700	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
701	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
702
703	/*
704	 * Store registers on the stack, fitting struct thread_abort_regs,
705	 * starting from the end of the struct:
706	 * {r2-r11, ip}
707	 * Then load the previously saved {r0-r1} and store them together
708	 * with the fields down to and including pad.
709	 * After this only {usr_sp, usr_lr} are missing in the struct.
710	 */
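	/*
	 * Resulting layout on the selected stack, ascending addresses,
	 * assumed to match struct thread_abort_regs:
	 *	usr_sp, usr_lr, pad, spsr, elr, r0-r11, ip
	 * The usr_sp/usr_lr slots are filled in right after the switch
	 * to system mode below.
	 */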
711	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
712	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
713	/* Push the original {r0-r1} on the selected stack */
714	stmdb	r1!, {r2-r3}
715	mrs	r3, spsr
716	/* Push {pad, spsr, elr} on the selected stack */
717	stmdb	r1!, {r2, r3, lr}
718
719	cps	#CPSR_MODE_SYS
720	str	lr, [r1, #-4]!
721	str	sp, [r1, #-4]!
722	mov	sp, r1
723
724	bl	abort_handler
725
726	mov	ip, sp
727	ldr	sp, [ip], #4
728	ldr	lr, [ip], #4
729
730	/*
731	 * Even if we entered via CPSR_MODE_UND, we are returning via
732	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
733	 * here.
734	 */
735	cps	#CPSR_MODE_ABT
736	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
737	msr	spsr_fsxc, r1
738
739	/* Update core local flags */
740	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
741	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
742	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
743
744	cmp_spsr_user_mode r1
745	ldm	ip, {r0-r11, ip}
746	movsne	pc, lr
747	b	eret_to_user_mode
748	/* end thread_abort_common */
749
750__thread_svc_handler:
751	cpsid	aif
752
753	maybe_restore_mapping
754
755	push	{r0-r7, lr}
756	mrs	r0, spsr
757	push	{r0}
758	mov	r0, sp
759	bl	thread_svc_handler
760	cpsid	aif	/* In case something was unmasked */
761	pop	{r0}
762	msr	spsr_fsxc, r0
763	cmp_spsr_user_mode r0
764	pop	{r0-r7, lr}
765	movsne	pc, lr
766	b	eret_to_user_mode
767	/* end thread_svc_handler */
768
769__thread_fiq_handler:
770#if defined(CFG_ARM_GICV3)
771	foreign_intr_handler	fiq
772#else
773	native_intr_handler	fiq
774#endif
775	/* end thread_fiq_handler */
776
777__thread_irq_handler:
778#if defined(CFG_ARM_GICV3)
779	native_intr_handler	irq
780#else
781	foreign_intr_handler	irq
782#endif
783	/* end thread_irq_handler */
784
785	/*
786	 * Returns to user mode.
787	 * Expects to be jumped to with lr pointing to the user space
788	 * address to jump to and spsr holding the desired cpsr. Async
789	 * abort, irq and fiq should be masked.
790	 */
791eret_to_user_mode:
792	write_tpidrprw r0
793#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
794	write_tpidruro r1
795#endif
796
797#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
798	ldr	r0, =thread_user_kcode_offset
799	ldr	r0, [r0]
800	read_vbar r1
801	sub	r1, r1, r0
802	write_vbar r1
803	isb
804
805	/* Jump into the reduced mapping before the full mapping is removed */
806	ldr	r1, =1f
807	sub	r1, r1, r0
808	bx	r1
8091:
810#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
811
812#ifdef CFG_WITH_LPAE
813	read_ttbr0_64bit r0, r1
814#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
815	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
816#endif
817	/* switch to user ASID */
818	orr	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
819	write_ttbr0_64bit r0, r1
820	isb
821#else /*!CFG_WITH_LPAE*/
822#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
823	read_ttbcr r0
824	orr	r0, r0, #TTBCR_PD1
825	write_ttbcr r0
826	isb
827#endif
828	read_contextidr r0
829	orr	r0, r0, #BIT(0)
830	write_contextidr r0
831	isb
832#endif /*!CFG_WITH_LPAE*/
833
834	read_tpidrprw r0
835#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
836	read_tpidruro r1
837#endif
838
839	movs	pc, lr
840
841	/*
842	 * void icache_inv_user_range(void *addr, size_t size);
843	 *
844	 * which means executing with the reduced mapping, so the code needs
845	 * to be located here together with the vector.
846	 * to be located here together with the vector.
847	 */
848	.global icache_inv_user_range
849	.type icache_inv_user_range , %function
850icache_inv_user_range:
851	push	{r4-r7}
852
853	/* Mask all exceptions */
854	mrs	r4, cpsr	/* This register must be preserved */
855	cpsid	aif
856
857#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
858	ldr	r2, =thread_user_kcode_offset
859	ldr	r2, [r2]
860	read_vbar r5		/* This register must be preserved */
861	sub	r3, r5, r2
862	write_vbar r3
863	isb
864
865	/* Jump into the reduced mapping before the full mapping is removed */
866	ldr	r3, =1f
867	sub	r3, r3, r2
868	bx	r3
8691:
870#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
871
872#ifdef CFG_WITH_LPAE
873	read_ttbr0_64bit r6, r7	/* These registers must be preserved */
874	/* switch to user ASID */
875	orr	r3, r7, #BIT(TTBR_ASID_SHIFT - 32)
876#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
877	add	r2, r6, #CORE_MMU_L1_TBL_OFFSET
878	write_ttbr0_64bit r2, r3
879#else
880	write_ttbr0_64bit r6, r3
881#endif
882	isb
883#else /*!CFG_WITH_LPAE*/
884#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
885	read_ttbcr r6	/* This register must be preserved */
886	orr	r2, r6, #TTBCR_PD1
887	write_ttbcr r2
888	isb
889#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
890	read_contextidr r7	/* This register must be preserved */
891	orr	r2, r7, #BIT(0)
892	write_contextidr r2
893	isb
894#endif /*!CFG_WITH_LPAE*/
895
896	/*
897	 * Do the actual icache invalidation
898	 */
899
900	/* Calculate minimum icache line size, result in r2 */
901	read_ctr r3
902	and     r3, r3, #CTR_IMINLINE_MASK
903	mov     r2, #CTR_WORD_SIZE
904	lsl     r2, r2, r3
905
906	add	r1, r0, r1
907	sub	r3, r2, #1
908	bic	r0, r0, r3
9091:
910	write_icimvau r0
911	add	r0, r0, r2
912	cmp	r0, r1
913	blo	1b
914
915	/* Invalidate entire branch predictor array inner shareable */
916	write_bpiallis
917
918	dsb	ishst
919	isb
920
921#ifdef CFG_WITH_LPAE
922	write_ttbr0_64bit r6, r7
923	isb
924#else /*!CFG_WITH_LPAE*/
925	write_contextidr r7
926	isb
927#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
928	write_ttbcr r6
929	isb
930#endif
931#endif /*!CFG_WITH_LPAE*/
932
933#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
934	write_vbar r5
935	isb
936	/*
937	 * The PC is adjusted unconditionally to guard against the
938	 * case where there was an FIQ just before we did the "cpsid aif".
939	 */
940	ldr	r0, =1f
941	bx	r0
9421:
943#endif
944
945	msr	cpsr_fsxc, r4	/* Restore exceptions */
946	pop	{r4-r7}
947	bx	lr		/* End of icache_inv_user_range() */
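	/*
	 * Usage sketch (hypothetical caller): after new instructions
	 * have been written to a user space mapping, the caller
	 * invalidates the corresponding icache lines with the user ASID
	 * active:
	 *
	 *	icache_inv_user_range((void *)user_va, size);
	 */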
948
949	/*
950	 * Make sure that literals are placed before the
951	 * thread_excp_vect_end label.
952	 */
953	.pool
954	.global thread_excp_vect_end
955thread_excp_vect_end:
956END_FUNC thread_excp_vect
957