/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016-2020, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>

	.syntax unified
	.arch_extension sec

	.macro cmp_spsr_user_mode reg:req
		/*
		 * We only need to test the lower 4 bits since bit 4
		 * (0x10) is always set. The test yields EQ (Z set) when
		 * the mode is user mode and NE otherwise.
		 */
		tst	\reg, #0x0f
	.endm

FUNC thread_set_abt_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_abt_sp

FUNC thread_set_und_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_UND
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_und_sp

FUNC thread_set_irq_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_fiq_sp

FUNC thread_get_usr_sp , :
	mrs	r1, cpsr
	cpsid	aif
	cps	#CPSR_MODE_SYS
	mov	r0, sp
	msr	cpsr, r1
	bx	lr
END_FUNC thread_get_usr_sp

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
UNWIND(	.cantunwind)
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4

	cps	#CPSR_MODE_SVC
	ldr	r1, [r12], #4
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4
	msr	spsr_fsxc, r1

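	/* r1 = saved pc and r2 = saved cpsr of the context being resumed */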
	ldm	r12, {r1, r2}

	/*
	 * Switch to a mode other than SVC since we need to set the SPSR
	 * in order to return into the old state properly, and that old
	 * state may itself be SVC mode.
	 */
	cps	#CPSR_MODE_ABT
	cmp_spsr_user_mode r2
	mov	lr, r1
	msr	spsr_fsxc, r2
	ldm	r0, {r0-r12}
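	/*
	 * If we aren't returning to user mode, do the exception return
	 * here. Returns to user mode go via eret_to_user_mode, which
	 * switches back to the user mapping/ASID first.
	 */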
	movsne	pc, lr
	b	eret_to_user_mode
END_FUNC thread_resume

/*
 * Disables async abort, IRQ and FIQ and saves the state of the thread
 * from FIQ mode, which has the banked r8-r12 registers. Returns the
 * original CPSR.
 */
LOCAL_FUNC thread_save_state_fiq , :
UNWIND(	.cantunwind)
	mov	r9, lr

	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r8, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs
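	/* r0 now points to the current thread's struct thread_ctx_regs */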

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}

	cps	#CPSR_MODE_SYS
	stm	r0!, {r8-r12}
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	/* back to fiq mode */
	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r8			/* Return original CPSR */
	bx	r9
END_FUNC thread_save_state_fiq

/*
 * Disables async abort, IRQ and FIQ and saves the state of the thread.
 * Returns the original CPSR.
 */
FUNC thread_save_state , :
UNWIND(	.cantunwind)
	push	{r12, lr}
	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps	#CPSR_MODE_SYS
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
END_FUNC thread_save_state

/*
 * unsigned long thread_smc(unsigned long func_id, unsigned long a1,
 *			    unsigned long a2, unsigned long a3)
 */
FUNC thread_smc , :
	push	{r4-r7}
UNWIND(	.save	{r4-r7})
	smc	#0
	pop	{r4-r7}
	bx	lr
END_FUNC thread_smc

/* void thread_smccc(struct thread_smc_args *arg_res) */
FUNC thread_smccc , :
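	/*
	 * r0 points to a struct thread_smc_args holding the eight SMCCC
	 * arguments; the eight return values are stored back into the
	 * same struct before returning.
	 */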
	push	{r4-r7}
	push	{r0, lr}
	ldm	r0, {r0-r7}
#ifdef CFG_CORE_SEL2_SPMC
	hvc	#0
#else
	smc	#0
#endif
	pop	{r12, lr}
	stm	r12, {r0-r7}
	pop	{r4-r7}
	bx	lr
END_FUNC thread_smccc

FUNC thread_init_vbar , :
	/* Set vector (VBAR) */
	write_vbar r0
	bx	lr
END_FUNC thread_init_vbar
DECLARE_KEEP_PAGER thread_init_vbar

/*
 * Below are low level routines handling entry and return from user mode.
 *
 * thread_enter_user_mode() saves all the registers that user mode can
 * change so that kernel mode can restore needed registers when resuming
 * execution after the call to thread_enter_user_mode() has returned.
 * thread_enter_user_mode() doesn't return directly since it enters user
 * mode instead; it's thread_unwind_user_mode() that does the returning
 * by restoring the registers saved by thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to the
 * caller: the user TA calls _utee_return, the user TA calls _utee_panic
 * or an abort occurs.
 *
 * Calls to _utee_return or _utee_panic are handled as:
 * __thread_svc_handler() -> thread_svc_handler() -> tee_svc_do_call()
 * which calls syscall_return() or syscall_panic().
 *
 * These function calls return normally, except thread_svc_handler()
 * which is an exception handling routine and therefore reads the return
 * address and SPSR to restore from the stack. syscall_return() and
 * syscall_panic() change the return address and SPSR used by
 * thread_svc_handler() so that, instead of returning into user mode as
 * with other syscalls, it returns into thread_unwind_user_mode() in
 * kernel mode. When thread_svc_handler() returns, the stack pointer is
 * at the point where thread_enter_user_mode() left it, which is where
 * thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way but by thread_abort_handler()
 * instead; when the pager sees that it's an abort from user mode that
 * can't be handled, it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */
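
/*
 * Call flow sketch for a normal return via _utee_return, summarizing
 * the description above:
 *
 *   __thread_enter_user_mode()          saves registers, enters user mode
 *     ... user TA runs, calls _utee_return ...
 *   __thread_svc_handler()
 *     thread_svc_handler() -> tee_svc_do_call() -> syscall_return()
 *       (redirects the exception return into kernel mode)
 *   thread_unwind_user_mode()           restores the registers saved above
 */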

/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 *
 * This function depends on being called with exceptions masked.
 */
FUNC __thread_enter_user_mode , :
UNWIND(	.cantunwind)
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function had returned. This is also used in
	 * syscall_panic().
	 *
	 * If the stack usage of this function is changed,
	 * thread_unwind_user_mode() has to be updated.
	 */
	push	{r4-r12,lr}

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r4, sp
	ldr	sp, [r0, #THREAD_CTX_REGS_USR_SP]
	cps	#CPSR_MODE_SVC

	push	{r1, r2, r4, r5}
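	/*
	 * Save the exit status pointers (r1, r2) and the old user sp (r4)
	 * for thread_unwind_user_mode(); r5 is included to match the
	 * pop {r4-r7} done there.
	 */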

	/* Prepare user mode entry via eret_to_user_mode */
	ldr	lr, [r0, #THREAD_CTX_REGS_PC]
	ldr	r4, [r0, #THREAD_CTX_REGS_CPSR]
	msr	spsr_fsxc, r4

	ldm	r0, {r0-r12}

	b	eret_to_user_mode
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *              uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND(	.cantunwind)
	/* Match push {r1, r2, r4, r5} in __thread_enter_user_mode() */
	pop	{r4-r7}
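	/* r4 = exit_status0, r5 = exit_status1, r6 = old user sp, r7 unused */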
	str	r1, [r4]
	str	r2, [r5]

	/* Restore old user sp */
	cps	#CPSR_MODE_SYS
	mov	sp, r6
	cps	#CPSR_MODE_SVC

	/* Match push {r4-r12,lr} in __thread_enter_user_mode() */
	pop	{r4-r12,pc}
END_FUNC thread_unwind_user_mode

	.macro maybe_restore_mapping
		/*
		 * This macro is a bit hard to read due to all the ifdefs:
		 * we're testing for two different configs, which makes four
		 * different combinations.
		 *
		 * - With LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 * - Without LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 */

		/*
		 * At this point we can't rely on any memory being writable
		 * yet, so we're using TPIDRPRW to store r0 and, if with
		 * LPAE, TPIDRURO to store r1 too.
		 */
		write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		write_tpidruro r1
#endif

#ifdef CFG_WITH_LPAE
		read_ttbr0_64bit r0, r1
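		/*
		 * The low ASID bit is set while running with the user
		 * mapping/ASID (see eret_to_user_mode); if it's clear,
		 * the kernel mapping is already active and there's
		 * nothing to restore.
		 */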
		tst	r1, #BIT(TTBR_ASID_SHIFT - 32)
		beq	11f

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/*
		 * Update the mapping to use the full kernel mode mapping.
		 * Since the translation table could reside above 4GB we'll
		 * have to use 64-bit arithmetics.
		 */
		subs	r0, r0, #CORE_MMU_BASE_TABLE_OFFSET
		sbc	r1, r1, #0
#endif
		bic	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
		write_ttbr0_64bit r0, r1
		isb

#else /*!CFG_WITH_LPAE*/
		read_contextidr r0
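		/*
		 * Bit 0 of CONTEXTIDR is set while running with the user
		 * mapping/ASID (see eret_to_user_mode); if it's clear,
		 * the kernel mapping is already active.
		 */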
		tst	r0, #1
		beq	11f

		/* Update the mapping to use the full kernel mode mapping. */
		bic	r0, r0, #1
		write_contextidr r0
		isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		read_ttbcr r0
		bic	r0, r0, #TTBCR_PD1
		write_ttbcr r0
		isb
#endif

#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		ldr	r0, =thread_user_kcode_offset
		ldr	r0, [r0]
		read_vbar r1
		add	r1, r1, r0
		write_vbar r1
		isb

	11:	/*
		 * The PC is adjusted unconditionally to guard against the
		 * case where there was an FIQ just before we did the
		 * "cpsid aif".
		 */
		ldr	r0, =22f
		bx	r0
	22:
#else
	11:
#endif
		read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		read_tpidruro r1
#endif
	.endm

/* The handler of native interrupts. */
.macro	native_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	/*
	 * FIQ and IRQ have a +4 offset for lr compared to the preferred
	 * return address.
	 */
	sub	lr, lr, #4

	/*
	 * We always save {r0-r3}. In IRQ mode we also save r12. In FIQ
	 * mode we save the banked FIQ registers {r8-r12} since the secure
	 * monitor doesn't save those. The treatment of the banked FIQ
	 * registers is somewhat analogous to the lazy save of VFP
	 * registers.
	 */
	.ifc	\mode\(),fiq
	push	{r0-r3, r8-r12, lr}
	.else
	push	{r0-r3, r12, lr}
	.endif

	bl	thread_check_canaries
	bl	itr_core_handler

	mrs	r0, spsr
	cmp_spsr_user_mode r0

	.ifc	\mode\(),fiq
	pop	{r0-r3, r8-r12, lr}
	.else
	pop	{r0-r3, r12, lr}
	.endif

	movsne	pc, lr
	b	eret_to_user_mode
.endm

/* The handler of foreign interrupts. */
.macro foreign_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	sub	lr, lr, #4
	push	{r12}

	.ifc	\mode\(),fiq
	/*
	 * If a foreign (non-secure) interrupt is received as a FIQ we
	 * need to check whether we're in a state where it can be saved,
	 * or whether we need to mask the interrupt to have it handled
	 * later.
	 *
	 * The window when this is needed is quite narrow: it lasts from
	 * entering the exception vector until the "cpsid" instruction of
	 * the handler has been executed.
	 *
	 * Currently we can save the state properly if the FIQ is
	 * received while in user or svc (kernel) mode.
	 *
	 * If we're returning to abort, undef or irq mode we return with
	 * the full mapping restored. This is OK since the handler we're
	 * returning to will restore the reduced mapping before it
	 * eventually returns to user mode.
	 */
	mrs	r12, spsr
	and	r12, r12, #ARM32_CPSR_MODE_MASK
	cmp	r12, #ARM32_CPSR_MODE_USR
	cmpne	r12, #ARM32_CPSR_MODE_SVC
	beq	1f
	mrs	r12, spsr
	orr	r12, r12, #ARM32_CPSR_F
	msr	spsr_fsxc, r12
	pop	{r12}
	movs	pc, lr
1:
	.endif

	push	{lr}

	.ifc	\mode\(),fiq
	bl	thread_save_state_fiq
	.else
	bl	thread_save_state
	.endif

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which cache entries have
	 * been used. We're relying on the secure monitor/dispatcher to
	 * take care of the BTB.
	 */
	mov	r0, #DCACHE_OP_CLEAN_INV
	bl	dcache_op_louis
	write_iciallu
#endif

	/*
	 * Use SP_abt to update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_TMP;
	 */
	cps	#CPSR_MODE_ABT
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_TMP
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	.ifc	\mode\(),fiq
	cps	#CPSR_MODE_FIQ
	.else
	cps	#CPSR_MODE_IRQ
	.endif

	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	r1, spsr
	pop	{r2}
	pop	{r12}
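	/* r0 = flags, r1 = cpsr and r2 = pc of the interrupted context */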
	blx	thread_state_suspend

	/*
	 * Switch to SVC mode and copy the current stack pointer since it
	 * already is the tmp stack.
	 */
	mov	r1, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r1

	/* Passing thread index in r0 */
	b	thread_foreign_intr_exit
.endm

FUNC thread_excp_vect , :, align=32
UNWIND(	.cantunwind)
	b	.			/* Reset			*/
	b	__thread_und_handler	/* Undefined instruction	*/
	b	__thread_svc_handler	/* System call			*/
	b	__thread_pabort_handler	/* Prefetch abort		*/
	b	__thread_dabort_handler	/* Data abort			*/
	b	.			/* Reserved			*/
	b	__thread_irq_handler	/* IRQ				*/
	b	__thread_fiq_handler	/* FIQ				*/
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8 byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * To avoid unexpected speculation we need to invalidate
		 * the branch predictor before we do the first branch. It
		 * doesn't matter whether it's a conditional or an
		 * unconditional branch, speculation can still occur.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * we enter via. This is done by adding 1 to SP in each
		 * entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		cpsid   aif		/* 0:000 FIQ			*/
	.endm

	.balign	32
	.global thread_excp_vect_wa_a15_spectre_v2
thread_excp_vect_wa_a15_spectre_v2:
	vector_prologue_spectre
	write_tpidrprw r0
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.balign	32
	.global thread_excp_vect_wa_spectre_v2
thread_excp_vect_wa_spectre_v2:
	vector_prologue_spectre
	write_tpidrprw r0
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:	and	r0, sp, #(BIT(0) | BIT(1) | BIT(2))
	bic	sp, sp, #(BIT(0) | BIT(1) | BIT(2))
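	/*
	 * Each entry in the table below is 8 bytes (two instructions).
	 * When the add executes, pc reads as the address of the first
	 * entry (just past the nop), so adding r0 * 8 branches to the
	 * entry matching the bit pattern formed in SP above, 0:FIQ up
	 * to 7:Reset.
	 */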
	add	pc, pc, r0, LSL #3
	nop

	read_tpidrprw r0
	b	__thread_fiq_handler	/* FIQ				*/
	read_tpidrprw r0
	b	__thread_irq_handler	/* IRQ				*/
	read_tpidrprw r0
	b	.			/* Reserved			*/
	read_tpidrprw r0
	b	__thread_dabort_handler	/* Data abort			*/
	read_tpidrprw r0
	b	__thread_pabort_handler	/* Prefetch abort		*/
	read_tpidrprw r0
	b	__thread_svc_handler	/* System call			*/
	read_tpidrprw r0
	b	__thread_und_handler	/* Undefined instruction	*/
	read_tpidrprw r0
	b	.			/* Reset			*/
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

__thread_und_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	mrs	r1, spsr
	tst	r1, #CPSR_T
	subne	lr, lr, #2
	subeq	lr, lr, #4
	mov	r0, #ABORT_TYPE_UNDEF
	b	__thread_abort_common

__thread_dabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #8
	mov	r0, #ABORT_TYPE_DATA
	b	__thread_abort_common

__thread_pabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #4
	mov	r0, #ABORT_TYPE_PREFETCH

__thread_abort_common:
	/*
	 * At this label:
	 * cpsr is in mode undef or abort
	 * sp is still pointing to the struct thread_core_local belonging
	 * to this core.
	 * {r0, r1} are saved in the struct thread_core_local pointed to
	 * by sp
	 * {r2-r11, ip} are untouched.
	 * r0 holds the first argument for abort_handler()
	 */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_ABORT

	/*
	 * Select stack and update flags accordingly
	 *
	 * Normal case:
	 * If the abort stack is unused select that.
	 *
	 * Fatal error handling:
	 * If we're already using the abort stack, as noted by bit
	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
	 * field, we select the temporary stack instead to be able to
	 * make a stack trace of the abort in abort mode.
	 *
	 * r1 is used as a temporary stack pointer until we've switched
	 * to system mode.
	 */
	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	orrne	r1, r1, #THREAD_CLF_TMP /* flags |= THREAD_CLF_TMP; */
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]

	/*
	 * Store registers on the stack, filling struct thread_abort_regs
	 * from the end of the struct: {r2-r11, ip} first, then the
	 * previously saved {r0, r1} and finally {pad, spsr, elr}.
	 * After this only {usr_sp, usr_lr} are missing in the struct.
	 */
	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
	/* Push the original {r0-r1} on the selected stack */
	stmdb	r1!, {r2-r3}
	mrs	r3, spsr
	/* Push {pad, spsr, elr} on the selected stack */
	stmdb	r1!, {r2, r3, lr}

	cps	#CPSR_MODE_SYS
	str	lr, [r1, #-4]!
	str	sp, [r1, #-4]!
	mov	sp, r1
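	/*
	 * sp (and r1) now point to a completed struct thread_abort_regs,
	 * passed as the second argument to abort_handler().
	 */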

	bl	abort_handler

	mov	ip, sp
	ldr	sp, [ip], #4
	ldr	lr, [ip], #4

	/*
	 * Even if we entered via CPSR_MODE_UND, we are returning via
	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
	 * here.
	 */
	cps	#CPSR_MODE_ABT
	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
	msr	spsr_fsxc, r1

	/* Update core local flags */
	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	cmp_spsr_user_mode r1
	ldm	ip, {r0-r11, ip}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_abort_common */

__thread_svc_handler:
	cpsid	aif

	maybe_restore_mapping

	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp
	bl	thread_svc_handler
	cpsid	aif	/* In case something was unmasked */
	pop	{r0}
	msr	spsr_fsxc, r0
	cmp_spsr_user_mode r0
	pop	{r0-r7, lr}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_svc_handler */

__thread_fiq_handler:
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
	/* end thread_fiq_handler */

__thread_irq_handler:
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
	/* end thread_irq_handler */

	/*
	 * Returns to user mode.
	 * Expects to be branched to with lr holding the user space
	 * address to return to and spsr holding the desired cpsr. Async
	 * abort, irq and fiq should be masked.
	 */
eret_to_user_mode:
	write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	write_tpidruro r1
#endif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r0, =thread_user_kcode_offset
	ldr	r0, [r0]
	read_vbar r1
	sub	r1, r1, r0
	write_vbar r1
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r1, =1f
	sub	r1, r1, r0
	bx	r1
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r0, r1
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r0, r0, #CORE_MMU_BASE_TABLE_OFFSET
#endif
	/* switch to user ASID */
	orr	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r0, r1
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r0
	orr	r0, r0, #TTBCR_PD1
	write_ttbcr r0
	isb
#endif
	read_contextidr r0
	orr	r0, r0, #BIT(0)
	write_contextidr r0
	isb
#endif /*!CFG_WITH_LPAE*/

	read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	read_tpidruro r1
#endif

	movs	pc, lr

	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active,
	 * which means executing with the reduced mapping, so the code
	 * needs to be located here together with the vector.
	 */
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	push	{r4-r7}

	/* Mask all exceptions */
	mrs	r4, cpsr	/* This register must be preserved */
	cpsid	aif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r2, =thread_user_kcode_offset
	ldr	r2, [r2]
	read_vbar r5		/* This register must be preserved */
	sub	r3, r5, r2
	write_vbar r3
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r3, =1f
	sub	r3, r3, r2
	bx	r3
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r6, r7	/* These registers must be preserved */
	/* switch to user ASID */
	orr	r3, r7, #BIT(TTBR_ASID_SHIFT - 32)
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r2, r6, #CORE_MMU_BASE_TABLE_OFFSET
	write_ttbr0_64bit r2, r3
#else
	write_ttbr0_64bit r6, r3
#endif
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r6	/* This register must be preserved */
	orr	r2, r6, #TTBCR_PD1
	write_ttbcr r2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	read_contextidr r7	/* This register must be preserved */
	orr	r2, r7, #BIT(0)
	write_contextidr r2
	isb
#endif /*!CFG_WITH_LPAE*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate minimum icache line size, result in r2 */
	read_ctr r3
	and	r3, r3, #CTR_IMINLINE_MASK
	mov	r2, #CTR_WORD_SIZE
	lsl	r2, r2, r3

	add	r1, r0, r1
	sub	r3, r2, #1
	bic	r0, r0, r3
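	/* r0 = start rounded down to a cache line boundary, r1 = end address */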
1:
	write_icimvau r0
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b

	/* Invalidate entire branch predictor array inner shareable */
	write_bpiallis

	dsb	ishst
	isb

#ifdef CFG_WITH_LPAE
	write_ttbr0_64bit r6, r7
	isb
#else /*!CFG_WITH_LPAE*/
	write_contextidr r7
	isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_ttbcr r6
	isb
#endif
#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_vbar r5
	isb
	/*
	 * The PC is adjusted unconditionally to guard against the case
	 * where there was an FIQ just before we did the "cpsid aif".
	 */
	ldr	r0, =1f
	bx	r0
1:
#endif

	msr	cpsr_fsxc, r4	/* Restore exceptions */
	pop	{r4-r7}
	bx	lr		/* End of icache_inv_user_range() */

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect