/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016-2020, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>

	.syntax unified
	.arch_extension sec

	.macro cmp_spsr_user_mode reg:req
		/*
		 * We're only testing the lower 4 bits as bit 4 (0x10)
		 * is always set.
		 */
		tst	\reg, #0x0f
	.endm

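/*
 * The thread_set_{abt,und,irq,fiq}_sp() helpers below install a new banked
 * stack pointer for the named mode: switch to that mode, load sp from the
 * first argument (r0) and restore the previous CPSR.
 */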
FUNC thread_set_abt_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_abt_sp

FUNC thread_set_und_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_UND
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_und_sp

FUNC thread_set_irq_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_fiq_sp

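/*
 * The thread_{get,set}_usr_{sp,lr}() helpers below access the banked user
 * mode sp/lr by briefly switching to System mode (which shares those
 * registers with User mode) with asynchronous abort, IRQ and FIQ masked,
 * and then restore the previous CPSR.
 */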
FUNC thread_get_usr_sp , :
	mrs	r1, cpsr
	cpsid	aif
	cps	#CPSR_MODE_SYS
	mov	r0, sp
	msr	cpsr, r1
	bx	lr
END_FUNC thread_get_usr_sp

FUNC thread_get_usr_lr , :
	mrs	r1, cpsr
	cpsid	aif
	cps	#CPSR_MODE_SYS
	mov	r0, lr
	msr	cpsr, r1
	bx	lr
END_FUNC thread_get_usr_lr

FUNC thread_set_usr_lr , :
	mrs	r1, cpsr
	cpsid	aif
	cps	#CPSR_MODE_SYS
	mov	lr, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_usr_lr

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
UNWIND(	.cantunwind)
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4

	cps	#CPSR_MODE_SVC
	ldr	r1, [r12], #4
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4
	msr	spsr_fsxc, r1

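	/*
	 * Load the return address into r1 and the CPSR to resume with into
	 * r2 from the remaining part of struct thread_ctx_regs.
	 */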
	ldm	r12, {r1, r2}

	/*
	 * Switch to a mode other than SVC since we need to set the SPSR in
	 * order to return into the old state properly, and it may be SVC
	 * mode we're returning to.
	 */
	cps	#CPSR_MODE_ABT
	cmp_spsr_user_mode r2
	mov	lr, r1
	msr	spsr_fsxc, r2
	ldm	r0, {r0-r12}
	movsne	pc, lr
	b	eret_to_user_mode
END_FUNC thread_resume

/*
 * Disables IRQ and FIQ and saves the state of the thread while in FIQ mode,
 * which has banked r8-r12 registers; returns the original CPSR.
 */
LOCAL_FUNC thread_save_state_fiq , :
UNWIND(	.cantunwind)
	mov	r9, lr

	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r8, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs
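	/* r0 now points to the current thread's struct thread_ctx_regs */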

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}

	cps	#CPSR_MODE_SYS
	stm	r0!, {r8-r12}
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	/* back to fiq mode */
	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r8			/* Return original CPSR */
	bx	r9
END_FUNC thread_save_state_fiq

/*
 * Disables IRQ and FIQ and saves the state of the thread; returns the
 * original CPSR.
 */
FUNC thread_save_state , :
UNWIND(	.cantunwind)
	push	{r12, lr}
	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps	#CPSR_MODE_SYS
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
END_FUNC thread_save_state

#ifdef CFG_CORE_SEL2_SPMC
/*
 * unsigned long thread_hvc(unsigned long func_id, unsigned long a1,
 *			    unsigned long a2, unsigned long a3)
 */
FUNC thread_hvc , :
	push	{r4-r7}
UNWIND(	.save	{r4-r7})
	hvc	#0
	pop	{r4-r7}
	bx	lr
END_FUNC thread_hvc
#endif /*CFG_CORE_SEL2_SPMC*/

/*
 * unsigned long thread_smc(unsigned long func_id, unsigned long a1,
 *			    unsigned long a2, unsigned long a3)
 */
FUNC thread_smc , :
	push	{r4-r7}
UNWIND(	.save	{r4-r7})
	smc	#0
	pop	{r4-r7}
	bx	lr
END_FUNC thread_smc

/* void thread_smccc(struct thread_smc_args *arg_res) */
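/*
 * Loads {r0-r7} (a0-a7 in SMCCC terms) from the struct pointed to by
 * arg_res, issues the HVC or SMC and stores the returned {r0-r7} back into
 * the same struct.
 */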
FUNC thread_smccc , :
	push	{r4-r7}
	push	{r0, lr}
	ldm	r0, {r0-r7}
#ifdef CFG_CORE_SEL2_SPMC
	hvc	#0
#else
	smc	#0
#endif
	pop	{r12, lr}
	stm	r12, {r0-r7}
	pop	{r4-r7}
	bx	lr
END_FUNC thread_smccc

FUNC thread_init_vbar , :
	/* Set vector (VBAR) */
	write_vbar r0
	bx	lr
END_FUNC thread_init_vbar
DECLARE_KEEP_PAGER thread_init_vbar

/*
 * Below are low level routines handling entry and return from user mode.
 *
 * thread_enter_user_mode() saves all the registers that user mode can
 * change so that kernel mode can restore the needed registers when
 * resuming execution after the call to thread_enter_user_mode() has
 * returned. thread_enter_user_mode() doesn't return directly since it
 * enters user mode instead; it's thread_unwind_user_mode() that does the
 * returning by restoring the registers saved by thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to its
 * caller: the user TA calls _utee_return, the user TA calls _utee_panic,
 * or through an abort.
 *
 * Calls to _utee_return or _utee_panic are handled as:
 * __thread_svc_handler() -> thread_scall_handler() -> scall_do_call() which
 * calls syscall_return() or syscall_panic().
 *
 * These function calls return normally, except thread_scall_handler()
 * which is an exception handling routine and thus reads the return address
 * and SPSR to restore from the stack. syscall_return() and syscall_panic()
 * change the return address and SPSR used by thread_scall_handler() so
 * that, instead of returning into user mode as with other syscalls, it
 * returns into thread_unwind_user_mode() in kernel mode. When
 * thread_scall_handler() returns, the stack pointer is at the point where
 * thread_enter_user_mode() left it, so this is where
 * thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way, but by thread_abort_handler()
 * instead: when the pager sees that it's an abort from user mode that
 * can't be handled, it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */

/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 *
 * This function depends on being called with exceptions masked.
 */
FUNC __thread_enter_user_mode , :
UNWIND(	.cantunwind)
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function had returned. This is also used in
	 * syscall_panic().
	 *
	 * If the stack usage of this function is changed
	 * thread_unwind_user_mode() has to be updated.
	 */
	push	{r4-r12,lr}

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r4, sp
	ldr	sp, [r0, #THREAD_CTX_REGS_USR_SP]
	cps	#CPSR_MODE_SVC

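	/*
	 * Save the exit_status0/exit_status1 pointers and the old user sp
	 * (r5 appears to be included only to keep the stack 8-byte
	 * aligned); matched by the pop in thread_unwind_user_mode().
	 */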
	push	{r1, r2, r4, r5}

	/* Prepare user mode entry via eret_to_user_mode */
	ldr	lr, [r0, #THREAD_CTX_REGS_PC]
	ldr	r4, [r0, #THREAD_CTX_REGS_CPSR]
	msr	spsr_fsxc, r4

	ldm	r0, {r0-r12}

	b	eret_to_user_mode
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *              uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND(	.cantunwind)
	/* Match push {r1, r2, r4, r5} in __thread_enter_user_mode() */
	pop	{r4-r7}
	str	r1, [r4]
	str	r2, [r5]

	/* Restore old user sp */
	cps	#CPSR_MODE_SYS
	mov	sp, r6
	cps	#CPSR_MODE_SVC

	/* Match push {r4-r12,lr} in __thread_enter_user_mode() */
	pop	{r4-r12,pc}
END_FUNC thread_unwind_user_mode

	.macro maybe_restore_mapping
		/*
		 * This macro is a bit hard to read due to all the ifdefs:
		 * we're testing for two different configs, which makes four
		 * different combinations.
		 *
		 * - With LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 * - Without LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 */

		/*
		 * At this point we can't rely on any memory being writable
		 * yet, so we're using TPIDRPRW to store r0, and TPIDRURO to
		 * store r1 too when needed.
		 */
		write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		write_tpidruro r1
#endif

#ifdef CFG_WITH_LPAE
		read_ttbr0_64bit r0, r1
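		/*
		 * Bit 0 of the ASID (upper word of TTBR0) set means we
		 * entered with the user mapping active and must switch back
		 * to the kernel ASID, matching the CONTEXTIDR bit 0 test in
		 * the non-LPAE path below.
		 */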
		tst	r1, #BIT(TTBR_ASID_SHIFT - 32)
		beq	11f

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/*
		 * Update the mapping to use the full kernel mode mapping.
		 * Since the translation table could reside above 4GB we'll
		 * have to use 64-bit arithmetic.
		 */
		subs	r0, r0, #CORE_MMU_BASE_TABLE_OFFSET
		sbc	r1, r1, #0
#endif
		bic	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
		write_ttbr0_64bit r0, r1
		isb

#else /*!CFG_WITH_LPAE*/
		read_contextidr r0
		tst	r0, #1
		beq	11f

		/* Update the mapping to use the full kernel mode mapping. */
		bic	r0, r0, #1
		write_contextidr r0
		isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		read_ttbcr r0
		bic	r0, r0, #TTBCR_PD1
		write_ttbcr r0
		isb
#endif

#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		ldr	r0, =thread_user_kcode_offset
		ldr	r0, [r0]
		read_vbar r1
		add	r1, r1, r0
		write_vbar r1
		isb

	11:	/*
		 * The PC is adjusted unconditionally to guard against the
		 * case where an FIQ arrived just before we did the
		 * "cpsid aif".
		 */
		ldr	r0, =22f
		bx	r0
	22:
#else
	11:
#endif
		read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		read_tpidruro r1
#endif
	.endm

/* The handler of native interrupts. */
.macro	native_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	/*
	 * FIQ and IRQ have a +4 offset for lr compared to the preferred
	 * return address.
	 */
	sub	lr, lr, #4

	/*
	 * We're always saving {r0-r3}. In IRQ mode we're saving r12 also.
	 * In FIQ mode we're also saving the banked FIQ registers {r8-r12}
	 * because the secure monitor doesn't save those. The treatment of
	 * the banked FIQ registers is somewhat analogous to the lazy save
	 * of VFP registers.
	 */
	.ifc	\mode\(),fiq
	push	{r0-r3, r8-r12, lr}
	.else
	push	{r0-r3, r12, lr}
	.endif

	bl	thread_check_canaries
	bl	interrupt_main_handler

	mrs	r0, spsr
	cmp_spsr_user_mode r0

	.ifc	\mode\(),fiq
	pop	{r0-r3, r8-r12, lr}
	.else
	pop	{r0-r3, r12, lr}
	.endif

	movsne	pc, lr
	b	eret_to_user_mode
.endm

/* The handler of foreign interrupts. */
.macro foreign_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	sub	lr, lr, #4
	push	{r12}

	.ifc	\mode\(),fiq
	/*
	 * If a foreign (non-secure) interrupt is received as a FIQ we need
	 * to check whether we're in a saveable state or whether we need to
	 * mask the interrupt and handle it later.
	 *
	 * The window when this is needed is quite narrow: it's between
	 * entering the exception vector and the point where the "cpsid"
	 * instruction of the handler has been executed.
	 *
	 * Currently we can save the state properly if the FIQ is received
	 * while in user or svc (kernel) mode.
	 *
	 * If we're returning to abort, undef or irq mode we're returning
	 * with the mapping restored. This is OK since the reduced mapping
	 * will be restored again before the handler we're returning to
	 * eventually returns to user mode.
	 */
	mrs	r12, spsr
	and	r12, r12, #ARM32_CPSR_MODE_MASK
	cmp	r12, #ARM32_CPSR_MODE_USR
	cmpne	r12, #ARM32_CPSR_MODE_SVC
	beq	1f
	mrs	r12, spsr
	orr	r12, r12, #ARM32_CPSR_F
	msr	spsr_fsxc, r12
	pop	{r12}
	movs	pc, lr
1:
	.endif

	push	{lr}

	.ifc	\mode\(),fiq
	bl	thread_save_state_fiq
	.else
	bl	thread_save_state
	.endif

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which cache entries have been
	 * used. We're relying on the secure monitor/dispatcher to take
	 * care of the BTB.
	 */
	mov	r0, #DCACHE_OP_CLEAN_INV
	bl	dcache_op_louis
	write_iciallu
#endif

	/*
	 * Use SP_abt to update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_TMP;
	 */
	cps	#CPSR_MODE_ABT
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_TMP
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	.ifc	\mode\(),fiq
	cps	#CPSR_MODE_FIQ
	.else
	cps	#CPSR_MODE_IRQ
	.endif

	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	r1, spsr
	pop	{r2}
	pop	{r12}
	blx	thread_state_suspend
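	/* thread_state_suspend() returns the thread index, kept in r0 */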

	/*
	 * Switch to SVC mode and copy the current stack pointer as it is
	 * already the tmp stack.
	 */
	mov	r1, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r1

	/* Passing thread index in r0 */
	b	thread_foreign_intr_exit
.endm

FUNC thread_excp_vect , :, align=32
UNWIND(	.cantunwind)
	b	.			/* Reset			*/
	b	__thread_und_handler	/* Undefined instruction	*/
	b	__thread_svc_handler	/* System call			*/
	b	__thread_pabort_handler	/* Prefetch abort		*/
	b	__thread_dabort_handler	/* Data abort			*/
	b	.			/* Reserved			*/
	b	__thread_irq_handler	/* IRQ				*/
	b	__thread_fiq_handler	/* FIQ				*/
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8 byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * To avoid unexpected speculation we need to invalidate
		 * the branch predictor before we do the first branch. It
		 * doesn't matter whether it's a conditional or an
		 * unconditional branch, speculation can still occur.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * we enter via. This is done by adding 1 to SP in each
		 * entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		cpsid	aif		/* 0:000 FIQ			*/
	.endm

	.balign	32
	.global thread_excp_vect_wa_a15_spectre_v2
thread_excp_vect_wa_a15_spectre_v2:
	vector_prologue_spectre
	write_tpidrprw r0
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.balign	32
	.global thread_excp_vect_wa_spectre_v2
thread_excp_vect_wa_spectre_v2:
	vector_prologue_spectre
	write_tpidrprw r0
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

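	/*
	 * Compute the vector entry index from the bits accumulated in SP
	 * above and dispatch through the table below. Each entry is two
	 * 4-byte instructions (hence LSL #3), and reading PC yields the
	 * address of the "add" plus 8, which is the first pair (FIQ) when
	 * r0 is 0.
	 */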
1:	and	r0, sp, #(BIT(0) | BIT(1) | BIT(2))
	bic	sp, sp, #(BIT(0) | BIT(1) | BIT(2))
	add	pc, pc, r0, LSL #3
	nop

	read_tpidrprw r0
	b	__thread_fiq_handler	/* FIQ				*/
	read_tpidrprw r0
	b	__thread_irq_handler	/* IRQ				*/
	read_tpidrprw r0
	b	.			/* Reserved			*/
	read_tpidrprw r0
	b	__thread_dabort_handler	/* Data abort			*/
	read_tpidrprw r0
	b	__thread_pabort_handler	/* Prefetch abort		*/
	read_tpidrprw r0
	b	__thread_svc_handler	/* System call			*/
	read_tpidrprw r0
	b	__thread_und_handler	/* Undefined instruction	*/
	read_tpidrprw r0
	b	.			/* Reset			*/
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

__thread_und_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	mrs	r1, spsr
	tst	r1, #CPSR_T
	subne	lr, lr, #2
	subeq	lr, lr, #4
	mov	r0, #ABORT_TYPE_UNDEF
	b	__thread_abort_common

__thread_dabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #8
	mov	r0, #ABORT_TYPE_DATA
	b	__thread_abort_common

__thread_pabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #4
	mov	r0, #ABORT_TYPE_PREFETCH

__thread_abort_common:
	/*
	 * At this label:
	 * cpsr is in mode undef or abort
	 * sp is still pointing to struct thread_core_local belonging to
	 * this core.
	 * {r0, r1} are saved in struct thread_core_local pointed to by sp
	 * {r2-r11, ip} are untouched.
	 * r0 holds the first argument for abort_handler()
	 */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_ABORT

	/*
	 * Select stack and update flags accordingly
	 *
	 * Normal case:
	 * If the abort stack is unused select that.
	 *
	 * Fatal error handling:
	 * If we're already using the abort stack as noted by bit
	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
	 * field we're selecting the temporary stack instead to be able to
	 * make a stack trace of the abort in abort mode.
	 *
	 * r1 is initialized as a temporary stack pointer until we've
	 * switched to system mode.
	 */
	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	orrne	r1, r1, #THREAD_CLF_TMP /* flags |= THREAD_CLF_TMP; */
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]

	/*
	 * Store registers on the stack matching struct thread_abort_regs,
	 * starting from the end of the struct with {r2-r11, ip}.
	 * Then load the previously saved {r0-r1} and store them, followed
	 * by the fields down to and including the pad field.
	 * After this only {usr_sp, usr_lr} are missing in the struct.
	 */
	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
	/* Push the original {r0-r1} on the selected stack */
	stmdb	r1!, {r2-r3}
	mrs	r3, spsr
	/* Push {pad, spsr, elr} on the selected stack */
	stmdb	r1!, {r2, r3, lr}

	cps	#CPSR_MODE_SYS
	str	lr, [r1, #-4]!
	str	sp, [r1, #-4]!
	mov	sp, r1

	bl	abort_handler

	mov	ip, sp
	ldr	sp, [ip], #4
	ldr	lr, [ip], #4

	/*
	 * Even if we entered via CPSR_MODE_UND, we are returning via
	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
	 * here.
	 */
	cps	#CPSR_MODE_ABT
	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
	msr	spsr_fsxc, r1

	/* Update core local flags */
	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	cmp_spsr_user_mode r1
	ldm	ip, {r0-r11, ip}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_abort_common */

__thread_svc_handler:
	cpsid	aif

	maybe_restore_mapping

	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp
	bl	thread_scall_handler
	cpsid	aif	/* In case something was unmasked */
	pop	{r0}
	msr	spsr_fsxc, r0
	cmp_spsr_user_mode r0
	pop	{r0-r7, lr}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_svc_handler */

__thread_fiq_handler:
#if defined(CFG_CORE_IRQ_IS_NATIVE_INTR)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
	/* end thread_fiq_handler */

__thread_irq_handler:
#if defined(CFG_CORE_IRQ_IS_NATIVE_INTR)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
	/* end thread_irq_handler */

	/*
	 * Returns to user mode.
	 * Expects to be branched to with lr holding the user space address
	 * to return to and spsr holding the desired cpsr. Async abort, irq
	 * and fiq should be masked.
	 */
eret_to_user_mode:
	write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	write_tpidruro r1
#endif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r0, =thread_user_kcode_offset
	ldr	r0, [r0]
	read_vbar r1
	sub	r1, r1, r0
	write_vbar r1
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r1, =1f
	sub	r1, r1, r0
	bx	r1
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r0, r1
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r0, r0, #CORE_MMU_BASE_TABLE_OFFSET
#endif
	/* switch to user ASID */
	orr	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r0, r1
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r0
	orr	r0, r0, #TTBCR_PD1
	write_ttbcr r0
	isb
#endif
	read_contextidr r0
	orr	r0, r0, #BIT(0)
	write_contextidr r0
	isb
#endif /*!CFG_WITH_LPAE*/

	read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	read_tpidruro r1
#endif

	movs	pc, lr

	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active,
	 * which means executing with the reduced mapping, so the code
	 * needs to be located here together with the vector.
	 */
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	push	{r4-r7}

	/* Mask all exceptions */
	mrs	r4, cpsr	/* This register must be preserved */
	cpsid	aif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r2, =thread_user_kcode_offset
	ldr	r2, [r2]
	read_vbar r5		/* This register must be preserved */
	sub	r3, r5, r2
	write_vbar r3
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r3, =1f
	sub	r3, r3, r2
	bx	r3
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r6, r7	/* These registers must be preserved */
	/* switch to user ASID */
	orr	r3, r7, #BIT(TTBR_ASID_SHIFT - 32)
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r2, r6, #CORE_MMU_BASE_TABLE_OFFSET
	write_ttbr0_64bit r2, r3
#else
	write_ttbr0_64bit r6, r3
#endif
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r6	/* This register must be preserved */
	orr	r2, r6, #TTBCR_PD1
	write_ttbcr r2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	read_contextidr r7	/* This register must be preserved */
	orr	r2, r7, #BIT(0)
	write_contextidr r2
	isb
#endif /*!CFG_WITH_LPAE*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate minimum icache line size, result in r2 */
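	/*
	 * CTR.IminLine is the log2 of the number of words in the smallest
	 * I-cache line, so the line size in bytes is the word size shifted
	 * left by that value.
	 */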
	read_ctr r3
	and	r3, r3, #CTR_IMINLINE_MASK
	mov	r2, #CTR_WORD_SIZE
	lsl	r2, r2, r3

	add	r1, r0, r1
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	write_icimvau r0
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b

	/* Invalidate entire branch predictor array inner shareable */
	write_bpiallis

	dsb	ishst
	isb

#ifdef CFG_WITH_LPAE
	write_ttbr0_64bit r6, r7
	isb
#else /*!CFG_WITH_LPAE*/
	write_contextidr r7
	isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_ttbcr r6
	isb
#endif
#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_vbar r5
	isb
	/*
	 * The PC is adjusted unconditionally to guard against the
	 * case where an FIQ arrived just before we did the "cpsid aif".
	 */
	ldr	r0, =1f
	bx	r0
1:
#endif

	msr	cpsr_fsxc, r4	/* Restore exceptions */
	pop	{r4-r7}
	bx	lr		/* End of icache_inv_user_range() */

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect