/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/thread_defs.h>
#include <kernel/unwind.h>
#include <mm/core_mmu.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

#include "thread_private.h"

	.macro cmp_spsr_user_mode reg:req
		/*
		 * We're only testing the lower 4 bits as bit 4 (0x10)
		 * is always set.
		 */
		tst	\reg, #0x0f
	.endm

LOCAL_FUNC vector_std_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7}
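	/* The saved {r0-r7} form the struct thread_smc_args passed in r0 */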
	mov	r0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (an error was detected) it will do a
	 * normal "C" return.
	 */
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7}
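	/* The saved {r0-r7} form the struct thread_smc_args passed in r0 */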
	mov	r0, sp
	bl	thread_handle_fast_smc
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	ldr	lr, =thread_nintr_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_on_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_suspend_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_resume_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_system_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_system_reset_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.  Also used when compiled with the internal monitor, but
 * the cpu_*_entry and system_*_entry are not used then.
 *
 * Note that ARM-TF depends on the layout of this vector table, any change
 * in layout has to be synced with ARM-TF.
 */
FUNC thread_vector_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
UNWIND(	.fnend)
END_FUNC thread_vector_table
KEEP_PAGER thread_vector_table

FUNC thread_set_abt_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_abt_sp

FUNC thread_set_und_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_UND
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_und_sp

FUNC thread_set_irq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_fiq_sp

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
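	/*
	 * regs (r0) is laid out as {r0-r12, usr_sp, usr_lr, svc_spsr,
	 * svc_sp, svc_lr, pc, cpsr}, matching the loads below. The banked
	 * registers are restored first and r0-r12 last.
	 */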
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldm	r12!, {sp, lr}

	cps	#CPSR_MODE_SVC
	ldm	r12!, {r1, sp, lr}
	msr	spsr_fsxc, r1

	ldm	r12, {r1, r2}

	/*
	 * Switch to a mode other than SVC since we need to set spsr in
	 * order to return into the old state properly, and the old state
	 * may itself be SVC mode.
	 */
	cps	#CPSR_MODE_ABT
	cmp_spsr_user_mode r2
	mov	lr, r1
	msr	spsr_fsxc, r2
	ldm	r0, {r0-r12}
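	/*
	 * If not returning to user mode, do the exception return here;
	 * otherwise restore the user mapping via eret_to_user_mode.
	 */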
	movnes	pc, lr
	b	eret_to_user_mode
UNWIND(	.fnend)
END_FUNC thread_resume

/*
 * Disables IRQ and FIQ and saves the state of the thread from FIQ mode,
 * which has banked r8-r12 registers. Returns the original CPSR.
 */
LOCAL_FUNC thread_save_state_fiq , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mov	r9, lr
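	/*
	 * The return address is stashed in the FIQ-banked r9 and the
	 * original CPSR in the FIQ-banked r8 below, so neither clobbers
	 * the thread's r8-r9 which are saved from SYS mode further down.
	 */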

	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r8, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}

	cps	#CPSR_MODE_SYS
	stm	r0!, {r8-r12}
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	stm	r0!, {r1, sp, lr}

	/* Back to FIQ mode */
	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r8			/* Return original CPSR */
	bx	r9
UNWIND(	.fnend)
END_FUNC thread_save_state_fiq

/*
 * Disables IRQ and FIQ and saves the state of the thread, returns the
 * original CPSR.
 */
LOCAL_FUNC thread_save_state , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r12, lr}
	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs
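	/* r0 now points to the struct thread_ctx_regs of the current thread */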

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps	#CPSR_MODE_SYS
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	stm	r0!, {r1, sp, lr}

	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_save_state

FUNC thread_std_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* Pass r0-r7 in a struct thread_smc_args */
	push	{r0-r7}
	mov	r0, sp
	bl	__thread_std_smc_entry
	/*
	 * Load the returned r0-r3 into preserved registers and skip the
	 * "returned" r4-r7 since they will not be returned to normal
	 * world.
	 */
	pop	{r4-r7}
	add	sp, #(4 * 4)

	/* Disable interrupts before switching to temporary stack */
	cpsid	aif
	bl	thread_get_tmp_sp
	mov	sp, r0

	bl	thread_state_free

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	r1, r4
	mov	r2, r5
	mov	r3, r6
	mov	r4, r7
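	/* r1-r4 now hold the four return values passed back to normal world */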
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC thread_std_smc_entry


/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
/*
 * r0-r2 are used to pass parameters to normal world
 * r0-r5 are used to pass the return value back from normal world
 *
 * Note that r3 is used to pass "resume information", that is, which
 * thread it is that should resume.
 *
 * Since this function follows the AAPCS we need to preserve r4-r5,
 * which are otherwise modified when returning back from normal world.
 */
UNWIND(	.fnstart)
	push	{r4-r5, lr}
UNWIND(	.save	{r4-r5, lr})
	push	{r0}
UNWIND(	.save	{r0})

	bl	thread_save_state
	mov	r4, r0			/* Save original CPSR */

	/*
	 * Switch to temporary stack and SVC mode. Save CPSR to resume into.
	 */
	bl	thread_get_tmp_sp
	ldr	r5, [sp]		/* Get pointer to rv[] */
	cps	#CPSR_MODE_SVC		/* Change to SVC mode */
	mov	sp, r0			/* Switch to tmp stack */

	mov	r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	mov	r1, r4			/* CPSR to restore */
	ldr	r2, =.thread_rpc_return
	bl	thread_state_suspend
	mov	r4, r0			/* Supply thread index */
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldm	r5, {r1-r3}		/* Load rv[] into r1-r3 */
	smc	#0
	b	.	/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * it had when thread_save_state() was called above.
	 *
	 * Jumps here from thread_resume above when RPC has returned. The
	 * IRQ and FIQ bits are restored to what they were when this
	 * function was originally entered.
	 */
	pop	{r12}			/* Get pointer to rv[] */
	stm	r12, {r0-r5}		/* Store r0-r5 into rv[] */
	pop	{r4-r5, pc}
UNWIND(	.fnend)
END_FUNC thread_rpc
KEEP_PAGER thread_rpc

/*
 * unsigned long thread_smc(unsigned long func_id, unsigned long a1,
 *			    unsigned long a2, unsigned long a3)
 */
FUNC thread_smc , :
UNWIND(	.fnstart)
	smc	#0
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_smc

FUNC thread_init_vbar , :
UNWIND(	.fnstart)
	/* Set vector (VBAR) */
	write_vbar r0
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * Below are low level routines handling entry and return from user mode.
 *
 * thread_enter_user_mode() saves all the registers that user mode can
 * change so kernel mode can restore needed registers when resuming
 * execution after the call to thread_enter_user_mode() has returned.
 * thread_enter_user_mode() doesn't return directly since it enters user
 * mode instead, it's thread_unwind_user_mode() that does the
 * returning by restoring the registers saved by thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to its
 * caller: the user TA calls utee_return, the user TA calls utee_panic or
 * an abort occurs.
 *
 * Calls to utee_return or utee_panic are handled as:
 * thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call() which
 * calls syscall_return() or syscall_panic().
 *
 * These function calls return normally except thread_svc_handler(),
 * which is an exception handling routine and therefore reads the return
 * address and SPSR to restore from the stack. syscall_return() and
 * syscall_panic() change the return address and SPSR used by
 * thread_svc_handler() so that, instead of returning into user mode as
 * with other syscalls, it returns into thread_unwind_user_mode() in
 * kernel mode. When thread_svc_handler() returns, the stack pointer is
 * at the point where thread_enter_user_mode() left it, so this is where
 * thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way but by thread_abort_handler()
 * instead, when the pager sees that it's an abort from user mode that
 * can't be handled it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
 *               unsigned long user_func, unsigned long spsr,
 *               uint32_t *exit_status0, uint32_t *exit_status1)
 */
FUNC __thread_enter_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function would have returned. This is also used in
	 * syscall_panic().
	 *
	 * If the stack usage of this function is changed
	 * thread_unwind_user_mode() has to be updated.
	 */
	push	{r4-r12,lr}
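	/*
	 * After the 10-register push above the stacked arguments are at:
	 * [sp, #40] user_sp, [sp, #44] user_func, [sp, #48] spsr,
	 * [sp, #52] exit_status0, [sp, #56] exit_status1
	 */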

	ldr	r4, [sp, #(10 * 0x4)]	/* user stack pointer */
	ldr	r5, [sp, #(11 * 0x4)]	/* user function */
	ldr	r6, [sp, #(12 * 0x4)]	/* spsr */

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r7, sp
	mov	sp, r4
	cps	#CPSR_MODE_SVC
	push	{r7,r8}

	/* Prepare user mode entry via eret_to_user_mode */
	cpsid	aif
	msr	spsr_fsxc, r6
	mov	lr, r5

	b	eret_to_user_mode
UNWIND(	.fnend)
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *              uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	ip, [sp, #(15 * 0x4)]	/* &ctx->panicked */
	str	r1, [ip]
	ldr	ip, [sp, #(16 * 0x4)]	/* &ctx->panic_code */
	str	r2, [ip]

	/* Restore old user sp */
	pop	{r4,r7}
	cps	#CPSR_MODE_SYS
	mov	sp, r4
	cps	#CPSR_MODE_SVC

	pop	{r4-r12,pc}	/* Match the push in __thread_enter_user_mode() */
UNWIND(	.fnend)
END_FUNC thread_unwind_user_mode

	.macro maybe_restore_mapping
		/*
		 * This macro is a bit hard to read due to all the ifdefs,
		 * we're testing for two different configs which makes four
		 * different combinations.
		 *
		 * - With LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 * - Without LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 */

		/*
		 * At this point we can't rely on any memory being writable
		 * yet, so we're using TPIDRPRW to store r0, and if with
		 * LPAE TPIDRURO to store r1 too.
		 */
		write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		write_tpidruro r1
#endif

#ifdef CFG_WITH_LPAE
		read_ttbr0_64bit r0, r1
		tst	r1, #BIT(TTBR_ASID_SHIFT - 32)
		beq	11f

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/*
		 * Update the mapping to use the full kernel mode mapping.
		 * Since the translation table could reside above 4GB we'll
		 * have to use 64-bit arithmetics.
		 */
		subs	r0, r0, #CORE_MMU_L1_TBL_OFFSET
		sbc	r1, r1, #0
#endif
		bic	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
		write_ttbr0_64bit r0, r1
		isb

#else /*!CFG_WITH_LPAE*/
		read_contextidr r0
		tst	r0, #1
		beq	11f

		/* Update the mapping to use the full kernel mode mapping. */
		bic	r0, r0, #1
		write_contextidr r0
		isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		read_ttbr1 r0
		sub	r0, r0, #CORE_MMU_L1_TBL_OFFSET
		write_ttbr1 r0
		isb
#endif

#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		ldr	r0, =thread_user_kcode_offset
		ldr	r0, [r0]
		read_vbar r1
		add	r1, r1, r0
		write_vbar r1
		isb

	11:	/*
		 * The PC is adjusted unconditionally to guard against the
		 * case where there was an FIQ just before we did the
		 * "cpsid aif".
		 */
		ldr	r0, =22f
		bx	r0
	22:
#else
	11:
#endif
		read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		read_tpidruro r1
#endif
	.endm

/* The handler of native interrupt. */
.macro	native_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	/*
	 * FIQ and IRQ have a +4 offset for lr compared to the preferred
	 * return address.
	 */
	sub	lr, lr, #4

	/*
	 * We're always saving {r0-r3}. In IRQ mode we're also saving r12.
	 * In FIQ mode we're also saving the banked FIQ registers {r8-r12}
	 * because the secure monitor doesn't save those. The treatment of
	 * the banked FIQ registers is somewhat analogous to the lazy save
	 * of VFP registers.
	 */
	.ifc	\mode\(),fiq
	push	{r0-r3, r8-r12, lr}
	.else
	push	{r0-r3, r12, lr}
	.endif

	bl	thread_check_canaries
	ldr	lr, =thread_nintr_handler_ptr
	ldr	lr, [lr]
	blx	lr

	mrs	r0, spsr
	cmp_spsr_user_mode r0

	.ifc	\mode\(),fiq
	pop	{r0-r3, r8-r12, lr}
	.else
	pop	{r0-r3, r12, lr}
	.endif

	movnes	pc, lr
	b	eret_to_user_mode
.endm

/* The handler of foreign interrupt. */
.macro foreign_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	sub	lr, lr, #4
	push	{lr}
	push	{r12}

	.ifc	\mode\(),fiq
	bl	thread_save_state_fiq
	.else
	bl	thread_save_state
	.endif

	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	r1, spsr
	pop	{r12}
	pop	{r2}
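	/* thread_state_suspend(flags, cpsr, pc) returns the thread index */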
	blx	thread_state_suspend
	mov	r4, r0		/* Supply thread index */

	/*
	 * Switch to SVC mode and copy the current stack pointer, as it
	 * already is the tmp stack.
	 */
	mov	r0, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r0

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	r1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	r2, #0
	mov	r3, #0
	/* r4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
.endm

	.section .text.thread_excp_vect
	.align	5
FUNC thread_excp_vect , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.			/* Reset			*/
	b	thread_und_handler	/* Undefined instruction	*/
	b	thread_svc_handler	/* System call			*/
	b	thread_pabort_handler	/* Prefetch abort		*/
	b	thread_dabort_handler	/* Data abort			*/
	b	.			/* Reserved			*/
	b	thread_irq_handler	/* IRQ				*/
	b	thread_fiq_handler	/* FIQ				*/
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8 byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * To avoid unexpected speculation we need to invalidate
		 * the branch predictor before we do the first branch. It
		 * doesn't matter whether it's a conditional or an
		 * unconditional branch, speculation can still occur.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * we enter via.  This is done by adding 1 to SP in each
		 * entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		write_tpidrprw r0	/* 0:000 FIQ			*/
	.endm

	.align	5
	.global thread_excp_vect_workaround_a15
thread_excp_vect_workaround_a15:
	vector_prologue_spectre
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.align	5
	.global thread_excp_vect_workaround
thread_excp_vect_workaround:
	vector_prologue_spectre
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:	and	r0, sp, #(BIT(0) | BIT(1) | BIT(2))
	bic	sp, sp, #(BIT(0) | BIT(1) | BIT(2))
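	/*
	 * Dispatch on the encoded vector entry: each pair of instructions
	 * below is 8 bytes and pc reads as the address of the add plus 8,
	 * so r0 == 0 (FIQ) lands on the first pair after the nop.
	 */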
	add	pc, pc, r0, LSL #3
	nop

	read_tpidrprw r0
	b	thread_fiq_handler	/* FIQ				*/
	read_tpidrprw r0
	b	thread_irq_handler	/* IRQ				*/
	read_tpidrprw r0
	b	.			/* Reserved			*/
	read_tpidrprw r0
	b	thread_dabort_handler	/* Data abort			*/
	read_tpidrprw r0
	b	thread_pabort_handler	/* Prefetch abort		*/
	read_tpidrprw r0
	b	thread_svc_handler	/* System call			*/
	read_tpidrprw r0
	b	thread_und_handler	/* Undefined instruction	*/
	read_tpidrprw r0
	b	.			/* Reset			*/
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

thread_und_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	mrs	r1, spsr
	tst	r1, #CPSR_T
	subne	lr, lr, #2
	subeq	lr, lr, #4
	mov	r0, #ABORT_TYPE_UNDEF
	b	thread_abort_common

thread_dabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #8
	mov	r0, #ABORT_TYPE_DATA
	b	thread_abort_common

thread_pabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #4
	mov	r0, #ABORT_TYPE_PREFETCH

thread_abort_common:
	/*
	 * At this label:
	 * cpsr is in mode undef or abort
	 * sp is still pointing to struct thread_core_local belonging to
	 * this core.
	 * {r0, r1} are saved in struct thread_core_local pointed to by sp
	 * {r2-r11, ip} are untouched.
	 * r0 holds the first argument for abort_handler()
	 */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_ABORT

	/*
	 * Select stack and update flags accordingly
	 *
	 * Normal case:
	 * If the abort stack is unused select that.
	 *
	 * Fatal error handling:
	 * If we're already using the abort stack as noted by bit
	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
	 * field we're selecting the temporary stack instead to be able to
	 * make a stack trace of the abort in abort mode.
	 *
	 * r1 is initialized as a temporary stack pointer until we've
	 * switched to system mode.
	 */
	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	orrne	r1, r1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]

	/*
	 * Store registers on the selected stack in the layout of struct
	 * thread_abort_regs, starting from the end of the struct with
	 * {r2-r11, ip}. Then load the previously saved {r0-r1} and store
	 * them, followed by the {pad, spsr, elr} fields. After this only
	 * {usr_sp, usr_lr} are missing from the struct.
	 */
	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
	/* Push the original {r0-r1} on the selected stack */
	stmdb	r1!, {r2-r3}
	mrs	r3, spsr
	/* Push {pad, spsr, elr} on the selected stack */
	stmdb	r1!, {r2, r3, lr}

	cps	#CPSR_MODE_SYS
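	/* Store the user (SYS mode) lr and sp to complete the struct */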
	str	lr, [r1, #-4]!
	str	sp, [r1, #-4]!
	mov	sp, r1

	bl	abort_handler

	mov	ip, sp
	ldr	sp, [ip], #4
	ldr	lr, [ip], #4

	/*
	 * Even if we entered via CPSR_MODE_UND, we are returning via
	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
	 * here.
	 */
	cps	#CPSR_MODE_ABT
	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
	msr	spsr_fsxc, r1

	/* Update core local flags */
	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	cmp_spsr_user_mode r1
	ldm	ip, {r0-r11, ip}
	movnes	pc, lr
	b	eret_to_user_mode
	/* end thread_abort_common */

thread_svc_handler:
	cpsid	aif

	maybe_restore_mapping

	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
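	/* sp now points at the {spsr, r0-r7, lr} frame passed to tee_svc_handler() */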
	mov	r0, sp
	bl	tee_svc_handler
	cpsid	aif	/* In case something was unmasked */
	pop	{r0}
	msr	spsr_fsxc, r0
	cmp_spsr_user_mode r0
	pop	{r0-r7, lr}
	movnes	pc, lr
	b	eret_to_user_mode
	/* end thread_svc_handler */

thread_fiq_handler:
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
	/* end thread_fiq_handler */

thread_irq_handler:
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
	/* end thread_irq_handler */

	/*
	 * Returns to user mode.
	 * Expects to be jumped to with lr pointing to the user space
	 * address to jump to and spsr holding the desired cpsr. Async
	 * abort, irq and fiq should be masked.
	 */
eret_to_user_mode:
	write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	write_tpidruro r1
#endif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r0, =thread_user_kcode_offset
	ldr	r0, [r0]
	read_vbar r1
	sub	r1, r1, r0
	write_vbar r1
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r1, =1f
	sub	r1, r1, r0
	bx	r1
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r0, r1
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
#endif
	/* switch to user ASID */
	orr	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r0, r1
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbr1 r0
	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
	write_ttbr1 r0
	isb
#endif
	read_contextidr r0
	orr	r0, r0, #BIT(0)
	write_contextidr r0
	isb
#endif /*!CFG_WITH_LPAE*/

	read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	read_tpidruro r1
#endif

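	/* Exception return: pc <- lr, cpsr <- spsr */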
	movs	pc, lr
UNWIND(	.fnend)
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect