/*
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm-defines.h>
#include <asm.S>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/thread_defs.h>
#include <kernel/unwind.h>
#include <mm/core_mmu.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

#include "thread_private.h"

	.macro cmp_spsr_user_mode reg:req
		/*
		 * We only need to test the lower 4 bits of the mode
		 * field as bit 4 (0x10) is always set.
		 */
		tst	\reg, #0x0f
	.endm

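/*
 * For reference, the test above corresponds to this C check on a saved
 * SPSR value (a minimal sketch, not code from this file): user mode is
 * M[4:0] == 0b10000, so with bit 4 always set it is enough to test that
 * the low four bits are zero.
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	static bool spsr_is_user_mode(uint32_t spsr)
 *	{
 *		return (spsr & 0x0f) == 0;	// usr = 0x10, bits 3:0 clear
 *	}
 */
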
LOCAL_FUNC vector_std_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if it hasn't switched
	 * stack (an error was detected) it does a normal "C" return.
	 */
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_std_smc_entry
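
/*
 * The push {r0-r7} above lays out the SMC arguments as a struct on the
 * stack which is then passed by pointer to the "C" handler. A minimal
 * sketch of the corresponding C view, assuming the eight-register layout
 * declared in kernel/thread.h:
 *
 *	#include <stdint.h>
 *
 *	struct thread_smc_args {
 *		uint32_t a0;	// SMC function ID in, return status out
 *		uint32_t a1;
 *		uint32_t a2;
 *		uint32_t a3;
 *		uint32_t a4;
 *		uint32_t a5;
 *		uint32_t a6;
 *		uint32_t a7;
 *	};
 *
 *	void thread_handle_std_smc(struct thread_smc_args *args);
 *
 * On return the updated a0-a3 are popped into r1-r4 and handed back to
 * normal world together with TEESMC_OPTEED_RETURN_CALL_DONE in r0.
 */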

LOCAL_FUNC vector_fast_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_fast_smc
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	ldr	lr, =thread_nintr_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_fiq_entry
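
/*
 * thread_nintr_handler_ptr above (and the thread_cpu_*_handler_ptr /
 * thread_system_*_handler_ptr pointers used below) are filled in from C
 * during boot. A rough sketch of how a platform wires them up, assuming
 * the thread_init_primary()/struct thread_handlers interface from
 * kernel/thread.h (the handler function names are illustrative only):
 *
 *	static const struct thread_handlers handlers = {
 *		.std_smc = std_smc_handler,
 *		.fast_smc = fast_smc_handler,
 *		.nintr = native_intr_handler,
 *		.cpu_on = cpu_on_handler,
 *		.cpu_off = cpu_off_handler,
 *		.cpu_suspend = cpu_suspend_handler,
 *		.cpu_resume = cpu_resume_handler,
 *		.system_off = system_off_handler,
 *		.system_reset = system_reset_handler,
 *	};
 *
 *	void boot_init_primary(void)
 *	{
 *		thread_init_primary(&handlers);	// stores the pointers
 *	}
 */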

LOCAL_FUNC vector_cpu_on_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_on_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_suspend_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_resume_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_system_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_system_reset_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.  Also used when compiled with the internal monitor, but
 * the cpu_*_entry and system_*_entry are not used then.
 *
 * Note that ARM-TF depends on the layout of this vector table; any change
 * in layout has to be synced with ARM-TF.
 */
FUNC thread_vector_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
UNWIND(	.fnend)
END_FUNC thread_vector_table
KEEP_PAGER thread_vector_table
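
/*
 * The address of thread_vector_table is reported to ARM-TF (or to the
 * internal monitor) once during boot, after which ARM-TF branches into
 * one of the entries above for every world switch. A rough illustration
 * of that hand-over, assuming it is done with a
 * TEESMC_OPTEED_RETURN_ENTRY_DONE SMC as in the generic boot code (the
 * function below is hypothetical):
 *
 *	extern uint32_t thread_vector_table[];
 *
 *	static void report_vector_table_to_monitor(void)
 *	{
 *		register uint32_t r0 asm("r0") = TEESMC_OPTEED_RETURN_ENTRY_DONE;
 *		register uint32_t r1 asm("r1") = (uint32_t)thread_vector_table;
 *
 *		asm volatile("smc #0" : : "r"(r0), "r"(r1) : "memory");
 *	}
 */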

FUNC thread_set_abt_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_abt_sp

FUNC thread_set_und_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_UND
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_und_sp

FUNC thread_set_irq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_fiq_sp
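
/*
 * These helpers are called from C early in per-CPU initialization so that
 * the abort, undef, IRQ and FIQ modes each get a usable stack before any
 * exception can hit. A simplified sketch of such an init routine (the
 * function name and how the stack tops are obtained are illustrative,
 * not the exact code used elsewhere in the kernel):
 *
 *	#include <kernel/thread.h>
 *
 *	static void init_exception_stacks(vaddr_t abt_top, vaddr_t und_top,
 *					  vaddr_t irq_top, vaddr_t fiq_top)
 *	{
 *		thread_set_abt_sp(abt_top);
 *		thread_set_und_sp(und_top);
 *		thread_set_irq_sp(irq_top);
 *		thread_set_fiq_sp(fiq_top);
 *	}
 */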

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldm	r12!, {sp, lr}

	cps	#CPSR_MODE_SVC
	ldm	r12!, {r1, sp, lr}
	msr	spsr_fsxc, r1

	ldm	r12, {r1, r2}

	/*
	 * Switch to a mode other than SVC since we need to set spsr in
	 * order to return properly into the old state, and that old
	 * state may itself be SVC mode.
	 */
	cps	#CPSR_MODE_ABT
	cmp_spsr_user_mode r2
	mov	lr, r1
	msr	spsr_fsxc, r2
	ldm	r0, {r0-r12}
	movnes	pc, lr
	b	eret_to_user_mode
UNWIND(	.fnend)
END_FUNC thread_resume
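
/*
 * The offsets used above (and by thread_save_state below) follow the
 * AArch32 layout of struct thread_ctx_regs: r0-r12 first, then the
 * user/system mode sp and lr, the SVC mode spsr, sp and lr, and finally
 * the pc and cpsr to resume with. A sketch of that layout, assuming it
 * matches the 32-bit definition in kernel/thread.h:
 *
 *	#include <stdint.h>
 *
 *	struct thread_ctx_regs {
 *		uint32_t r[13];		// r0-r12, restored with ldm r0, {r0-r12}
 *		uint32_t usr_sp;	// loaded in CPSR_MODE_SYS
 *		uint32_t usr_lr;
 *		uint32_t svc_spsr;	// loaded in CPSR_MODE_SVC
 *		uint32_t svc_sp;
 *		uint32_t svc_lr;
 *		uint32_t pc;		// moved to lr before the exception return
 *		uint32_t cpsr;		// written to spsr_fsxc
 *	};
 */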

/*
 * Disables IRQ and FIQ and saves the state of the thread from FIQ mode,
 * which has banked r8-r12 registers. Returns the original CPSR.
 */
LOCAL_FUNC thread_save_state_fiq , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mov	r9, lr

	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r8, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}

	cps	#CPSR_MODE_SYS
	stm	r0!, {r8-r12}
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	stm	r0!, {r1, sp, lr}

	/* back to fiq mode */
	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r8			/* Return original CPSR */
	bx	r9
UNWIND(	.fnend)
END_FUNC thread_save_state_fiq

/*
 * Disables IRQ and FIQ and saves the state of the thread. Returns the
 * original CPSR.
 */
LOCAL_FUNC thread_save_state , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r12, lr}
	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps	#CPSR_MODE_SYS
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	stm	r0!, {r1, sp, lr}

	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_save_state

FUNC thread_std_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* Pass r0-r7 in a struct thread_smc_args */
	push	{r0-r7}
	mov	r0, sp
	bl	__thread_std_smc_entry
	/*
	 * Load the returned r0-r3 into preserved registers and skip the
	 * "returned" r4-r7 since they will not be returned to normal
	 * world.
	 */
	pop	{r4-r7}
	add	sp, #(4 * 4)

	/* Disable interrupts before switching to temporary stack */
	cpsid	aif
	bl	thread_get_tmp_sp
	mov	sp, r0

	bl	thread_state_free

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	r1, r4
	mov	r2, r5
	mov	r3, r6
	mov	r4, r7
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC thread_std_smc_entry
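
/*
 * __thread_std_smc_entry() gets the same stack-allocated struct
 * thread_smc_args as the vectors above and is expected to leave the
 * values for normal world in a0-a3 of that struct; the code above then
 * parks them in r4-r7 across thread_state_free() and finally passes them
 * in r1-r4 together with TEESMC_OPTEED_RETURN_CALL_DONE in r0. A minimal
 * sketch of that contract (simplified, not the actual definition in
 * thread.c; the helper name is hypothetical):
 *
 *	void __thread_std_smc_entry(struct thread_smc_args *args)
 *	{
 *		uint32_t res = handle_the_call(args);	// hypothetical helper
 *
 *		args->a0 = res;		// becomes r0 in normal world
 *		args->a1 = 0;
 *		args->a2 = 0;
 *		args->a3 = 0;
 *	}
 */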


/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
/*
 * r0-r2 are used to pass parameters to normal world
 * r0-r5 are used to pass the return value back from normal world
 *
 * Note that r3 is used to pass "resume information", that is, which
 * thread it is that should resume.
 *
 * Since this function follows AAPCS we need to preserve r4-r5, which
 * otherwise would be modified when returning from normal world.
 */
UNWIND(	.fnstart)
	push	{r4-r5, lr}
UNWIND(	.save	{r4-r5, lr})
	push	{r0}
UNWIND(	.save	{r0})

	bl	thread_save_state
	mov	r4, r0			/* Save original CPSR */

	/*
	 * Switch to temporary stack and SVC mode. Save CPSR to resume into.
	 */
	bl	thread_get_tmp_sp
	ldr	r5, [sp]		/* Get pointer to rv[] */
	cps	#CPSR_MODE_SVC		/* Change to SVC mode */
	mov	sp, r0			/* Switch to tmp stack */

	mov	r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	mov	r1, r4			/* CPSR to restore */
	ldr	r2, =.thread_rpc_return
	bl	thread_state_suspend
	mov	r4, r0			/* Supply thread index */
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldm	r5, {r1-r3}		/* Load rv[] into r1-r3 */
	smc	#0
	b	.	/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * it had when thread_save_state() was called above.
	 *
	 * Jumps here from thread_resume above when RPC has returned. The
	 * IRQ and FIQ bits are restored to what they were when this
	 * function was originally entered.
	 */
	pop	{r12}			/* Get pointer to rv[] */
	stm	r12, {r0-r5}		/* Store r0-r5 into rv[] */
	pop	{r4-r5, pc}
UNWIND(	.fnend)
END_FUNC thread_rpc
KEEP_PAGER thread_rpc
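
/*
 * thread_rpc() is the primitive used by the kernel to suspend the current
 * thread and ask normal world for a service, resuming at .thread_rpc_return
 * once normal world calls back in to resume the thread. A rough sketch of
 * a caller, assuming THREAD_RPC_NUM_ARGS covers at least the six words
 * stored back above (the helper name is illustrative only):
 *
 *	#include <sm/optee_smc.h>
 *
 *	static uint32_t rpc_request_foreign_service(uint32_t arg)
 *	{
 *		uint32_t rv[THREAD_RPC_NUM_ARGS] = {
 *			OPTEE_SMC_RETURN_RPC_CMD,	// what normal world should do
 *			arg,
 *		};
 *
 *		thread_rpc(rv);		// returns when normal world resumes us
 *		return rv[0];		// r0 from normal world
 *	}
 */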

FUNC thread_init_vbar , :
UNWIND(	.fnstart)
	/* Set vector (VBAR) */
	ldr	r0, =thread_vect_table
	write_vbar r0
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * Below are low level routines handling entry and return from user mode.
 *
 * thread_enter_user_mode() saves all registers that user mode can change
 * so kernel mode can restore needed registers when resuming execution
 * after the call to thread_enter_user_mode() has returned.
 * thread_enter_user_mode() doesn't return directly since it enters user
 * mode instead, it's thread_unwind_user_mode() that does the
 * returning by restoring the registers saved by thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to its
 * caller: the user TA calls utee_return, the user TA calls utee_panic,
 * or through an abort.
 *
 * Calls to utee_return or utee_panic are handled as:
 * thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call() which
 * calls syscall_return() or syscall_panic().
 *
 * These function calls return normally except thread_svc_handler(),
 * which is an exception handling routine and therefore reads the return
 * address and SPSR to restore from the stack. syscall_return() and
 * syscall_panic() change the return address and SPSR used by
 * thread_svc_handler() so that, instead of returning into user mode as
 * with other syscalls, it returns into thread_unwind_user_mode() in
 * kernel mode.  When thread_svc_handler() returns, the stack pointer is
 * at the point where thread_enter_user_mode() left it, so this is where
 * thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way but by thread_abort_handler()
 * instead; when the pager sees that it's an abort from user mode that
 * can't be handled it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */

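/*
 * A condensed sketch of how the C side typically drives this pair: a
 * wrapper builds the user CPSR/SPSR, calls __thread_enter_user_mode()
 * below and later gets control back through thread_unwind_user_mode().
 * This is an illustration only, not the exact code in thread.c:
 *
 *	uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *					unsigned long a2, unsigned long a3,
 *					unsigned long user_sp,
 *					unsigned long entry_func, bool is_32bit,
 *					uint32_t *exit_status0,
 *					uint32_t *exit_status1)
 *	{
 *		unsigned long spsr = CPSR_MODE_USR;	// plus T bit etc. as needed
 *
 *		return __thread_enter_user_mode(a0, a1, a2, a3, user_sp,
 *						entry_func, spsr,
 *						exit_status0, exit_status1);
 *	}
 */
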
/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
 *               unsigned long user_func, unsigned long spsr,
 *               uint32_t *exit_status0, uint32_t *exit_status1)
 *
 */
FUNC __thread_enter_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function had returned. This is also used in
	 * syscall_panic().
	 *
	 * If the stack usage of this function is changed
	 * thread_unwind_user_mode() has to be updated.
	 */
	push    {r4-r12,lr}

	ldr     r4, [sp, #(10 * 0x4)]   /* user stack pointer */
	ldr     r5, [sp, #(11 * 0x4)]   /* user function */
	ldr     r6, [sp, #(12 * 0x4)]   /* spsr */

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r7, sp
	mov     sp, r4
	cps	#CPSR_MODE_SVC
	push	{r7,r8}

	/* Prepare user mode entry via eret_to_user_mode */
	cpsid	aif
	cps	#CPSR_MODE_ABT
	msr     spsr_fsxc, r6
	mov	lr, r5

	b	eret_to_user_mode
UNWIND(	.fnend)
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *              uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr     ip, [sp, #(15 * 0x4)]   /* exit_status0, e.g. &ctx->panicked */
	str	r1, [ip]
	ldr     ip, [sp, #(16 * 0x4)]   /* exit_status1, e.g. &ctx->panic_code */
	str	r2, [ip]

	/* Restore old user sp */
	pop	{r4,r7}
	cps	#CPSR_MODE_SYS
	mov	sp, r4
	cps	#CPSR_MODE_SVC

	pop     {r4-r12,pc}	/* Match the push in __thread_enter_user_mode() */
UNWIND(	.fnend)
END_FUNC thread_unwind_user_mode
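
/*
 * For reference, the offsets above follow from the frame built by
 * __thread_enter_user_mode(). On entry the fifth and following arguments
 * are already on the caller's stack (AAPCS), so after "push {r4-r12,lr}"
 * (10 words) and "push {r7,r8}" (2 more words) the layout seen by
 * thread_unwind_user_mode() is:
 *
 *	sp + 0x00: saved user sp and r8		(the push {r7,r8})
 *	sp + 0x08: saved r4-r12, lr		(10 words)
 *	sp + 0x30: user_sp	(arg 5)	= 12 * 4
 *	sp + 0x34: user_func	(arg 6)	= 13 * 4
 *	sp + 0x38: spsr		(arg 7)	= 14 * 4
 *	sp + 0x3c: exit_status0	(arg 8)	= 15 * 4
 *	sp + 0x40: exit_status1	(arg 9)	= 16 * 4
 */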

	.macro maybe_restore_mapping
		/*
		 * This macro is a bit hard to read due to all the ifdefs:
		 * we're testing for two different configs which makes four
		 * different combinations.
		 *
		 * - With LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 * - Without LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 */

		/*
		 * At this point we can't rely on any memory being writable
		 * yet, so we're using TPIDRPRW to store r0, and if with
		 * LPAE TPIDRURO to store r1 too.
		 */
		write_tpidrprw r0

#ifdef CFG_WITH_LPAE
		write_tpidruro r1

		read_ttbr0_64bit r0, r1
		tst	r1, #BIT(TTBR_ASID_SHIFT - 32)
		beq	11f

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/*
		 * Update the mapping to use the full kernel mode mapping.
		 * Since the translation table could reside above 4GB we'll
		 * have to use 64-bit arithmetic.
		 */
		subs	r0, r0, #CORE_MMU_L1_TBL_OFFSET
		sbc	r1, r1, #0
#endif
		bic	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
		write_ttbr0_64bit r0, r1
		isb

#else /*!CFG_WITH_LPAE*/
		read_contextidr r0
		tst	r0, #1
		beq	11f

		/* Update the mapping to use the full kernel mode mapping. */
		bic	r0, r0, #1
		write_contextidr r0
		isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		read_ttbr1 r0
		sub	r0, r0, #CORE_MMU_L1_TBL_OFFSET
		write_ttbr1 r0
		isb
#endif

#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		ldr	r0, =thread_vect_table
		write_vbar r0
		isb

	11:	/*
		 * The PC is adjusted unconditionally to guard against the
		 * case where there was an FIQ just before we did the
		 * "cpsid aif".
		 */
		ldr	r0, =22f
		bx	r0
	22:
#else
	11:
#endif
		read_tpidrprw r0
#ifdef CFG_WITH_LPAE
		read_tpidruro r1
#endif
	.endm

/* Handler for native interrupts. */
.macro	native_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	/*
	 * FIQ and IRQ have a +4 offset for lr compared to the preferred
	 * return address
	 */
	sub     lr, lr, #4

	/*
	 * We're always saving {r0-r3}. In IRQ mode we're saving r12 also.
	 * In FIQ mode we're saving the banked fiq registers {r8-r12}
	 * because the secure monitor doesn't save those. The treatment of
	 * the banked fiq registers is somewhat analogous to the lazy save
	 * of VFP registers.
	 */
	push	{r0-r3}
	.ifc	\mode\(),fiq
	push	{r8-r12, lr}
	.else
	push	{r12, lr}
	.endif

	bl	thread_check_canaries
	ldr	lr, =thread_nintr_handler_ptr
	ldr	lr, [lr]
	blx	lr

	.ifc	\mode\(),fiq
	pop	{r8-r12, lr}
	.else
	pop	{r12, lr}
	.endif

	mov	r0, sp
	mrs	r1, spsr
	mov	r2, lr
	add	sp, sp, #(4 * 4)
	cps	#CPSR_MODE_ABT
	cmp_spsr_user_mode r1
	msr	spsr_fsxc, r1
	mov	lr, r2
	ldm	r0, {r0-r3}
	movnes	pc, lr
	b	eret_to_user_mode
.endm

/* Handler for foreign interrupts. */
.macro foreign_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	sub	lr, lr, #4
	push	{lr}
	push	{r12}

	.ifc	\mode\(),fiq
	bl	thread_save_state_fiq
	.else
	bl	thread_save_state
	.endif

	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	r1, spsr
	pop	{r12}
	pop	{r2}
	blx	thread_state_suspend
	mov	r4, r0		/* Supply thread index */

	/*
	 * Switch to SVC mode and copy current stack pointer as it already
	 * is the tmp stack.
	 */
	mov	r0, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r0

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	r1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	r2, #0
	mov	r3, #0
	/* r4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
.endm

	.section .text.thread_vect_table
	.align	5
FUNC thread_vect_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.			/* Reset			*/
	b	thread_und_handler	/* Undefined instruction	*/
	b	thread_svc_handler	/* System call			*/
	b	thread_pabort_handler	/* Prefetch abort		*/
	b	thread_dabort_handler	/* Data abort			*/
	b	.			/* Reserved			*/
	b	thread_irq_handler	/* IRQ				*/
	b	thread_fiq_handler	/* FIQ				*/

thread_und_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	mrs	r1, spsr
	tst	r1, #CPSR_T
	subne	lr, lr, #2
	subeq	lr, lr, #4
	mov	r0, #ABORT_TYPE_UNDEF
	b	thread_abort_common

thread_dabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #8
	mov	r0, #ABORT_TYPE_DATA
	b	thread_abort_common

thread_pabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #4
	mov	r0, #ABORT_TYPE_PREFETCH

thread_abort_common:
	/*
	 * At this label:
	 * cpsr is in mode undef or abort
	 * sp is still pointing to struct thread_core_local belonging to
	 * this core.
	 * {r0, r1} are saved in struct thread_core_local pointed to by sp
	 * {r2-r11, ip} are untouched.
	 * r0 holds the first argument for abort_handler()
	 */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_ABORT

	/*
	 * Select stack and update flags accordingly
	 *
	 * Normal case:
	 * If the abort stack is unused select that.
	 *
	 * Fatal error handling:
	 * If we're already using the abort stack as noted by bit
	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
	 * field we're selecting the temporary stack instead to be able to
	 * make a stack trace of the abort in abort mode.
	 *
	 * r1 is initialized as a temporary stack pointer until we've
	 * switched to system mode.
	 */
	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	orrne	r1, r1, #THREAD_CLF_TMP /* flags |= THREAD_CLF_TMP; */
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]

	/*
	 * Store registers on the stack according to the layout of struct
	 * thread_abort_regs, starting from the end of the struct with
	 * {r2-r11, ip}. Then load the previously saved {r0-r1} and store
	 * them together with {pad, spsr, elr}. After this only
	 * {usr_sp, usr_lr} are missing in the struct.
	 */
	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
	/* Push the original {r0-r1} on the selected stack */
	stmdb	r1!, {r2-r3}
	mrs	r3, spsr
	/* Push {pad, spsr, elr} on the selected stack */
	stmdb	r1!, {r2, r3, lr}

	cps	#CPSR_MODE_SYS
	str	lr, [r1, #-4]!
	str	sp, [r1, #-4]!
	mov	sp, r1

	bl	abort_handler

	mov	ip, sp
	ldr	sp, [ip], #4
	ldr	lr, [ip], #4

	/*
	 * Even if we entered via CPSR_MODE_UND, we are returning via
	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
	 * here.
	 */
	cps	#CPSR_MODE_ABT
	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
	msr	spsr_fsxc, r1

	/* Update core local flags */
	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	cmp_spsr_user_mode r1
	ldm	ip, {r0-r11, ip}
	movnes	pc, lr
	b	eret_to_user_mode
	/* end thread_abort_common */
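
/*
 * The stores above build up a struct thread_abort_regs on the selected
 * stack, filled from its end towards its start. A sketch of the layout
 * this code assumes, which should match the AArch32 definition in
 * kernel/thread.h:
 *
 *	struct thread_abort_regs {
 *		uint32_t usr_sp;	// stored last, in system mode
 *		uint32_t usr_lr;
 *		uint32_t pad;
 *		uint32_t spsr;
 *		uint32_t elr;		// adjusted lr of the aborting context
 *		uint32_t r0;
 *		uint32_t r1;
 *		uint32_t r2;
 *		uint32_t r3;
 *		uint32_t r4;
 *		uint32_t r5;
 *		uint32_t r6;
 *		uint32_t r7;
 *		uint32_t r8;
 *		uint32_t r9;
 *		uint32_t r10;
 *		uint32_t r11;
 *		uint32_t ip;
 *	};
 *
 * abort_handler() receives the abort type in r0 and a pointer to this
 * struct in r1 (which is also the new sp).
 */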

thread_svc_handler:
	cpsid	aif

	maybe_restore_mapping

	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp
	bl	tee_svc_handler
	cpsid	aif	/* In case something was unmasked */
	/* Use ip instead of stack pointer as we need to switch mode. */
	mov	ip, sp
	add	sp, #(4 * 10)
	cps	#CPSR_MODE_ABT
	ldr	r0, [ip], #4
	msr	spsr_fsxc, r0
	cmp_spsr_user_mode r0
	ldm	ip, {r0-r7, lr}
	movnes	pc, lr
	b	eret_to_user_mode
	/* end thread_svc_handler */
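
/*
 * The two pushes at the top of thread_svc_handler lay out a struct
 * thread_svc_regs that tee_svc_handler() both reads the syscall
 * arguments from and writes the return values into; syscall_return()
 * also patches its lr and spsr fields to redirect the final exception
 * return into thread_unwind_user_mode(). A sketch of the layout,
 * assuming it matches the AArch32 definition in kernel/thread.h:
 *
 *	struct thread_svc_regs {
 *		uint32_t spsr;	// pushed last, so first in memory
 *		uint32_t r0;	// syscall arguments in, return values out
 *		uint32_t r1;
 *		uint32_t r2;
 *		uint32_t r3;
 *		uint32_t r4;
 *		uint32_t r5;
 *		uint32_t r6;
 *		uint32_t r7;
 *		uint32_t lr;	// user return address, patched by syscall_return()
 *	};
 */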

thread_fiq_handler:
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
	/* end thread_fiq_handler */

thread_irq_handler:
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
	/* end thread_irq_handler */

	/*
	 * Returns to user mode.
	 * Expects to be jumped to with lr pointing to the user space
	 * address to jump to and spsr holding the desired cpsr. Async
	 * abort, irq and fiq should be masked.
	 */
eret_to_user_mode:
	write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	write_tpidruro r1
#endif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r0, =thread_user_kcode_offset
	ldr	r0, [r0]
	adr	r1, thread_vect_table
	sub	r1, r1, r0
	write_vbar r1
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r1, =1f
	sub	r1, r1, r0
	bx	r1
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r0, r1
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
#endif
	/* switch to user ASID */
	orr	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r0, r1
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbr1 r0
	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
	write_ttbr1 r0
	isb
#endif
	read_contextidr r0
	orr	r0, r0, #BIT(0)
	write_contextidr r0
	isb
#endif /*!CFG_WITH_LPAE*/

	read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	read_tpidruro r1
#endif

	movs	pc, lr
UNWIND(	.fnend)
END_FUNC thread_vect_table