/*
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm-defines.h>
#include <asm.S>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/thread_defs.h>
#include <kernel/unwind.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

#include "thread_private.h"

	.section .text.thread_asm

LOCAL_FUNC vector_std_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (an error was detected) it will do a
	 * normal "C" return.
	 */
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_std_smc_entry
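
/*
 * Illustration only, not compiled: the push {r0-r7} above is assumed to
 * lay the incoming SMC arguments out in memory so that
 * thread_handle_std_smc() can read and update them as a struct of eight
 * consecutive 32-bit words, roughly:
 *
 *	struct thread_smc_args {
 *		uint32_t a0;	(also carries the return value)
 *		uint32_t a1;
 *		...
 *		uint32_t a7;
 *	};
 *
 * The pop {r1-r8} plus the TEESMC_OPTEED_RETURN_CALL_DONE SMC then hand
 * the (possibly updated) a0-a7 back through the secure monitor.
 */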

LOCAL_FUNC vector_fast_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_fast_smc
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	ldr	lr, =thread_nintr_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_on_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_suspend_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_resume_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_system_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_system_reset_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.  Also used when compiled with the internal monitor,
 * but the cpu_*_entry and system_*_entry functions are not used then.
 *
 * Note that ARM-TF depends on the layout of this vector table; any
 * change in layout has to be synced with ARM-TF.
 */
FUNC thread_vector_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
UNWIND(	.fnend)
END_FUNC thread_vector_table
KEEP_PAGER thread_vector_table
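
/*
 * For reference, the entry offsets implied by the branch order above
 * (assuming each entry assembles to a single 4-byte instruction, i.e.
 * ARM state), which is the layout ARM-TF relies on:
 *
 *	+0x00 std_smc		+0x14 cpu_suspend
 *	+0x04 fast_smc		+0x18 fiq
 *	+0x08 cpu_on		+0x1c system_off
 *	+0x0c cpu_off		+0x20 system_reset
 *	+0x10 cpu_resume
 */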

FUNC thread_set_abt_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_abt_sp

FUNC thread_set_und_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_UND
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_und_sp

FUNC thread_set_irq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_fiq_sp
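
/*
 * The four thread_set_*_sp() functions above all follow the same
 * pattern; in rough, illustrative C (the helper names are hypothetical,
 * not the actual kernel API):
 *
 *	void thread_set_xxx_sp(vaddr_t sp)
 *	{
 *		uint32_t cpsr = read_current_cpsr();	(mrs r1, cpsr)
 *
 *		switch_to_mode_xxx();			(cps #CPSR_MODE_XXX)
 *		set_banked_sp(sp);			(mov sp, r0)
 *		write_current_cpsr(cpsr);		(msr cpsr, r1, back to
 *							 the original mode)
 *	}
 */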

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldm	r12!, {sp, lr}

	cps	#CPSR_MODE_SVC
	ldm	r12!, {r1, sp, lr}
	msr	spsr_fsxc, r1

	cps	#CPSR_MODE_SVC
	ldm	r12, {r1, r2}
	push	{r1, r2}

	ldm	r0, {r0-r12}

	/* Restore CPSR and jump to the instruction to resume at */
	rfefd	sp!
UNWIND(	.fnend)
END_FUNC thread_resume
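
/*
 * The offsets used by thread_resume() (and mirrored by the
 * thread_save_state*() functions below) assume a register context
 * stored as consecutive 32-bit words roughly like this sketch; the
 * authoritative definition is struct thread_ctx_regs in the thread
 * headers:
 *
 *	r0 .. r12			(restored last, ldm r0, {r0-r12})
 *	usr_sp, usr_lr			(offset 13 * 4, loaded in SYS mode)
 *	svc_spsr, svc_sp, svc_lr	(loaded in SVC mode)
 *	pc, cpsr			(pushed on the SVC stack and
 *					 consumed by rfefd sp!)
 */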

/*
 * Disables IRQ and FIQ and saves the state of a thread that is in FIQ
 * mode, which has banked r8-r12 registers. Returns the original CPSR.
 */
LOCAL_FUNC thread_save_state_fiq , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mov	r9, lr

	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r8, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}

	cps	#CPSR_MODE_SYS
	stm	r0!, {r8-r12}
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	stm	r0!, {r1, sp, lr}

	/* Back to FIQ mode */
	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r8			/* Return original CPSR */
	bx	r9
UNWIND(	.fnend)
END_FUNC thread_save_state_fiq

/*
 * Disables IRQ and FIQ and saves the state of the thread. Returns the
 * original CPSR.
 */
LOCAL_FUNC thread_save_state , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r12, lr}
	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps	#CPSR_MODE_SYS
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	stm	r0!, {r1, sp, lr}

	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_save_state

FUNC thread_std_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* Pass r0-r7 in a struct thread_smc_args */
	push	{r0-r7}
	mov	r0, sp
	bl	__thread_std_smc_entry
	/*
	 * Load the returned r0-r3 into preserved registers and skip the
	 * "returned" r4-r7 since they will not be returned to normal
	 * world.
	 */
	pop	{r4-r7}
	add	sp, #(4 * 4)

	/* Disable interrupts before switching to temporary stack */
	cpsid	aif
	bl	thread_get_tmp_sp
	mov	sp, r0

	bl	thread_state_free

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	r1, r4
	mov	r2, r5
	mov	r3, r6
	mov	r4, r7
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC thread_std_smc_entry
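
/*
 * Note on the register shuffle above: the returned a0-a3 are popped
 * into r4-r7, which are assumed to be preserved across
 * thread_state_free(), and are then moved into r1-r4 so that, together
 * with TEESMC_OPTEED_RETURN_CALL_DONE in r0, the secure monitor can
 * report them back to normal world.
 */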

/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
/*
 * r0-r2 are used to pass parameters to normal world
 * r0-r5 are used to pass the return value back from normal world
 *
 * Note that r3 is used to pass "resume information", that is, which
 * thread it is that should resume.
 *
 * Since this function follows AAPCS we need to preserve r4-r5, which
 * are otherwise modified when returning back from normal world.
 */
UNWIND(	.fnstart)
	push	{r4-r5, lr}
UNWIND(	.save	{r4-r5, lr})
	push	{r0}
UNWIND(	.save	{r0})

	bl	thread_save_state
	mov	r4, r0			/* Save original CPSR */

	/*
	 * Switch to temporary stack and SVC mode. Save CPSR to resume into.
	 */
	bl	thread_get_tmp_sp
	ldr	r5, [sp]		/* Get pointer to rv[] */
	cps	#CPSR_MODE_SVC		/* Change to SVC mode */
	mov	sp, r0			/* Switch to tmp stack */

	mov	r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	mov	r1, r4			/* CPSR to restore */
	ldr	r2, =.thread_rpc_return
	bl	thread_state_suspend
	mov	r4, r0			/* Supply thread index */
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldm	r5, {r1-r3}		/* Load rv[] into r1-r3 (r0-r2 in normal world) */
	smc	#0
	b	.	/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * it had when thread_save_state() was called above.
	 *
	 * Jumps here from thread_resume above when RPC has returned. The
	 * IRQ and FIQ bits are restored to what they were when this
	 * function was originally entered.
	 */
	pop	{r12}			/* Get pointer to rv[] */
	stm	r12, {r0-r5}		/* Store r0-r5 into rv[] */
	pop	{r4-r5, pc}
UNWIND(	.fnend)
END_FUNC thread_rpc
KEEP_PAGER thread_rpc
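
/*
 * Usage sketch (illustrative only; the exact request codes and payload
 * layout are defined by optee_smc.h and the C callers, e.g. thread.c):
 *
 *	uint32_t rv[THREAD_RPC_NUM_ARGS] = { 0 };
 *
 *	rv[0] = OPTEE_SMC_RETURN_RPC_CMD;	(assumed request code)
 *	rv[1] = payload_hi;			(hypothetical payload words)
 *	rv[2] = payload_lo;
 *	thread_rpc(rv);
 *	(on return rv[] holds r0-r5 as stored by .thread_rpc_return above)
 */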

/* The handler of native interrupts. */
.macro	native_intr_handler mode:req
	.ifc	\mode\(),irq
	/*
	 * Foreign interrupts should be masked.
	 * For GICv2, IRQ is for foreign interrupts and is already masked by
	 * hardware in FIQ mode, which is used for native interrupts.
	 * For GICv3, FIQ is for foreign interrupts. It's not masked by
	 * hardware in IRQ mode, which is used for native interrupts.
	 */
	cpsid	f
	.endif
	/*
	 * FIQ and IRQ have a +4 offset for lr compared to the preferred
	 * return address.
	 */
	sub     lr, lr, #4

	/*
	 * We're always saving {r0-r3}. In IRQ mode we're also saving r12.
	 * In FIQ mode we're saving the banked FIQ registers {r8-r12}
	 * because the secure monitor doesn't save those. The treatment of
	 * the banked FIQ registers is somewhat analogous to the lazy save
	 * of VFP registers.
	 */
	.ifc	\mode\(),fiq
	push	{r0-r3, r8-r12, lr}
	.else
	push	{r0-r3, r12, lr}
	.endif
	bl	thread_check_canaries
	ldr	lr, =thread_nintr_handler_ptr
	ldr	lr, [lr]
	blx	lr
	.ifc	\mode\(),fiq
	pop	{r0-r3, r8-r12, lr}
	.else
	pop	{r0-r3, r12, lr}
	.endif
	movs	pc, lr
.endm

/* The handler of foreign interrupts. */
.macro foreign_intr_handler mode:req
	.ifc	\mode\(),irq
	/*
	 * Disable FIQ if the foreign interrupt is sent as IRQ.
	 * IRQ mode is set up to use the tmp stack, so FIQ has to be
	 * disabled before touching the stack. We can also assign
	 * SVC sp from IRQ sp to get SVC mode into the state we
	 * need when doing the SMC below.
	 * If it is sent as FIQ, the IRQ has already been masked by hardware.
	 */
	cpsid	f
	.endif
	sub	lr, lr, #4
	push	{lr}
	push	{r12}

	.ifc	\mode\(),fiq
	bl	thread_save_state_fiq
	.else
	bl	thread_save_state
	.endif

	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	r1, spsr
	pop	{r12}
	pop	{r2}
	blx	thread_state_suspend
	mov	r4, r0		/* Supply thread index */

	/*
	 * Switch to SVC mode and copy the current stack pointer as it
	 * already is the tmp stack.
	 */
	mov	r0, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r0

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	r1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	r2, #0
	mov	r3, #0
	/* r4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
.endm

LOCAL_FUNC thread_fiq_handler , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
UNWIND(	.fnend)
END_FUNC thread_fiq_handler

LOCAL_FUNC thread_irq_handler , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
UNWIND(	.fnend)
END_FUNC thread_irq_handler

FUNC thread_init_vbar , :
UNWIND(	.fnstart)
	/* Set vector (VBAR) */
	ldr	r0, =thread_vect_table
	write_vbar r0
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * Below are low level routines handling entry and return from user mode.
 *
 * thread_enter_user_mode() saves all registers that user mode can change
 * so that kernel mode can restore needed registers when resuming
 * execution after the call to thread_enter_user_mode() has returned.
 * thread_enter_user_mode() doesn't return directly since it enters user
 * mode instead; it's thread_unwind_user_mode() that does the returning
 * by restoring the registers saved by thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to its
 * caller: the user TA calls utee_return, the user TA calls utee_panic,
 * or an abort occurs.
 *
 * Calls to utee_return or utee_panic are handled as:
 * thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call() which
 * calls syscall_return() or syscall_panic().
 *
 * These function calls return normally except thread_svc_handler(),
 * which is an exception handling routine and therefore reads the return
 * address and SPSR to restore from the stack. syscall_return() and
 * syscall_panic() change the return address and SPSR used by
 * thread_svc_handler() so that, instead of returning into user mode as
 * with other syscalls, it returns into thread_unwind_user_mode() in
 * kernel mode. When thread_svc_handler() returns, the stack pointer is
 * at the point where thread_enter_user_mode() left it, so this is where
 * thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way but by thread_abort_handler()
 * instead: when the pager sees that it's an abort from user mode that
 * can't be handled, it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */
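
/*
 * Rough call-flow sketch of the above (illustrative only; variable
 * names are made up, the real C caller typically lives in thread.c):
 *
 *	uint32_t exit_status0 = 0;
 *	uint32_t exit_status1 = 0;
 *	uint32_t ret;
 *
 *	ret = __thread_enter_user_mode(a0, a1, a2, a3, user_sp, user_func,
 *				       spsr, &exit_status0, &exit_status1);
 *	(execution only comes back here via thread_unwind_user_mode(),
 *	 entered from syscall_return(), syscall_panic() or an abort)
 */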

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
 *               unsigned long user_func, unsigned long spsr,
 *               uint32_t *exit_status0, uint32_t *exit_status1)
 *
 */
FUNC __thread_enter_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function had returned. This is also used in
	 * syscall_panic().
	 *
	 * If the stack usage of this function is changed
	 * thread_unwind_user_mode() has to be updated.
	 */
	push    {r4-r12,lr}

	ldr     r4, [sp, #(10 * 0x4)]   /* user stack pointer */
	ldr     r5, [sp, #(11 * 0x4)]   /* user function */
	ldr     r6, [sp, #(12 * 0x4)]   /* spsr */

	/*
	 * Set the Saved Program Status Register to user mode to allow
	 * entry of user mode through the movs below.
	 */
	msr     spsr_cxsf, r6

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r6, sp
	mov     sp, r4
	cps	#CPSR_MODE_SVC
	push	{r6,r7}

	/*
	 * Don't allow return from this function; the return is done through
	 * thread_unwind_user_mode() below.
	 */
	mov     lr, #0
	/* Call the user function with its arguments */
	movs    pc, r5
UNWIND(	.fnend)
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *              uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr     ip, [sp, #(15 * 0x4)]   /* &ctx->panicked */
	str	r1, [ip]
	ldr     ip, [sp, #(16 * 0x4)]   /* &ctx->panic_code */
	str	r2, [ip]

	/* Restore old user sp */
	pop	{r4,r7}
	cps	#CPSR_MODE_SYS
	mov	sp, r4
	cps	#CPSR_MODE_SVC

	pop     {r4-r12,pc}	/* Match the push in thread_enter_user_mode() */
UNWIND(	.fnend)
END_FUNC thread_unwind_user_mode
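
/*
 * Stack layout assumed by the offsets above; word offsets are relative
 * to sp at entry to thread_unwind_user_mode(), i.e. the SVC stack as
 * __thread_enter_user_mode() left it:
 *
 *	[sp +  0 * 4]	old user sp	(pushed as {r6, r7} on entry)
 *	[sp +  1 * 4]	r7
 *	[sp +  2 * 4]	r4-r12, lr	(10 words, push {r4-r12,lr})
 *	[sp + 12 * 4]	user_sp		(stacked arguments of
 *	[sp + 13 * 4]	user_func	 __thread_enter_user_mode())
 *	[sp + 14 * 4]	spsr
 *	[sp + 15 * 4]	exit_status0	(&ctx->panicked)
 *	[sp + 16 * 4]	exit_status1	(&ctx->panic_code)
 */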

LOCAL_FUNC thread_abort_handler , :
thread_und_handler:
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/*
	 * Disable both foreign and native interrupts in the thread handlers.
	 * The tee handlers can decide when the native interrupts should
	 * be enabled.
	 */
	cpsid	f	/* IRQ is already masked by the hardware */
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	mrs	r1, spsr
	tst	r1, #CPSR_T
	subne	lr, lr, #2
	subeq	lr, lr, #4
	mov	r0, #ABORT_TYPE_UNDEF
	b	.thread_abort_generic

thread_dabort_handler:
	/*
	 * Disable both foreign and native interrupts in the thread handlers.
	 * The tee handlers can decide when the native interrupts should
	 * be enabled.
	 */
	cpsid	f	/* IRQ is already masked by the hardware */
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #8
	mov	r0, #ABORT_TYPE_DATA
	b	.thread_abort_generic

thread_pabort_handler:
	/*
	 * Disable both foreign and native interrupts in the thread handlers.
	 * The tee handlers can decide when the native interrupts should
	 * be enabled.
	 */
	cpsid	f	/* IRQ is already masked by the hardware */
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #4
	mov	r0, #ABORT_TYPE_PREFETCH

.thread_abort_generic:
	/*
	 * At this label:
	 * The CPSR mode is either undef or abort.
	 * sp is still pointing to the struct thread_core_local belonging
	 * to this core.
	 * {r0, r1} are saved in the struct thread_core_local pointed to
	 * by sp.
	 * {r2-r11, ip} are untouched.
	 * r0 holds the first argument for abort_handler().
	 */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_ABORT

	/*
	 * Select stack and update flags accordingly
	 *
	 * Normal case:
	 * If the abort stack is unused select that.
	 *
	 * Fatal error handling:
	 * If we're already using the abort stack, as noted by bit
	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
	 * field, we select the temporary stack instead to be able to
	 * make a stack trace of the abort in abort mode.
	 *
	 * r1 is initialized as a temporary stack pointer until we've
	 * switched to system mode.
	 */
	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	orrne	r1, r1, #THREAD_CLF_TMP /* flags |= THREAD_CLF_TMP; */
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]

	/*
	 * Store registers on the selected stack, fitting struct
	 * thread_abort_regs and starting from the end of the struct:
	 * {r2-r11, ip}.
	 * Then load the previously saved {r0-r1} and store them as well,
	 * followed by {pad, spsr, elr}.
	 * After this only {usr_sp, usr_lr} are missing in the struct.
	 */
	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
	/* Push the original {r0-r1} on the selected stack */
	stmdb	r1!, {r2-r3}
	mrs	r3, spsr
	/* Push {pad, spsr, elr} on the selected stack */
	stmdb	r1!, {r2, r3, lr}

	cps	#CPSR_MODE_SYS
	str	lr, [r1, #-4]!
	str	sp, [r1, #-4]!
	mov	sp, r1

	bl	abort_handler

	mov	ip, sp
	ldr	sp, [ip], #4
	ldr	lr, [ip], #4

	/*
	 * Even if we entered via CPSR_MODE_UND, we are returning via
	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
	 * here.
	 */
	cps	#CPSR_MODE_ABT
	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
	msr	spsr_fsxc, r1

	/* Update core local flags */
	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	ldm	ip, {r0-r11, ip}

	movs	pc, lr
UNWIND(	.fnend)
END_FUNC thread_abort_handler
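
/*
 * The stores in .thread_abort_generic above build, from lower to higher
 * addresses, a frame assumed to match struct thread_abort_regs (sketch
 * only; the authoritative definition is in the abort/thread headers):
 *
 *	uint32_t usr_sp;
 *	uint32_t usr_lr;
 *	uint32_t pad;
 *	uint32_t spsr;
 *	uint32_t elr;
 *	uint32_t r0;
 *	uint32_t r1;
 *	uint32_t r2 .. r11;
 *	uint32_t ip;
 */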

LOCAL_FUNC thread_svc_handler , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/*
	 * Disable both foreign and native interrupts in the thread handlers.
	 * The tee handlers can decide when the native interrupts should
	 * be enabled.
	 */
	cpsid	f	/* IRQ is already masked by the hardware */
	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp
	bl	tee_svc_handler
	pop	{r0}
	msr	spsr_fsxc, r0
	pop	{r0-r7, lr}
	movs	pc, lr
UNWIND(	.fnend)
END_FUNC thread_svc_handler
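
/*
 * The two pushes above lay out the frame passed to tee_svc_handler(),
 * assumed to match struct thread_svc_regs (sketch only, lower to higher
 * addresses; the authoritative definition is in the thread headers):
 *
 *	uint32_t spsr;
 *	uint32_t r0 .. r7;
 *	uint32_t lr;
 */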

	.section .text.thread_vect_table
	.align	5
LOCAL_FUNC thread_vect_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.			/* Reset			*/
	b	thread_und_handler	/* Undefined instruction	*/
	b	thread_svc_handler	/* System call			*/
	b	thread_pabort_handler	/* Prefetch abort		*/
	b	thread_dabort_handler	/* Data abort			*/
	b	.			/* Reserved			*/
	b	thread_irq_handler	/* IRQ				*/
	b	thread_fiq_handler	/* FIQ				*/
UNWIND(	.fnend)
END_FUNC thread_vect_table